Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 7 Jan 2012 01:59:33 +0000 (17:59 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 7 Jan 2012 01:59:33 +0000 (17:59 -0800)
* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu: (56 commits)
  m68k: allow ColdFire 547x and 548x CPUs to be built with MMU enabled
  m68k/Kconfig: Separate classic m68k and coldfire early
  m68k: add ColdFire with MMU enabled support to the m68k mem init code
  m68k: do not use m68k startup or interrupt code for ColdFire CPUs
  m68k: add ColdFire FPU support for the V4e ColdFire CPUs
  m68k: adjustments to stack frame for ColdFire with MMU enabled
  m68k: use non-MMU linker script for ColdFire MMU builds
  m68k: ColdFire with MMU enabled uses same clocking code as non-MMU
  m68k: add code to setup a ColdFire 54xx platform when MMU enabled
  m68k: use non-MMU entry.S code when compiling for ColdFire CPU
  m68k: create ColdFire MMU pgalloc code
  m68k: compile appropriate mm arch files for ColdFire MMU support
  m68k: ColdFire V4e MMU paging init code and miss handler
  m68k: use ColdFire MMU read/write bit flags when ioremapping
  m68k: modify cache push and clear code for ColdFire with MMU enable
  m68k: use tracehook_report_syscall_entry/exit for ColdFire MMU ptrace path
  m68k: ColdFire V4e MMU context support code
  m68k: MMU enabled ColdFire needs 8k ELF alignment
  m68k: set ColdFire MMU page size
  m68k: define PAGE_OFFSET_RAW for ColdFire CPU with MMU enabled
  ...

76 files changed:
arch/m68k/Kconfig
arch/m68k/Kconfig.cpu
arch/m68k/Kconfig.debug
arch/m68k/Kconfig.machine
arch/m68k/include/asm/anchor.h [deleted file]
arch/m68k/include/asm/atomic.h
arch/m68k/include/asm/cacheflush_mm.h
arch/m68k/include/asm/checksum.h
arch/m68k/include/asm/div64.h
arch/m68k/include/asm/elf.h
arch/m68k/include/asm/entry.h
arch/m68k/include/asm/fpu.h
arch/m68k/include/asm/gpio.h
arch/m68k/include/asm/irq.h
arch/m68k/include/asm/m54xxacr.h
arch/m68k/include/asm/mcf_pgalloc.h [new file with mode: 0644]
arch/m68k/include/asm/mcf_pgtable.h [new file with mode: 0644]
arch/m68k/include/asm/mcfmmu.h [new file with mode: 0644]
arch/m68k/include/asm/mmu_context.h
arch/m68k/include/asm/motorola_pgtable.h
arch/m68k/include/asm/page.h
arch/m68k/include/asm/page_no.h
arch/m68k/include/asm/page_offset.h
arch/m68k/include/asm/pgalloc.h
arch/m68k/include/asm/pgtable_mm.h
arch/m68k/include/asm/processor.h
arch/m68k/include/asm/segment.h
arch/m68k/include/asm/setup.h
arch/m68k/include/asm/sigcontext.h
arch/m68k/include/asm/thread_info.h
arch/m68k/include/asm/tlbflush.h
arch/m68k/include/asm/traps.h
arch/m68k/include/asm/uaccess_mm.h
arch/m68k/include/asm/ucontext.h
arch/m68k/kernel/Makefile
arch/m68k/kernel/asm-offsets.c
arch/m68k/kernel/entry.S
arch/m68k/kernel/entry_mm.S
arch/m68k/kernel/entry_no.S
arch/m68k/kernel/init_task.c
arch/m68k/kernel/m68k_ksyms.c
arch/m68k/kernel/process_mm.c
arch/m68k/kernel/ptrace_mm.c
arch/m68k/kernel/setup_mm.c
arch/m68k/kernel/setup_no.c
arch/m68k/kernel/signal_mm.c
arch/m68k/kernel/time.c
arch/m68k/kernel/time_no.c
arch/m68k/kernel/traps.c
arch/m68k/kernel/vmlinux-nommu.lds [new file with mode: 0644]
arch/m68k/kernel/vmlinux-std.lds
arch/m68k/kernel/vmlinux-sun3.lds
arch/m68k/kernel/vmlinux.lds.S
arch/m68k/kernel/vmlinux.lds_mm.S [deleted file]
arch/m68k/kernel/vmlinux.lds_no.S [deleted file]
arch/m68k/lib/Makefile
arch/m68k/lib/checksum.c [new file with mode: 0644]
arch/m68k/lib/checksum_mm.c [deleted file]
arch/m68k/lib/checksum_no.c [deleted file]
arch/m68k/lib/uaccess.c
arch/m68k/mm/Makefile
arch/m68k/mm/cache.c
arch/m68k/mm/init_mm.c
arch/m68k/mm/kmap.c
arch/m68k/mm/mcfmmu.c [new file with mode: 0644]
arch/m68k/mm/memory.c
arch/m68k/platform/54xx/config.c
arch/m68k/platform/68328/Makefile
arch/m68k/platform/68328/bootlogo.h
arch/m68k/platform/68328/bootlogo.pl [deleted file]
arch/m68k/platform/68328/config.c
arch/m68k/platform/68328/head-pilot.S
arch/m68k/platform/68328/head-rom.S
arch/m68k/platform/coldfire/entry.S
arch/m68k/platform/coldfire/head.S
arch/m68k/platform/coldfire/sltimers.c

index 361d54019bb0bcafaed6e7e6d408d638ae4cd5c5..81fdaa72c540e4315c2d9872d75ff0f4e17afba5 100644 (file)
@@ -3,7 +3,6 @@ config M68K
        default y
        select HAVE_IDE
        select HAVE_AOUT if MMU
-       select GENERIC_ATOMIC64 if MMU
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_SHOW
        select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
@@ -41,12 +40,15 @@ config GENERIC_CALIBRATE_DELAY
 config GENERIC_IOMAP
        def_bool MMU
 
+config GENERIC_CSUM
+       bool
+
 config TIME_LOW_RES
        bool
        default y
 
 config ARCH_USES_GETTIMEOFFSET
-       def_bool MMU
+       def_bool MMU && !COLDFIRE
 
 config NO_IOPORT
        def_bool y
@@ -61,6 +63,12 @@ config ZONE_DMA
 config CPU_HAS_NO_BITFIELDS
        bool
 
+config CPU_HAS_NO_MULDIV64
+       bool
+
+config CPU_HAS_ADDRESS_SPACES
+       bool
+
 config HZ
        int
        default 1000 if CLEOPATRA
@@ -80,9 +88,12 @@ config MMU
 config MMU_MOTOROLA
        bool
 
+config MMU_COLDFIRE
+       bool
+
 config MMU_SUN3
        bool
-       depends on MMU && !MMU_MOTOROLA
+       depends on MMU && !MMU_MOTOROLA && !MMU_COLDFIRE
 
 menu "Platform setup"
 
index e632b2d1210615d89aa74490a8e8cbf2d8897b97..8a9c767125a4758b27e08f1488e27fa8b85ad258 100644 (file)
@@ -1,8 +1,42 @@
 comment "Processor Type"
 
+choice
+       prompt "CPU family support"
+       default M68KCLASSIC if MMU
+       default COLDFIRE if !MMU
+       help
+         The Freescale (was Motorola) M68K family of processors implements
+         the full 68000 processor instruction set.
+         The Freescale ColdFire family of processors is a modern derivative
+         of the 68000 processor family. They are mainly targeted at embedded
+         applications, and are all System-On-Chip (SOC) devices, as opposed
+         to stand-alone CPUs. They implement a subset of the original 68000
+         processor instruction set.
+         If you anticipate running this kernel on a computer with a classic
+         MC68xxx processor, select M68KCLASSIC.
+         If you anticipate running this kernel on a computer with a ColdFire
+         processor, select COLDFIRE.
+
+config M68KCLASSIC
+       bool "Classic M68K CPU family support"
+
+config COLDFIRE
+       bool "Coldfire CPU family support"
+       select GENERIC_GPIO
+       select ARCH_REQUIRE_GPIOLIB
+       select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_MULDIV64
+       select GENERIC_CSUM
+
+endchoice
+
+if M68KCLASSIC
+
 config M68000
        bool
        select CPU_HAS_NO_BITFIELDS
+       select CPU_HAS_NO_MULDIV64
+       select GENERIC_CSUM
        help
          The Freescale (was Motorola) 68000 CPU is the first generation of
          the well known M68K family of processors. The CPU core as well as
@@ -18,21 +52,11 @@ config MCPU32
          based on the 68020 processor. For the most part it is used in
          System-On-Chip parts, and does not contain a paging MMU.
 
-config COLDFIRE
-       bool
-       select GENERIC_GPIO
-       select ARCH_REQUIRE_GPIOLIB
-       select CPU_HAS_NO_BITFIELDS
-       help
-         The Freescale ColdFire family of processors is a modern derivitive
-         of the 68000 processor family. They are mainly targeted at embedded
-         applications, and are all System-On-Chip (SOC) devices, as opposed
-         to stand alone CPUs. They implement a subset of the original 68000
-         processor instruction set.
-
 config M68020
        bool "68020 support"
        depends on MMU
+       select GENERIC_ATOMIC64
+       select CPU_HAS_ADDRESS_SPACES
        help
          If you anticipate running this kernel on a computer with a MC68020
          processor, say Y. Otherwise, say N. Note that the 68020 requires a
@@ -42,6 +66,8 @@ config M68020
 config M68030
        bool "68030 support"
        depends on MMU && !MMU_SUN3
+       select GENERIC_ATOMIC64
+       select CPU_HAS_ADDRESS_SPACES
        help
          If you anticipate running this kernel on a computer with a MC68030
          processor, say Y. Otherwise, say N. Note that a MC68EC030 will not
@@ -50,6 +76,8 @@ config M68030
 config M68040
        bool "68040 support"
        depends on MMU && !MMU_SUN3
+       select GENERIC_ATOMIC64
+       select CPU_HAS_ADDRESS_SPACES
        help
          If you anticipate running this kernel on a computer with a MC68LC040
          or MC68040 processor, say Y. Otherwise, say N. Note that an
@@ -59,6 +87,8 @@ config M68040
 config M68060
        bool "68060 support"
        depends on MMU && !MMU_SUN3
+       select GENERIC_ATOMIC64
+       select CPU_HAS_ADDRESS_SPACES
        help
          If you anticipate running this kernel on a computer with a MC68060
          processor, say Y. Otherwise, say N.
@@ -91,10 +121,13 @@ config M68360
        help
          Motorola 68360 processor support.
 
+endif # M68KCLASSIC
+
+if COLDFIRE
+
 config M5206
        bool "MCF5206"
        depends on !MMU
-       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_MBAR
        help
@@ -103,7 +136,6 @@ config M5206
 config M5206e
        bool "MCF5206e"
        depends on !MMU
-       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_MBAR
        help
@@ -112,7 +144,6 @@ config M5206e
 config M520x
        bool "MCF520x"
        depends on !MMU
-       select COLDFIRE
        select GENERIC_CLOCKEVENTS
        select HAVE_CACHE_SPLIT
        help
@@ -121,7 +152,6 @@ config M520x
 config M523x
        bool "MCF523x"
        depends on !MMU
-       select COLDFIRE
        select GENERIC_CLOCKEVENTS
        select HAVE_CACHE_SPLIT
        select HAVE_IPSBAR
@@ -131,7 +161,6 @@ config M523x
 config M5249
        bool "MCF5249"
        depends on !MMU
-       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_MBAR
        help
@@ -143,7 +172,6 @@ config M527x
 config M5271
        bool "MCF5271"
        depends on !MMU
-       select COLDFIRE
        select M527x
        select HAVE_CACHE_SPLIT
        select HAVE_IPSBAR
@@ -154,7 +182,6 @@ config M5271
 config M5272
        bool "MCF5272"
        depends on !MMU
-       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_MBAR
        help
@@ -163,7 +190,6 @@ config M5272
 config M5275
        bool "MCF5275"
        depends on !MMU
-       select COLDFIRE
        select M527x
        select HAVE_CACHE_SPLIT
        select HAVE_IPSBAR
@@ -174,7 +200,6 @@ config M5275
 config M528x
        bool "MCF528x"
        depends on !MMU
-       select COLDFIRE
        select GENERIC_CLOCKEVENTS
        select HAVE_CACHE_SPLIT
        select HAVE_IPSBAR
@@ -184,7 +209,6 @@ config M528x
 config M5307
        bool "MCF5307"
        depends on !MMU
-       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_CACHE_CB
        select HAVE_MBAR
@@ -194,7 +218,6 @@ config M5307
 config M532x
        bool "MCF532x"
        depends on !MMU
-       select COLDFIRE
        select HAVE_CACHE_CB
        help
          Freescale (Motorola) ColdFire 532x processor support.
@@ -202,7 +225,6 @@ config M532x
 config M5407
        bool "MCF5407"
        depends on !MMU
-       select COLDFIRE
        select COLDFIRE_SW_A7
        select HAVE_CACHE_CB
        select HAVE_MBAR
@@ -214,9 +236,8 @@ config M54xx
 
 config M547x
        bool "MCF547x"
-       depends on !MMU
-       select COLDFIRE
        select M54xx
+       select MMU_COLDFIRE if MMU
        select HAVE_CACHE_CB
        select HAVE_MBAR
        help
@@ -224,14 +245,15 @@ config M547x
 
 config M548x
        bool "MCF548x"
-       depends on !MMU
-       select COLDFIRE
+       select MMU_COLDFIRE if MMU
        select M54xx
        select HAVE_CACHE_CB
        select HAVE_MBAR
        help
          Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.
 
+endif # COLDFIRE
+
 
 comment "Processor Specific Options"
 
index 2bdb1b01115c24f93a21e9e095918a3d44ab89ea..af4fd5f8f8d5c9852359a47f9657bf32c395a2f9 100644 (file)
@@ -2,6 +2,14 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
+config BOOTPARAM
+       bool 'Compiled-in Kernel Boot Parameter'
+
+config BOOTPARAM_STRING
+       string 'Kernel Boot Parameter'
+       default 'console=ttyS0,19200'
+       depends on BOOTPARAM
+
 if !MMU
 
 config FULLDEBUG
@@ -15,14 +23,6 @@ config HIGHPROFILE
        help
          Use a fast secondary clock to produce profiling information.
 
-config BOOTPARAM
-       bool 'Compiled-in Kernel Boot Parameter'
-
-config BOOTPARAM_STRING
-       string 'Kernel Boot Parameter'
-       default 'console=ttyS0,19200'
-       depends on BOOTPARAM
-
 config NO_KERNEL_MSG
        bool "Suppress Kernel BUG Messages"
        help
index ef4a26aff780c9b039e4cf59daa067f564cfc507..7cdf6b010381f8ae31332012c18d93345232514a 100644 (file)
@@ -1,5 +1,7 @@
 comment "Machine Types"
 
+if M68KCLASSIC
+
 config AMIGA
        bool "Amiga support"
        depends on MMU
@@ -130,6 +132,8 @@ config SUN3
 
          If you don't want to compile a kernel exclusively for a Sun 3, say N.
 
+endif # M68KCLASSIC
+
 config PILOT
        bool
 
diff --git a/arch/m68k/include/asm/anchor.h b/arch/m68k/include/asm/anchor.h
deleted file mode 100644 (file)
index 871c0d5..0000000
+++ /dev/null
@@ -1,112 +0,0 @@
-/****************************************************************************/
-
-/*
- *     anchor.h -- Anchor CO-MEM Lite PCI host bridge part.
- *
- *     (C) Copyright 2000, Moreton Bay (www.moreton.com.au)
- */
-
-/****************************************************************************/
-#ifndef        anchor_h
-#define        anchor_h
-/****************************************************************************/
-
-/*
- *     Define basic addressing info.
- */
-#if defined(CONFIG_M5407C3)
-#define        COMEM_BASE      0xFFFF0000      /* Base of CO-MEM address space */
-#define        COMEM_IRQ       25              /* IRQ of anchor part */
-#else
-#define        COMEM_BASE      0x80000000      /* Base of CO-MEM address space */
-#define        COMEM_IRQ       25              /* IRQ of anchor part */
-#endif
-
-/****************************************************************************/
-
-/*
- *     4-byte registers of CO-MEM, so adjust register addresses for
- *     easy access. Handy macro for word access too.
- */
-#define        LREG(a)         ((a) >> 2)
-#define        WREG(a)         ((a) >> 1)
-
-
-/*
- *     Define base addresses within CO-MEM Lite register address space.
- */
-#define        COMEM_I2O       0x0000          /* I2O registers */
-#define        COMEM_OPREGS    0x0400          /* Operation registers */
-#define        COMEM_PCIBUS    0x2000          /* Direct access to PCI bus */
-#define        COMEM_SHMEM     0x4000          /* Shared memory region */
-
-#define        COMEM_SHMEMSIZE 0x4000          /* Size of shared memory */
-
-
-/*
- *     Define CO-MEM Registers.
- */
-#define        COMEM_I2OHISR   0x0030          /* I2O host interrupt status */
-#define        COMEM_I2OHIMR   0x0034          /* I2O host interrupt mask */
-#define        COMEM_I2OLISR   0x0038          /* I2O local interrupt status */
-#define        COMEM_I2OLIMR   0x003c          /* I2O local interrupt mask */
-#define        COMEM_IBFPFIFO  0x0040          /* I2O inbound free/post FIFO */
-#define        COMEM_OBPFFIFO  0x0044          /* I2O outbound post/free FIFO */
-#define        COMEM_IBPFFIFO  0x0048          /* I2O inbound post/free FIFO */
-#define        COMEM_OBFPFIFO  0x004c          /* I2O outbound free/post FIFO */
-
-#define        COMEM_DAHBASE   0x0460          /* Direct access base address */
-
-#define        COMEM_NVCMD     0x04a0          /* I2C serial command */
-#define        COMEM_NVREAD    0x04a4          /* I2C serial read */
-#define        COMEM_NVSTAT    0x04a8          /* I2C status */
-
-#define        COMEM_DMALBASE  0x04b0          /* DMA local base address */
-#define        COMEM_DMAHBASE  0x04b4          /* DMA host base address */
-#define        COMEM_DMASIZE   0x04b8          /* DMA size */
-#define        COMEM_DMACTL    0x04bc          /* DMA control */
-
-#define        COMEM_HCTL      0x04e0          /* Host control */
-#define        COMEM_HINT      0x04e4          /* Host interrupt control/status */
-#define        COMEM_HLDATA    0x04e8          /* Host to local data mailbox */
-#define        COMEM_LINT      0x04f4          /* Local interrupt contole status */
-#define        COMEM_LHDATA    0x04f8          /* Local to host data mailbox */
-
-#define        COMEM_LBUSCFG   0x04fc          /* Local bus configuration */
-
-
-/*
- *     Commands and flags for use with Direct Access Register.
- */
-#define        COMEM_DA_IACK   0x00000000      /* Interrupt acknowledge (read) */
-#define        COMEM_DA_SPCL   0x00000010      /* Special cycle (write) */
-#define        COMEM_DA_MEMRD  0x00000004      /* Memory read cycle */
-#define        COMEM_DA_MEMWR  0x00000004      /* Memory write cycle */
-#define        COMEM_DA_IORD   0x00000002      /* I/O read cycle */
-#define        COMEM_DA_IOWR   0x00000002      /* I/O write cycle */
-#define        COMEM_DA_CFGRD  0x00000006      /* Configuration read cycle */
-#define        COMEM_DA_CFGWR  0x00000006      /* Configuration write cycle */
-
-#define        COMEM_DA_ADDR(a)        ((a) & 0xffffe000)
-
-#define        COMEM_DA_OFFSET(a)      ((a) & 0x00001fff)
-
-
-/*
- *     The PCI bus will be limited in what slots will actually be used.
- *     Define valid device numbers for different boards.
- */
-#if defined(CONFIG_M5407C3)
-#define        COMEM_MINDEV    14              /* Minimum valid DEVICE */
-#define        COMEM_MAXDEV    14              /* Maximum valid DEVICE */
-#define        COMEM_BRIDGEDEV 15              /* Slot bridge is in */
-#else
-#define        COMEM_MINDEV    0               /* Minimum valid DEVICE */
-#define        COMEM_MAXDEV    3               /* Maximum valid DEVICE */
-#endif
-
-#define        COMEM_MAXPCI    (COMEM_MAXDEV+1)        /* Maximum PCI devices */
-
-
-/****************************************************************************/
-#endif /* anchor_h */
index 65c6be6c81807aa38b90b092bf6bb83f0541b73f..4eba796c00d4ae0c22008a9058b9bb6267c37ac4 100644 (file)
@@ -55,6 +55,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
        return c != 0;
 }
 
+static inline int atomic_dec_and_test_lt(atomic_t *v)
+{
+       char c;
+       __asm__ __volatile__(
+               "subql #1,%1; slt %0"
+               : "=d" (c), "=m" (*v)
+               : "m" (*v));
+       return c != 0;
+}
+
 static inline int atomic_inc_and_test(atomic_t *v)
 {
        char c;
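
The new atomic_dec_and_test_lt() above decrements the counter and returns true once the result has gone negative: "subql #1,%1" does the decrement and "slt %0" sets the result byte on less-than. The ColdFire MMU context allocator added later in this merge relies on exactly that semantic to detect that no free contexts remain. A minimal host-side C model of the intended behaviour (atomicity ignored; names assumed for illustration):

    #include <assert.h>

    /* Model of atomic_dec_and_test_lt(): decrement, report "went negative".
     * This only illustrates the return value, not the atomicity. */
    static int dec_and_test_lt(int *v)
    {
            return --(*v) < 0;
    }

    int main(void)
    {
            int nr_free_contexts = 1;

            assert(dec_and_test_lt(&nr_free_contexts) == 0); /* 1 -> 0: a context was free */
            assert(dec_and_test_lt(&nr_free_contexts) == 1); /* 0 -> -1: none left */
            return 0;
    }
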
index 73de7c89d8e0e54c8a65c4c040e84ed3b904642a..8104bd874649560e607689b3240d9c64487b2be3 100644 (file)
@@ -2,23 +2,89 @@
 #define _M68K_CACHEFLUSH_H
 
 #include <linux/mm.h>
+#ifdef CONFIG_COLDFIRE
+#include <asm/mcfsim.h>
+#endif
 
 /* cache code */
 #define FLUSH_I_AND_D  (0x00000808)
 #define FLUSH_I                (0x00000008)
 
+#ifndef ICACHE_MAX_ADDR
+#define ICACHE_MAX_ADDR        0
+#define ICACHE_SET_MASK        0
+#define DCACHE_MAX_ADDR        0
+#define DCACHE_SET_MASK        0
+#endif
+
+static inline void flush_cf_icache(unsigned long start, unsigned long end)
+{
+       unsigned long set;
+
+       for (set = start; set <= end; set += (0x10 - 3)) {
+               __asm__ __volatile__ (
+                       "cpushl %%ic,(%0)\n\t"
+                       "addq%.l #1,%0\n\t"
+                       "cpushl %%ic,(%0)\n\t"
+                       "addq%.l #1,%0\n\t"
+                       "cpushl %%ic,(%0)\n\t"
+                       "addq%.l #1,%0\n\t"
+                       "cpushl %%ic,(%0)"
+                       : "=a" (set)
+                       : "a" (set));
+       }
+}
+
+static inline void flush_cf_dcache(unsigned long start, unsigned long end)
+{
+       unsigned long set;
+
+       for (set = start; set <= end; set += (0x10 - 3)) {
+               __asm__ __volatile__ (
+                       "cpushl %%dc,(%0)\n\t"
+                       "addq%.l #1,%0\n\t"
+                       "cpushl %%dc,(%0)\n\t"
+                       "addq%.l #1,%0\n\t"
+                       "cpushl %%dc,(%0)\n\t"
+                       "addq%.l #1,%0\n\t"
+                       "cpushl %%dc,(%0)"
+                       : "=a" (set)
+                       : "a" (set));
+       }
+}
+
+static inline void flush_cf_bcache(unsigned long start, unsigned long end)
+{
+       unsigned long set;
+
+       for (set = start; set <= end; set += (0x10 - 3)) {
+               __asm__ __volatile__ (
+                       "cpushl %%bc,(%0)\n\t"
+                       "addq%.l #1,%0\n\t"
+                       "cpushl %%bc,(%0)\n\t"
+                       "addq%.l #1,%0\n\t"
+                       "cpushl %%bc,(%0)\n\t"
+                       "addq%.l #1,%0\n\t"
+                       "cpushl %%bc,(%0)"
+                       : "=a" (set)
+                       : "a" (set));
+       }
+}
+
 /*
  * Cache handling functions
  */
 
 static inline void flush_icache(void)
 {
-       if (CPU_IS_040_OR_060)
+       if (CPU_IS_COLDFIRE) {
+               flush_cf_icache(0, ICACHE_MAX_ADDR);
+       } else if (CPU_IS_040_OR_060) {
                asm volatile (  "nop\n"
                        "       .chip   68040\n"
                        "       cpusha  %bc\n"
                        "       .chip   68k");
-       else {
+       } else {
                unsigned long tmp;
                asm volatile (  "movec  %%cacr,%0\n"
                        "       or.w    %1,%0\n"
@@ -51,12 +117,14 @@ extern void cache_push_v(unsigned long vaddr, int len);
    process changes.  */
 #define __flush_cache_all()                                    \
 ({                                                             \
-       if (CPU_IS_040_OR_060)                                  \
+       if (CPU_IS_COLDFIRE) {                                  \
+               flush_cf_dcache(0, DCACHE_MAX_ADDR);            \
+       } else if (CPU_IS_040_OR_060) {                         \
                __asm__ __volatile__("nop\n\t"                  \
                                     ".chip 68040\n\t"          \
                                     "cpusha %dc\n\t"           \
                                     ".chip 68k");              \
-       else {                                                  \
+       } else {                                                \
                unsigned long _tmp;                             \
                __asm__ __volatile__("movec %%cacr,%0\n\t"      \
                                     "orw %1,%0\n\t"            \
@@ -112,7 +180,17 @@ static inline void flush_cache_page(struct vm_area_struct *vma, unsigned long vm
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
 static inline void __flush_page_to_ram(void *vaddr)
 {
-       if (CPU_IS_040_OR_060) {
+       if (CPU_IS_COLDFIRE) {
+               unsigned long addr, start, end;
+               addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
+               start = addr & ICACHE_SET_MASK;
+               end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
+               if (start > end) {
+                       flush_cf_bcache(0, end);
+                       end = ICACHE_MAX_ADDR;
+               }
+               flush_cf_bcache(start, end);
+       } else if (CPU_IS_040_OR_060) {
                __asm__ __volatile__("nop\n\t"
                                     ".chip 68040\n\t"
                                     "cpushp %%bc,(%0)\n\t"
index ec514485c8b6557bc18804c8f53589d4ed6eaefe..2f88d867c711207cde22101226b11c3ff68802db 100644 (file)
@@ -3,6 +3,10 @@
 
 #include <linux/in6.h>
 
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 /*
  * computes the checksum of a memory block at buff, length len,
  * and adds in "sum" (32-bit)
@@ -34,30 +38,6 @@ extern __wsum csum_partial_copy_nocheck(const void *src,
                                              void *dst, int len,
                                              __wsum sum);
 
-
-#ifdef CONFIG_COLDFIRE
-
-/*
- *     The ColdFire cores don't support all the 68k instructions used
- *     in the optimized checksum code below. So it reverts back to using
- *     more standard C coded checksums. The fast checksum code is
- *     significantly larger than the optimized version, so it is not
- *     inlined here.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
-
-static inline __sum16 csum_fold(__wsum sum)
-{
-       unsigned int tmp = (__force u32)sum;
-
-       tmp = (tmp & 0xffff) + (tmp >> 16);
-       tmp = (tmp & 0xffff) + (tmp >> 16);
-
-       return (__force __sum16)~tmp;
-}
-
-#else
-
 /*
  *     This is a version of ip_fast_csum() optimized for IP headers,
  *     which always checksum on 4 octet boundaries.
@@ -97,8 +77,6 @@ static inline __sum16 csum_fold(__wsum sum)
        return (__force __sum16)~sum;
 }
 
-#endif /* CONFIG_COLDFIRE */
-
 static inline __wsum
 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
                  unsigned short proto, __wsum sum)
@@ -167,4 +145,5 @@ csum_ipv6_magic(const struct in6_addr *saddr, const struct in6_addr *daddr,
        return csum_fold(sum);
 }
 
+#endif /* CONFIG_GENERIC_CSUM */
 #endif /* _M68K_CHECKSUM_H */
index edb66148a71dc85886e3112d9af2f08019d7856e..444ea8a09e9f3386434e89d502c41d1f4107302e 100644 (file)
@@ -1,7 +1,9 @@
 #ifndef _M68K_DIV64_H
 #define _M68K_DIV64_H
 
-#ifdef CONFIG_MMU
+#ifdef CONFIG_CPU_HAS_NO_MULDIV64
+#include <asm-generic/div64.h>
+#else
 
 #include <linux/types.h>
 
@@ -27,8 +29,6 @@
        __rem;                                                  \
 })
 
-#else
-#include <asm-generic/div64.h>
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_CPU_HAS_NO_MULDIV64 */
 
 #endif /* _M68K_DIV64_H */
index 01c193d9141268c5f65ea1720ba1ecdd1db9d726..e9b7cda597440b5696360307cf0124ab569645c5 100644 (file)
@@ -59,10 +59,10 @@ typedef struct user_m68kfp_struct elf_fpregset_t;
    is actually used on ASV.  */
 #define ELF_PLAT_INIT(_r, load_addr)   _r->a1 = 0
 
-#ifndef CONFIG_SUN3
-#define ELF_EXEC_PAGESIZE      4096
-#else
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
 #define ELF_EXEC_PAGESIZE      8192
+#else
+#define ELF_EXEC_PAGESIZE      4096
 #endif
 
 /* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
index c3c5a8643e15c784e096189f1a29ef59e251d115..622138dc7288fcac734a3fa434af4780a1fa1184 100644 (file)
  * Non-MMU systems do not reserve %a2 in this way, and this definition is
  * not used for them.
  */
+#ifdef CONFIG_MMU
+
 #define curptr a2
 
 #define GET_CURRENT(tmp) get_current tmp
 .macro get_current reg=%d0
        movel   %sp,\reg
-       andw    #-THREAD_SIZE,\reg
+       andl    #-THREAD_SIZE,\reg
        movel   \reg,%curptr
        movel   %curptr@,%curptr
 .endm
 
+#else
+
+#define GET_CURRENT(tmp)
+
+#endif /* CONFIG_MMU */
+
 #else /* C source */
 
 #define STR(X) STR1(X)
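
Two things happen in this entry.h hunk: GET_CURRENT becomes a no-op on non-MMU builds, and the mask in get_current changes from andw to andl, presumably because ColdFire only implements the long-word form of the and-immediate instruction (the result is unchanged on classic parts, since -THREAD_SIZE only clears low-order bits). The lookup itself rounds the stack pointer down to the base of the THREAD_SIZE-aligned kernel stack, where the task pointer lives. A host-side sketch of that arithmetic, with an assumed stack pointer and the 8kB THREAD_SIZE this series implies:

    #include <stdio.h>

    int main(void)
    {
            unsigned long thread_size = 8192;       /* 8kB kernel stack, per this series */
            unsigned long sp = 0x00fe5a34;          /* assumed kernel stack pointer */
            unsigned long base = sp & ~(thread_size - 1);   /* == sp & -THREAD_SIZE */

            printf("sp=0x%08lx -> stack base=0x%08lx\n", sp, base);
            return 0;
    }
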
index ffb6b8cfc6d59c73851c82060ff5bcbedeb8c615..526db9da9e43a3e6adf4bd939f4ff2e5c6347d74 100644 (file)
@@ -12,6 +12,8 @@
 #define FPSTATESIZE (96)
 #elif defined(CONFIG_M68KFPU_EMU)
 #define FPSTATESIZE (28)
+#elif defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+#define FPSTATESIZE (16)
 #elif defined(CONFIG_M68060)
 #define FPSTATESIZE (12)
 #else
index b2046839f4b253dc4254115f548c87b81dc5a83d..00d0071de4c3a83e613755e0b314b28a48007a4c 100644 (file)
@@ -225,7 +225,8 @@ static inline void gpio_set_value(unsigned gpio, int value)
 
 static inline int gpio_to_irq(unsigned gpio)
 {
-       return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE : -EINVAL;
+       return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE
+               : __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned irq)
index 6198df5ff245346971e3b947944591c50955a8ab..0e89fa05de0e60bda81f5df2056106c49160e3e8 100644 (file)
@@ -25,7 +25,8 @@
 #define NR_IRQS        0
 #endif
 
-#ifdef CONFIG_MMU
+#if defined(CONFIG_M68020) || defined(CONFIG_M68030) || \
+    defined(CONFIG_M68040) || defined(CONFIG_M68060)
 
 /*
  * Interrupt source definitions
@@ -80,7 +81,7 @@ extern unsigned int irq_canonicalize(unsigned int irq);
 
 #else
 #define irq_canonicalize(irq)  (irq)
-#endif /* CONFIG_MMU */
+#endif /* !(CONFIG_M68020 || CONFIG_M68030 || CONFIG_M68040 || CONFIG_M68060) */
 
 asmlinkage void do_IRQ(int irq, struct pt_regs *regs);
 extern atomic_t irq_err_count;
index 16a1835f9b2ae3a0ed824a2bd544a8cd93ba0f58..47906aafbf67f48ba816ffef54da09afcc1660e9 100644 (file)
 #define ACR_CM_OFF_PRE 0x00000040      /* No cache, precise */
 #define ACR_CM_OFF_IMP 0x00000060      /* No cache, imprecise */
 #define ACR_CM         0x00000060      /* Cache mode mask */
+#define ACR_SP         0x00000008      /* Supervisor protect */
 #define ACR_WPROTECT   0x00000004      /* Write protect */
 
+#define ACR_BA(x)      ((x) & 0xff000000)
+#define ACR_ADMSK(x)   ((((x) - 1) & 0xff000000) >> 8)
+
 #if defined(CONFIG_M5407)
 
 #define ICACHE_SIZE 0x4000     /* instruction - 16k */
 #define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
 #define CACHE_WAYS 4           /* 4 ways */
 
+#define ICACHE_SET_MASK        ((ICACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define DCACHE_SET_MASK        ((DCACHE_SIZE / 64 - 1) << CACHE_WAYS)
+#define ICACHE_MAX_ADDR        ICACHE_SET_MASK
+#define DCACHE_MAX_ADDR        DCACHE_SET_MASK
+
 /*
  *     Version 4 cores have a true harvard style separate instruction
  *     and data cache. Enable data and instruction caches, also enable write
 #else
 #define CACHE_MODE (CACR_DEC+CACR_DESB+CACR_DDCM_P+CACR_BEC+CACR_IEC+CACR_EUSP)
 #endif
+#define CACHE_INIT (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
+
+#if defined(CONFIG_MMU)
+/*
+ *     If running with the MMU enabled then we need to map the internal
+ *     register region as non-cacheable. Then we map all our RAM as
+ *     cacheable and supervisor-access only.
+ */
+#define ACR0_MODE      (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \
+                        ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP)
+#define ACR1_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+                        ACR_ENABLE+ACR_SUPER+ACR_SP)
+#define ACR2_MODE      0
+#define ACR3_MODE      (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \
+                        ACR_ENABLE+ACR_SUPER+ACR_SP)
+
+#else
+
+/*
+ *     For the non-MMU enabled case we map all of RAM as cacheable.
+ */
 #if defined(CONFIG_CACHE_COPYBACK)
 #define DATA_CACHE_MODE (ACR_ENABLE+ACR_ANY+ACR_CM_CP)
 #else
 #endif
 #define INSN_CACHE_MODE (ACR_ENABLE+ACR_ANY)
 
-#define CACHE_INIT     (CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATE  (CACHE_MODE+CACR_DCINVA+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATEI (CACHE_MODE+CACR_BCINVA+CACR_ICINVA)
 #define CACHE_INVALIDATED (CACHE_MODE+CACR_DCINVA)
 #define        CACHE_PUSH
 #endif
 
+#endif /* CONFIG_MMU */
 #endif /* m54xxacr_h */
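
The new ACR_BA()/ACR_ADMSK() macros build the base-address and address-mask fields of a ColdFire ACR: the base keeps the top byte of the region start, and the mask is (size - 1) reduced to that same byte and shifted into the mask field, so every set mask bit turns the matching base-address bit into a don't-care. A quick host-side check of the arithmetic, with assumed CONFIG_RAMBASE/CONFIG_RAMSIZE values:

    #include <stdio.h>

    #define ACR_BA(x)       ((x) & 0xff000000)
    #define ACR_ADMSK(x)    ((((x) - 1) & 0xff000000) >> 8)

    int main(void)
    {
            /* Assumed example config: 64MB of RAM at 0x40000000. */
            unsigned long rambase = 0x40000000;
            unsigned long ramsize = 0x04000000;

            /* Prints BA=0x40000000 ADMSK=0x00030000: address bits 25..24
             * become don't-cares, so the ACR covers 0x40000000-0x43ffffff. */
            printf("BA=0x%08lx ADMSK=0x%08lx\n",
                   ACR_BA(rambase), ACR_ADMSK(ramsize));
            return 0;
    }
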
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
new file mode 100644 (file)
index 0000000..313f3dd
--- /dev/null
@@ -0,0 +1,102 @@
+#ifndef M68K_MCF_PGALLOC_H
+#define M68K_MCF_PGALLOC_H
+
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+extern inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+       free_page((unsigned long) pte);
+}
+
+extern const char bad_pmd_string[];
+
+extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+       unsigned long address)
+{
+       unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+
+       if (!page)
+               return NULL;
+
+       memset((void *)page, 0, PAGE_SIZE);
+       return (pte_t *) (page);
+}
+
+extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+{
+       return (pmd_t *) pgd;
+}
+
+#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
+#define pmd_alloc_one(mm, address)      ({ BUG(); ((pmd_t *)2); })
+
+#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
+
+#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
+       (unsigned long)(page_address(page)))
+
+#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+                                 unsigned long address)
+{
+       __free_page(page);
+}
+
+#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+       unsigned long address)
+{
+       struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+       pte_t *pte;
+
+       if (!page)
+               return NULL;
+
+       pte = kmap(page);
+       if (pte) {
+               clear_page(pte);
+               __flush_page_to_ram(pte);
+               flush_tlb_kernel_page(pte);
+               nocache_page(pte);
+       }
+       kunmap(page);
+
+       return page;
+}
+
+extern inline void pte_free(struct mm_struct *mm, struct page *page)
+{
+       __free_page(page);
+}
+
+/*
+ * In our implementation, each pgd entry contains 1 pmd that is never allocated
+ * or freed.  pgd_present is always 1, so this should never be called. -NL
+ */
+#define pmd_free(mm, pmd) BUG()
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+       free_page((unsigned long) pgd);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+       pgd_t *new_pgd;
+
+       new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
+       if (!new_pgd)
+               return NULL;
+       memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+       memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
+       return new_pgd;
+}
+
+#define pgd_populate(mm, pmd, pte) BUG()
+
+#endif /* M68K_MCF_PGALLOC_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
new file mode 100644 (file)
index 0000000..756bde4
--- /dev/null
@@ -0,0 +1,425 @@
+#ifndef _MCF_PGTABLE_H
+#define _MCF_PGTABLE_H
+
+#include <asm/mcfmmu.h>
+#include <asm/page.h>
+
+/*
+ * MMUDR bits, in proper place. We write these directly into the MMUDR
+ * after masking from the pte.
+ */
+#define CF_PAGE_LOCKED         MMUDR_LK        /* 0x00000002 */
+#define CF_PAGE_EXEC           MMUDR_X         /* 0x00000004 */
+#define CF_PAGE_WRITABLE       MMUDR_W         /* 0x00000008 */
+#define CF_PAGE_READABLE       MMUDR_R         /* 0x00000010 */
+#define CF_PAGE_SYSTEM         MMUDR_SP        /* 0x00000020 */
+#define CF_PAGE_COPYBACK       MMUDR_CM_CCB    /* 0x00000040 */
+#define CF_PAGE_NOCACHE                MMUDR_CM_NCP    /* 0x00000080 */
+
+#define CF_CACHEMASK           (~MMUDR_CM_CCB)
+#define CF_PAGE_MMUDR_MASK     0x000000fe
+
+#define _PAGE_NOCACHE030       CF_PAGE_NOCACHE
+
+/*
+ * MMUTR bits, need shifting down.
+ */
+#define CF_PAGE_MMUTR_MASK     0x00000c00
+#define CF_PAGE_MMUTR_SHIFT    10
+
+#define CF_PAGE_VALID          (MMUTR_V << CF_PAGE_MMUTR_SHIFT)
+#define CF_PAGE_SHARED         (MMUTR_SG << CF_PAGE_MMUTR_SHIFT)
+
+/*
+ * Fake bits, not implemented in CF, will get masked out before
+ * hitting hardware.
+ */
+#define CF_PAGE_DIRTY          0x00000001
+#define CF_PAGE_FILE           0x00000200
+#define CF_PAGE_ACCESSED       0x00001000
+
+#define _PAGE_CACHE040         0x020   /* 68040 cache mode, cachable, copyback */
+#define _PAGE_NOCACHE_S                0x040   /* 68040 no-cache mode, serialized */
+#define _PAGE_NOCACHE          0x060   /* 68040 cache mode, non-serialized */
+#define _PAGE_CACHE040W                0x000   /* 68040 cache mode, cachable, write-through */
+#define _DESCTYPE_MASK         0x003
+#define _CACHEMASK040          (~0x060)
+#define _PAGE_GLOBAL040                0x400   /* 68040 global bit, used for kva descs */
+
+/*
+ * Externally used page protection values.
+ */
+#define _PAGE_PRESENT  (CF_PAGE_VALID)
+#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
+#define _PAGE_DIRTY    (CF_PAGE_DIRTY)
+#define _PAGE_READWRITE (CF_PAGE_READABLE \
+                               | CF_PAGE_WRITABLE \
+                               | CF_PAGE_SYSTEM \
+                               | CF_PAGE_SHARED)
+
+/*
+ * Compound page protection values.
+ */
+#define PAGE_NONE      __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED)
+
+#define PAGE_SHARED     __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_SHARED)
+
+#define PAGE_INIT      __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_WRITABLE \
+                                | CF_PAGE_EXEC \
+                                | CF_PAGE_SYSTEM)
+
+#define PAGE_KERNEL    __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_WRITABLE \
+                                | CF_PAGE_EXEC \
+                                | CF_PAGE_SYSTEM)
+
+#define PAGE_COPY      __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_DIRTY)
+
+/*
+ * Page protections for initialising protection_map. See mm/mmap.c
+ * for use. In general, the bit positions are xwr, and P-items are
+ * private, the S-items are shared.
+ */
+#define __P000         PAGE_NONE
+#define __P001         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_READABLE)
+#define __P010         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_WRITABLE)
+#define __P011         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_WRITABLE)
+#define __P100         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_EXEC)
+#define __P101         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_EXEC)
+#define __P110         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_WRITABLE \
+                                | CF_PAGE_EXEC)
+#define __P111         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_WRITABLE \
+                                | CF_PAGE_EXEC)
+
+#define __S000         PAGE_NONE
+#define __S001         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_READABLE)
+#define __S010         PAGE_SHARED
+#define __S011         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_SHARED \
+                                | CF_PAGE_READABLE)
+#define __S100         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_EXEC)
+#define __S101         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_EXEC)
+#define __S110         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_SHARED \
+                                | CF_PAGE_EXEC)
+#define __S111         __pgprot(CF_PAGE_VALID \
+                                | CF_PAGE_ACCESSED \
+                                | CF_PAGE_SHARED \
+                                | CF_PAGE_READABLE \
+                                | CF_PAGE_EXEC)
+
+#define PTE_MASK       PAGE_MASK
+#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
+       return pte;
+}
+
+#define pmd_set(pmdp, ptep) do {} while (0)
+
+static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
+{
+       pgd_val(*pgdp) = virt_to_phys(pmdp);
+}
+
+#define __pte_page(pte)        ((unsigned long) (pte_val(pte) & PAGE_MASK))
+#define __pmd_page(pmd)        ((unsigned long) (pmd_val(pmd)))
+
+static inline int pte_none(pte_t pte)
+{
+       return !pte_val(pte);
+}
+
+static inline int pte_present(pte_t pte)
+{
+       return pte_val(pte) & CF_PAGE_VALID;
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+       pte_t *ptep)
+{
+       pte_val(*ptep) = 0;
+}
+
+#define pte_pagenr(pte)        ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define pte_page(pte)  virt_to_page(__pte_page(pte))
+
+static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
+#define pmd_none(pmd) pmd_none2(&(pmd))
+static inline int pmd_bad2(pmd_t *pmd) { return 0; }
+#define pmd_bad(pmd) pmd_bad2(&(pmd))
+#define pmd_present(pmd) (!pmd_none2(&(pmd)))
+static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
+
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgdp) {}
+
+#define pte_ERROR(e) \
+       printk(KERN_ERR "%s:%d: bad pte %08lx.\n",      \
+       __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+       printk(KERN_ERR "%s:%d: bad pmd %08lx.\n",      \
+       __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+       printk(KERN_ERR "%s:%d: bad pgd %08lx.\n",      \
+       __FILE__, __LINE__, pgd_val(e))
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not...
+ * [we have the full set here even if they don't change from m68k]
+ */
+static inline int pte_read(pte_t pte)
+{
+       return pte_val(pte) & CF_PAGE_READABLE;
+}
+
+static inline int pte_write(pte_t pte)
+{
+       return pte_val(pte) & CF_PAGE_WRITABLE;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+       return pte_val(pte) & CF_PAGE_EXEC;
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+       return pte_val(pte) & CF_PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+       return pte_val(pte) & CF_PAGE_ACCESSED;
+}
+
+static inline int pte_file(pte_t pte)
+{
+       return pte_val(pte) & CF_PAGE_FILE;
+}
+
+static inline int pte_special(pte_t pte)
+{
+       return 0;
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       pte_val(pte) &= ~CF_PAGE_WRITABLE;
+       return pte;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+       pte_val(pte) &= ~CF_PAGE_READABLE;
+       return pte;
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+       pte_val(pte) &= ~CF_PAGE_EXEC;
+       return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+       pte_val(pte) &= ~CF_PAGE_DIRTY;
+       return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+       pte_val(pte) &= ~CF_PAGE_ACCESSED;
+       return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       pte_val(pte) |= CF_PAGE_WRITABLE;
+       return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+       pte_val(pte) |= CF_PAGE_READABLE;
+       return pte;
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+       pte_val(pte) |= CF_PAGE_EXEC;
+       return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+       pte_val(pte) |= CF_PAGE_DIRTY;
+       return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+       pte_val(pte) |= CF_PAGE_ACCESSED;
+       return pte;
+}
+
+static inline pte_t pte_mknocache(pte_t pte)
+{
+       pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40);
+       return pte;
+}
+
+static inline pte_t pte_mkcache(pte_t pte)
+{
+       pte_val(pte) &= ~CF_PAGE_NOCACHE;
+       return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return pte;
+}
+
+#define swapper_pg_dir kernel_pg_dir
+extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
+
+/*
+ * Find an entry in a pagetable directory.
+ */
+#define pgd_index(address)     ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address)        ((mm)->pgd + pgd_index(address))
+
+/*
+ * Find an entry in a kernel pagetable directory.
+ */
+#define pgd_offset_k(address)  pgd_offset(&init_mm, address)
+
+/*
+ * Find an entry in the second-level pagetable.
+ */
+static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
+{
+       return (pmd_t *) pgd;
+}
+
+/*
+ * Find an entry in the third-level pagetable.
+ */
+#define __pte_offset(address)  ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address) \
+       ((pte_t *) __pmd_page(*(dir)) + __pte_offset(address))
+
+/*
+ * Disable caching for page at given kernel virtual address.
+ */
+static inline void nocache_page(void *vaddr)
+{
+       pgd_t *dir;
+       pmd_t *pmdp;
+       pte_t *ptep;
+       unsigned long addr = (unsigned long) vaddr;
+
+       dir = pgd_offset_k(addr);
+       pmdp = pmd_offset(dir, addr);
+       ptep = pte_offset_kernel(pmdp, addr);
+       *ptep = pte_mknocache(*ptep);
+}
+
+/*
+ * Enable caching for page at given kernel virtual address.
+ */
+static inline void cache_page(void *vaddr)
+{
+       pgd_t *dir;
+       pmd_t *pmdp;
+       pte_t *ptep;
+       unsigned long addr = (unsigned long) vaddr;
+
+       dir = pgd_offset_k(addr);
+       pmdp = pmd_offset(dir, addr);
+       ptep = pte_offset_kernel(pmdp, addr);
+       *ptep = pte_mkcache(*ptep);
+}
+
+#define PTE_FILE_MAX_BITS      21
+#define PTE_FILE_SHIFT         11
+
+static inline unsigned long pte_to_pgoff(pte_t pte)
+{
+       return pte_val(pte) >> PTE_FILE_SHIFT;
+}
+
+static inline pte_t pgoff_to_pte(unsigned pgoff)
+{
+       return __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
+}
+
+/*
+ * Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
+ */
+#define __swp_type(x)          ((x).val & 0xFF)
+#define __swp_offset(x)                ((x).val >> PTE_FILE_SHIFT)
+#define __swp_entry(typ, off)  ((swp_entry_t) { (typ) | \
+                                       (off << PTE_FILE_SHIFT) })
+#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)  (__pte((x).val))
+
+#define pmd_page(pmd)          (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+
+#define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \
+                                      __pte_offset(addr))
+#define pte_unmap(pte)         ((void) 0)
+#define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _MCF_PGTABLE_H */
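
Because pmd_offset() above simply returns its pgd argument, the ColdFire page table is effectively two-level: pgd_index() picks the top-level entry and __pte_offset() indexes into the pte page it points at. A host-side model of just the index arithmetic; PAGE_SHIFT=13 matches the 8kB pages this series sets, while PGDIR_SHIFT and PTRS_PER_PTE are assumed values for illustration (the real ones live in pgtable headers not shown here):

    #include <stdio.h>

    #define PAGE_SHIFT      13      /* 8kB pages */
    #define PGDIR_SHIFT     22      /* assumed for illustration */
    #define PTRS_PER_PTE    512     /* assumed: 9 index bits between the shifts */

    #define pgd_index(addr)         ((addr) >> PGDIR_SHIFT)
    #define __pte_offset(addr)      (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

    int main(void)
    {
            unsigned long addr = 0x40802000;        /* assumed user address */

            /* pmd_offset() is the identity, so a walk is just two lookups. */
            printf("pgd[%lu] -> pte[%lu]\n", pgd_index(addr), __pte_offset(addr));
            return 0;
    }
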
diff --git a/arch/m68k/include/asm/mcfmmu.h b/arch/m68k/include/asm/mcfmmu.h
new file mode 100644 (file)
index 0000000..26cc3d5
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ *     mcfmmu.h -- definitions for the ColdFire v4e MMU
+ *
+ *     (C) Copyright 2011,  Greg Ungerer <gerg@uclinux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#ifndef        MCFMMU_H
+#define        MCFMMU_H
+
+/*
+ *     The MMU support registers are mapped into the address space using
+ *     the processor MMUBASE register. We used a fixed address for mapping,
+ *     there doesn't seem any need to make this configurable yet.
+ */
+#define        MMUBASE         0xfe000000
+
+/*
+ *     The support registers of the MMU. Names are the same as those
+ *     used in the Freescale v4e documentation.
+ */
+#define        MMUCR           (MMUBASE + 0x00)        /* Control register */
+#define        MMUOR           (MMUBASE + 0x04)        /* Operation register */
+#define        MMUSR           (MMUBASE + 0x08)        /* Status register */
+#define        MMUAR           (MMUBASE + 0x10)        /* TLB Address register */
+#define        MMUTR           (MMUBASE + 0x14)        /* TLB Tag register */
+#define        MMUDR           (MMUBASE + 0x18)        /* TLB Data register */
+
+/*
+ *     MMU Control register bit flags
+ */
+#define        MMUCR_EN        0x00000001              /* Virtual mode enable */
+#define        MMUCR_ASM       0x00000002              /* Address space mode */
+
+/*
+ *     MMU Operation register.
+ */
+#define        MMUOR_UAA       0x00000001              /* Update allocation address */
+#define        MMUOR_ACC       0x00000002              /* TLB access */
+#define        MMUOR_RD        0x00000004              /* TLB access read */
+#define        MMUOR_WR        0x00000000              /* TLB access write */
+#define        MMUOR_ADR       0x00000008              /* TLB address select */
+#define        MMUOR_ITLB      0x00000010              /* ITLB operation */
+#define        MMUOR_CAS       0x00000020              /* Clear non-locked ASID TLBs */
+#define        MMUOR_CNL       0x00000040              /* Clear non-locked TLBs */
+#define        MMUOR_CA        0x00000080              /* Clear all TLBs */
+#define        MMUOR_STLB      0x00000100              /* Search TLBs */
+#define        MMUOR_AAN       16                      /* TLB allocation address */
+#define        MMUOR_AAMASK    0xffff0000              /* AA mask */
+
+/*
+ *     MMU Status register.
+ */
+#define        MMUSR_HIT       0x00000002              /* Search TLB hit */
+#define        MMUSR_WF        0x00000008              /* Write access fault */
+#define        MMUSR_RF        0x00000010              /* Read access fault */
+#define        MMUSR_SPF       0x00000020              /* Supervisor protect fault */
+
+/*
+ *     MMU Read/Write Tag register.
+ */
+#define        MMUTR_V         0x00000001              /* Valid */
+#define        MMUTR_SG        0x00000002              /* Shared global */
+#define        MMUTR_IDN       2                       /* Address Space ID */
+#define        MMUTR_IDMASK    0x000003fc              /* ASID mask */
+#define        MMUTR_VAN       10                      /* Virtual Address */
+#define        MMUTR_VAMASK    0xfffffc00              /* VA mask */
+
+/*
+ *     MMU Read/Write Data register.
+ */
+#define        MMUDR_LK        0x00000002              /* Lock entry */
+#define        MMUDR_X         0x00000004              /* Execute access enable */
+#define        MMUDR_W         0x00000008              /* Write access enable */
+#define        MMUDR_R         0x00000010              /* Read access enable */
+#define        MMUDR_SP        0x00000020              /* Supervisor access enable */
+#define        MMUDR_CM_CWT    0x00000000              /* Cachable write thru */
+#define        MMUDR_CM_CCB    0x00000040              /* Cachable copy back */
+#define        MMUDR_CM_NCP    0x00000080              /* Non-cachable precise */
+#define        MMUDR_CM_NCI    0x000000c0              /* Non-cachable imprecise */
+#define        MMUDR_SZ_1MB    0x00000000              /* 1MB page size */
+#define        MMUDR_SZ_4KB    0x00000100              /* 4kB page size */
+#define        MMUDR_SZ_8KB    0x00000200              /* 8kB page size */
+#define        MMUDR_SZ_1KB    0x00000300              /* 1kB page size */
+#define        MMUDR_PAN       10                      /* Physical address */
+#define        MMUDR_PAMASK    0xfffffc00              /* PA mask */
+
+#ifndef __ASSEMBLY__
+
+/*
+ *     Simple access functions for the MMU registers. Nothing fancy
+ *     currently required, just simple 32bit access.
+ */
+static inline u32 mmu_read(u32 a)
+{
+       return *((volatile u32 *) a);
+}
+
+static inline void mmu_write(u32 a, u32 v)
+{
+       *((volatile u32 *) a) = v;
+       __asm__ __volatile__ ("nop");
+}
+
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word);
+
+#endif
+
+#endif /* MCFMMU_H */
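
mmu_read()/mmu_write() are plain 32-bit accessors for the memory-mapped registers above; installing a TLB entry means composing MMUTR (virtual page, ASID, valid bit) and MMUDR (physical page, protection, page size) and then triggering the load through MMUOR, which is what the mmu_context.h code below does. A host-side sketch that only composes the two register values for an assumed mapping (field values restated from this header so the sketch builds stand-alone):

    #include <inttypes.h>
    #include <stdio.h>

    #define MMUTR_V         0x00000001
    #define MMUTR_IDN       2
    #define MMUDR_W         0x00000008
    #define MMUDR_R         0x00000010
    #define MMUDR_SZ_8KB    0x00000200

    int main(void)
    {
            /* Assumed example mapping: one 8kB page, ASID 5. */
            uint32_t vaddr = 0x40004000, paddr = 0x00804000, asid = 5;
            uint32_t page_mask = ~(uint32_t)0x1fff;         /* 8kB pages */

            uint32_t tr = (vaddr & page_mask) | (asid << MMUTR_IDN) | MMUTR_V;
            uint32_t dr = (paddr & page_mask) | MMUDR_R | MMUDR_W | MMUDR_SZ_8KB;

            /* On hardware: mmu_write(MMUTR, tr); mmu_write(MMUDR, dr);
             * then an MMUOR write loads the entry into the TLB. */
            printf("MMUTR=0x%08" PRIx32 " MMUDR=0x%08" PRIx32 "\n", tr, dr);
            return 0;
    }
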
index 7d4341e55a99b85396757228e924c3b5acbbc095..dc3be991d63431b78cd2ec2e3c2c6bd1dfaa3abd 100644 (file)
@@ -8,7 +8,206 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 }
 
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
+
+#if defined(CONFIG_COLDFIRE)
+
+#include <asm/atomic.h>
+#include <asm/bitops.h>
+#include <asm/mcfmmu.h>
+#include <asm/mmu.h>
+
+#define NO_CONTEXT             256
+#define LAST_CONTEXT           255
+#define FIRST_CONTEXT          1
+
+extern unsigned long context_map[];
+extern mm_context_t next_mmu_context;
+
+extern atomic_t nr_free_contexts;
+extern struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern void steal_context(void);
+
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+       mm_context_t ctx;
+
+       if (mm->context != NO_CONTEXT)
+               return;
+       while (atomic_dec_and_test_lt(&nr_free_contexts)) {
+               atomic_inc(&nr_free_contexts);
+               steal_context();
+       }
+       ctx = next_mmu_context;
+       while (test_and_set_bit(ctx, context_map)) {
+               ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+               if (ctx > LAST_CONTEXT)
+                       ctx = 0;
+       }
+       next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+       mm->context = ctx;
+       context_mm[ctx] = mm;
+}
+
+/*
+ * Set up the context for a new address space.
+ */
+#define init_new_context(tsk, mm)      (((mm)->context = NO_CONTEXT), 0)
+
+/*
+ * We're finished using the context for an address space.
+ */
+static inline void destroy_context(struct mm_struct *mm)
+{
+       if (mm->context != NO_CONTEXT) {
+               clear_bit(mm->context, context_map);
+               mm->context = NO_CONTEXT;
+               atomic_inc(&nr_free_contexts);
+       }
+}
+
+static inline void set_context(mm_context_t context, pgd_t *pgd)
+{
+       __asm__ __volatile__ ("movec %0,%%asid" : : "d" (context));
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+       struct task_struct *tsk)
+{
+       get_mmu_context(tsk->mm);
+       set_context(tsk->mm->context, next->pgd);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+static inline void activate_mm(struct mm_struct *active_mm,
+       struct mm_struct *mm)
+{
+       get_mmu_context(mm);
+       set_context(mm->context, mm->pgd);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+extern void mmu_context_init(void);
+#define prepare_arch_switch(next) load_ksp_mmu(next)
+
+static inline void load_ksp_mmu(struct task_struct *task)
+{
+       unsigned long flags;
+       struct mm_struct *mm;
+       int asid;
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long mmuar;
+
+       local_irq_save(flags);
+       mmuar = task->thread.ksp;
+
+       /* Search for a valid TLB entry; if one is found, don't remap */
+       mmu_write(MMUAR, mmuar);
+       mmu_write(MMUOR, MMUOR_STLB | MMUOR_ADR);
+       if (mmu_read(MMUSR) & MMUSR_HIT)
+               goto end;
+
+       if (mmuar >= PAGE_OFFSET) {
+               mm = &init_mm;
+       } else {
+               pr_info("load_ksp_mmu: non-kernel mm found: 0x%p\n", task->mm);
+               mm = task->mm;
+       }
+
+       if (!mm)
+               goto bug;
+
+       pgd = pgd_offset(mm, mmuar);
+       if (pgd_none(*pgd))
+               goto bug;
+
+       pmd = pmd_offset(pgd, mmuar);
+       if (pmd_none(*pmd))
+               goto bug;
+
+       pte = (mmuar >= PAGE_OFFSET) ? pte_offset_kernel(pmd, mmuar)
+                                    : pte_offset_map(pmd, mmuar);
+       if (pte_none(*pte) || !pte_present(*pte))
+               goto bug;
+
+       set_pte(pte, pte_mkyoung(*pte));
+       asid = mm->context & 0xff;
+       if (!pte_dirty(*pte) && mmuar <= PAGE_OFFSET)
+               set_pte(pte, pte_wrprotect(*pte));
+
+       mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
+               (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
+               >> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
+
+       mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+               ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+       mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+
+       goto end;
+
+bug:
+       pr_info("ksp load failed: mm=0x%p ksp=0x%08lx\n", mm, mmuar);
+end:
+       local_irq_restore(flags);
+}
+
+#elif defined(CONFIG_SUN3)
+#include <asm/sun3mmu.h>
+#include <linux/sched.h>
+
+extern unsigned long get_free_context(struct mm_struct *mm);
+extern void clear_context(unsigned long context);
+
+/* set the context for a new task to unmapped */
+static inline int init_new_context(struct task_struct *tsk,
+                                  struct mm_struct *mm)
+{
+       mm->context = SUN3_INVALID_CONTEXT;
+       return 0;
+}
+
+/* find the context given to this process, and if it hasn't already
+   got one, go get one for it. */
+static inline void get_mmu_context(struct mm_struct *mm)
+{
+       if (mm->context == SUN3_INVALID_CONTEXT)
+               mm->context = get_free_context(mm);
+}
+
+/* flush context if allocated... */
+static inline void destroy_context(struct mm_struct *mm)
+{
+       if (mm->context != SUN3_INVALID_CONTEXT)
+               clear_context(mm->context);
+}
+
+static inline void activate_context(struct mm_struct *mm)
+{
+       get_mmu_context(mm);
+       sun3_put_context(mm->context);
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       activate_context(tsk->mm);
+}
+
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+                              struct mm_struct *next_mm)
+{
+       activate_context(next_mm);
+}
+
+#else
 
 #include <asm/setup.h>
 #include <asm/page.h>
@@ -103,55 +302,8 @@ static inline void activate_mm(struct mm_struct *prev_mm,
                switch_mm_0460(next_mm);
 }
 
-#else  /* CONFIG_SUN3 */
-#include <asm/sun3mmu.h>
-#include <linux/sched.h>
-
-extern unsigned long get_free_context(struct mm_struct *mm);
-extern void clear_context(unsigned long context);
-
-/* set the context for a new task to unmapped */
-static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
-{
-       mm->context = SUN3_INVALID_CONTEXT;
-       return 0;
-}
-
-/* find the context given to this process, and if it hasn't already
-   got one, go get one for it. */
-static inline void get_mmu_context(struct mm_struct *mm)
-{
-       if(mm->context == SUN3_INVALID_CONTEXT)
-               mm->context = get_free_context(mm);
-}
-
-/* flush context if allocated... */
-static inline void destroy_context(struct mm_struct *mm)
-{
-       if(mm->context != SUN3_INVALID_CONTEXT)
-               clear_context(mm->context);
-}
-
-static inline void activate_context(struct mm_struct *mm)
-{
-       get_mmu_context(mm);
-       sun3_put_context(mm->context);
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
-{
-       activate_context(tsk->mm);
-}
-
-#define deactivate_mm(tsk,mm)  do { } while (0)
-
-static inline void activate_mm(struct mm_struct *prev_mm,
-                              struct mm_struct *next_mm)
-{
-       activate_context(next_mm);
-}
-
 #endif
+
 #else /* !CONFIG_MMU */
 
 static inline int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
index 45bd3f589bf0d62baeac34692aa7b6582cd3ae9f..e0fdd4d080750a1763b5477c0ed4a2c58b635436 100644 (file)
@@ -8,6 +8,7 @@
 #define _PAGE_PRESENT  0x001
 #define _PAGE_SHORT    0x002
 #define _PAGE_RONLY    0x004
+#define _PAGE_READWRITE        0x000
 #define _PAGE_ACCESSED 0x008
 #define _PAGE_DIRTY    0x010
 #define _PAGE_SUPER    0x080   /* 68040 supervisor only */
index dfebb7c1e3796bf49adf5c691095a4319424e906..98baa82a86158420ea6aa80167a0f1197994ce5f 100644 (file)
@@ -6,10 +6,10 @@
 #include <asm/page_offset.h>
 
 /* PAGE_SHIFT determines the page size */
-#ifndef CONFIG_SUN3
-#define PAGE_SHIFT     (12)
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
+#define PAGE_SHIFT     13
 #else
-#define PAGE_SHIFT     (13)
+#define PAGE_SHIFT     12
 #endif
 #define PAGE_SIZE      (_AC(1, UL) << PAGE_SHIFT)
 #define PAGE_MASK      (~(PAGE_SIZE-1))
@@ -36,6 +36,10 @@ typedef struct page *pgtable_t;
 #define __pgd(x)       ((pgd_t) { (x) } )
 #define __pgprot(x)    ((pgprot_t) { (x) } )
 
+extern unsigned long _rambase;
+extern unsigned long _ramstart;
+extern unsigned long _ramend;
+
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_MMU
index a8d1c60eb9cec846ad26662497513f676623c7e1..90595721185fc20a4b3ba79cacab1461fd50e3b4 100644 (file)
@@ -5,9 +5,6 @@
  
 extern unsigned long memory_start;
 extern unsigned long memory_end;
-extern unsigned long _rambase;
-extern unsigned long _ramstart;
-extern unsigned long _ramend;
 
 #define get_user_page(vaddr)           __get_free_page(GFP_KERNEL)
 #define free_user_page(page, addr)     free_page(addr)
index 1780152d81dace541507110a04e4065f49fb3cdc..82626a8f1d0a951597718ae68852050be20a60a7 100644 (file)
@@ -1,11 +1,9 @@
 /* This handles the memory map.. */
 
-#ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define PAGE_OFFSET_RAW                0x00000000
-#else
+#if defined(CONFIG_RAMBASE)
+#define PAGE_OFFSET_RAW                CONFIG_RAMBASE
+#elif defined(CONFIG_SUN3)
 #define PAGE_OFFSET_RAW                0x0E000000
-#endif
 #else
-#define        PAGE_OFFSET_RAW         CONFIG_RAMBASE
+#define PAGE_OFFSET_RAW                0x00000000
 #endif
index c294aad8a9000bd9d891d5a0a35b9987e5ed9c6b..37bee7e3223d0eb6da4fe7e5c2e782fb26aa21cb 100644 (file)
@@ -7,7 +7,9 @@
 
 #ifdef CONFIG_MMU
 #include <asm/virtconvert.h>
-#ifdef CONFIG_SUN3
+#if defined(CONFIG_COLDFIRE)
+#include <asm/mcf_pgalloc.h>
+#elif defined(CONFIG_SUN3)
 #include <asm/sun3_pgalloc.h>
 #else
 #include <asm/motorola_pgalloc.h>
index 87174c904d2b2e8eb5adc83a4fc056ac8790a9cc..dc35e0e106e4b1d9c3aa817248f7a642796891af 100644 (file)
@@ -40,6 +40,8 @@
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
 #ifdef CONFIG_SUN3
 #define PGDIR_SHIFT     17
+#elif defined(CONFIG_COLDFIRE)
+#define PGDIR_SHIFT     22
 #else
 #define PGDIR_SHIFT    25
 #endif
 #define PTRS_PER_PTE   16
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   2048
+#elif defined(CONFIG_COLDFIRE)
+#define PTRS_PER_PTE   512
+#define PTRS_PER_PMD   1
+#define PTRS_PER_PGD   1024
 #else
 #define PTRS_PER_PTE   1024
 #define PTRS_PER_PMD   8
 #ifdef CONFIG_SUN3
 #define KMAP_START     0x0DC00000
 #define KMAP_END       0x0E000000
+#elif defined(CONFIG_COLDFIRE)
+#define KMAP_START     0xe0000000
+#define KMAP_END       0xf0000000
 #else
 #define        KMAP_START      0xd0000000
 #define        KMAP_END        0xf0000000
 #endif
 
-#ifndef CONFIG_SUN3
+#ifdef CONFIG_SUN3
+extern unsigned long m68k_vmalloc_end;
+#define VMALLOC_START 0x0f800000
+#define VMALLOC_END m68k_vmalloc_end
+#elif defined(CONFIG_COLDFIRE)
+#define VMALLOC_START  0xd0000000
+#define VMALLOC_END    0xe0000000
+#else
 /* Just any arbitrary offset to the start of the vmalloc VM area: the
  * current 8MB value just means that there will be a 8MB "hole" after the
  * physical memory until the kernel virtual memory starts.  That means that
 #define VMALLOC_OFFSET (8*1024*1024)
 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
 #define VMALLOC_END KMAP_START
-#else
-extern unsigned long m68k_vmalloc_end;
-#define VMALLOC_START 0x0f800000
-#define VMALLOC_END m68k_vmalloc_end
-#endif /* CONFIG_SUN3 */
+#endif
 
 /* zero page used for uninitialized stuff */
 extern void *empty_zero_page;
@@ -130,6 +142,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #ifdef CONFIG_SUN3
 #include <asm/sun3_pgtable.h>
+#elif defined(CONFIG_COLDFIRE)
+#include <asm/mcf_pgtable.h>
 #else
 #include <asm/motorola_pgtable.h>
 #endif
@@ -138,6 +152,9 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
+#ifdef CONFIG_COLDFIRE
+# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
+#else
 #ifdef SUN3_PAGE_NOCACHE
 # define __SUN3_PAGE_NOCACHE   SUN3_PAGE_NOCACHE
 #else
@@ -152,6 +169,7 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
            ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S))  \
            : (prot)))
 
+#endif /* CONFIG_COLDFIRE */
 #include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
 
index 568facf30276693e1ae7466b95c26b1e34e71ef0..46460fa15d5cc66cbc68f39a3016b596c03af2ad 100644 (file)
@@ -48,10 +48,12 @@ static inline void wrusp(unsigned long usp)
  * so don't change it unless you know what you are doing.
  */
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define TASK_SIZE      (0xF0000000UL)
-#else
+#if defined(CONFIG_COLDFIRE)
+#define TASK_SIZE      (0xC0000000UL)
+#elif defined(CONFIG_SUN3)
 #define TASK_SIZE      (0x0E000000UL)
+#else
+#define TASK_SIZE      (0xF0000000UL)
 #endif
 #else
 #define TASK_SIZE      (0xFFFFFFFFUL)
@@ -66,10 +68,12 @@ static inline void wrusp(unsigned long usp)
  * space during mmap's.
  */
 #ifdef CONFIG_MMU
-#ifndef CONFIG_SUN3
-#define TASK_UNMAPPED_BASE     0xC0000000UL
-#else
+#if defined(CONFIG_COLDFIRE)
+#define TASK_UNMAPPED_BASE     0x60000000UL
+#elif defined(CONFIG_SUN3)
 #define TASK_UNMAPPED_BASE     0x0A000000UL
+#else
+#define TASK_UNMAPPED_BASE     0xC0000000UL
 #endif
 #define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
 #else
@@ -88,14 +92,12 @@ struct thread_struct {
        unsigned long  fp[8*3];
        unsigned long  fpcntl[3];       /* fp control regs */
        unsigned char  fpstate[FPSTATESIZE];  /* floating point state */
-       struct thread_info info;
 };
 
 #define INIT_THREAD  {                                                 \
        .ksp    = sizeof(init_stack) + (unsigned long) init_stack,      \
        .sr     = PS_S,                                                 \
        .fs     = __KERNEL_DS,                                          \
-       .info   = INIT_THREAD_INFO(init_task),                          \
 }
 
 #ifdef CONFIG_MMU
index ee959219fdfe0fb1698e456344eeedfa39271ca2..0fa80e97ed2de8ca503e3cb1e92f0f33d15dc0cb 100644 (file)
@@ -22,23 +22,26 @@ typedef struct {
 } mm_segment_t;
 
 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-#define USER_DS                MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS      MAKE_MM_SEG(__KERNEL_DS)
 
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
 /*
  * Get/set the SFC/DFC registers for MOVES instructions
  */
+#define USER_DS                MAKE_MM_SEG(__USER_DS)
+#define KERNEL_DS      MAKE_MM_SEG(__KERNEL_DS)
 
 static inline mm_segment_t get_fs(void)
 {
-#ifdef CONFIG_MMU
        mm_segment_t _v;
        __asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
-
        return _v;
-#else
-       return USER_DS;
-#endif
+}
+
+static inline void set_fs(mm_segment_t val)
+{
+       __asm__ __volatile__ ("movec %0,%/sfc\n\t"
+                             "movec %0,%/dfc\n\t"
+                             : /* no outputs */ : "r" (val.seg) : "memory");
 }
 
 static inline mm_segment_t get_ds(void)
@@ -47,14 +50,13 @@ static inline mm_segment_t get_ds(void)
     return KERNEL_DS;
 }
 
-static inline void set_fs(mm_segment_t val)
-{
-#ifdef CONFIG_MMU
-       __asm__ __volatile__ ("movec %0,%/sfc\n\t"
-                             "movec %0,%/dfc\n\t"
-                             : /* no outputs */ : "r" (val.seg) : "memory");
+#else
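+/*
+ * Parts without separate address spaces (such as ColdFire) have no
+ * sfc/dfc registers to program, so the current segment is tracked in
+ * software through the per-thread addr_limit field instead.
+ */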
+#define USER_DS                MAKE_MM_SEG(TASK_SIZE)
+#define KERNEL_DS      MAKE_MM_SEG(0xFFFFFFFF)
+#define get_ds()       (KERNEL_DS)
+#define get_fs()       (current_thread_info()->addr_limit)
+#define set_fs(x)      (current_thread_info()->addr_limit = (x))
 #endif
-}
 
 #define segment_eq(a,b)        ((a).seg == (b).seg)
 
index 4dfb3952b3753e02b8d0ec2b179ab3bb408ca437..00c2c5397d37ee787eaeac0df751e220c3cefa26 100644 (file)
@@ -40,6 +40,7 @@
 #define MACH_HP300    9
 #define MACH_Q40     10
 #define MACH_SUN3X   11
+#define MACH_M54XX   12
 
 #define COMMAND_LINE_SIZE 256
 
@@ -211,23 +212,27 @@ extern unsigned long m68k_machtype;
 #define CPUB_68030     1
 #define CPUB_68040     2
 #define CPUB_68060     3
+#define CPUB_COLDFIRE  4
 
 #define CPU_68020      (1<<CPUB_68020)
 #define CPU_68030      (1<<CPUB_68030)
 #define CPU_68040      (1<<CPUB_68040)
 #define CPU_68060      (1<<CPUB_68060)
+#define CPU_COLDFIRE   (1<<CPUB_COLDFIRE)
 
 #define FPUB_68881     0
 #define FPUB_68882     1
 #define FPUB_68040     2                       /* Internal FPU */
 #define FPUB_68060     3                       /* Internal FPU */
 #define FPUB_SUNFPA    4                       /* Sun-3 FPA */
+#define FPUB_COLDFIRE  5                       /* ColdFire FPU */
 
 #define FPU_68881      (1<<FPUB_68881)
 #define FPU_68882      (1<<FPUB_68882)
 #define FPU_68040      (1<<FPUB_68040)
 #define FPU_68060      (1<<FPUB_68060)
 #define FPU_SUNFPA     (1<<FPUB_SUNFPA)
+#define FPU_COLDFIRE   (1<<FPUB_COLDFIRE)
 
 #define MMUB_68851     0
 #define MMUB_68030     1                       /* Internal MMU */
@@ -235,6 +240,7 @@ extern unsigned long m68k_machtype;
 #define MMUB_68060     3                       /* Internal MMU */
 #define MMUB_APOLLO    4                       /* Custom Apollo */
 #define MMUB_SUN3      5                       /* Custom Sun-3 */
+#define MMUB_COLDFIRE  6                       /* Internal MMU */
 
 #define MMU_68851      (1<<MMUB_68851)
 #define MMU_68030      (1<<MMUB_68030)
@@ -242,6 +248,7 @@ extern unsigned long m68k_machtype;
 #define MMU_68060      (1<<MMUB_68060)
 #define MMU_SUN3       (1<<MMUB_SUN3)
 #define MMU_APOLLO     (1<<MMUB_APOLLO)
+#define MMU_COLDFIRE   (1<<MMUB_COLDFIRE)
 
 #ifdef __KERNEL__
 
@@ -341,6 +348,13 @@ extern int m68k_is040or060;
 #  endif
 #endif
 
+#if !defined(CONFIG_COLDFIRE)
+#  define CPU_IS_COLDFIRE (0)
+#else
+#  define CPU_IS_COLDFIRE (1)
+#  define MMU_IS_COLDFIRE (1)
+#endif
+
 #define CPU_TYPE (m68k_cputype)
 
 #ifdef CONFIG_M68KFPU_EMU
index a29dd74a17cb25827a203654c5d7fafb156ff485..523db2a51cf304137b52315cae08ee2e4a7c8144 100644 (file)
@@ -15,11 +15,7 @@ struct sigcontext {
        unsigned long  sc_pc;
        unsigned short sc_formatvec;
 #ifndef __uClinux__
-# ifdef __mcoldfire__
-       unsigned long  sc_fpregs[2][2]; /* room for two fp registers */
-# else
        unsigned long  sc_fpregs[2*3];  /* room for two fp registers */
-# endif
        unsigned long  sc_fpcntl[3];
        unsigned char  sc_fpstate[216];
 #endif
index 790988967ba7ed10ae54eb2dbc5e917012cff088..29fa6da4f17c4a2024e36da7c1a66401de2348a2 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <asm/types.h>
 #include <asm/page.h>
+#include <asm/segment.h>
 
 /*
  * On machines with 4k pages we default to an 8k thread size, though we
@@ -26,6 +27,7 @@ struct thread_info {
        struct task_struct      *task;          /* main task structure */
        unsigned long           flags;
        struct exec_domain      *exec_domain;   /* execution domain */
+       mm_segment_t            addr_limit;     /* thread address space */
        int                     preempt_count;  /* 0 => preemptable, <0 => BUG */
        __u32                   cpu;            /* should always be 0 on m68k */
        unsigned long           tp_value;       /* thread pointer */
@@ -39,6 +41,7 @@ struct thread_info {
 {                                              \
        .task           = &tsk,                 \
        .exec_domain    = &default_exec_domain, \
+       .addr_limit     = KERNEL_DS,            \
        .preempt_count  = INIT_PREEMPT_COUNT,   \
        .restart_block = {                      \
                .fn = do_no_restart_syscall,    \
@@ -47,34 +50,6 @@ struct thread_info {
 
 #define init_stack             (init_thread_union.stack)
 
-#ifdef CONFIG_MMU
-
-#ifndef __ASSEMBLY__
-#include <asm/current.h>
-#endif
-
-#ifdef ASM_OFFSETS_C
-#define task_thread_info(tsk)  ((struct thread_info *) NULL)
-#else
-#include <asm/asm-offsets.h>
-#define task_thread_info(tsk)  ((struct thread_info *)((char *)tsk+TASK_TINFO))
-#endif
-
-#define init_thread_info       (init_task.thread.info)
-#define task_stack_page(tsk)   ((tsk)->stack)
-#define current_thread_info()  task_thread_info(current)
-
-#define __HAVE_THREAD_FUNCTIONS
-
-#define setup_thread_stack(p, org) ({                  \
-       *(struct task_struct **)(p)->stack = (p);       \
-       task_thread_info(p)->task = (p);                \
-})
-
-#define end_of_stack(p)                ((unsigned long *)(p)->stack + 1)
-
-#else /* !CONFIG_MMU */
-
 #ifndef __ASSEMBLY__
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
@@ -92,8 +67,6 @@ static inline struct thread_info *current_thread_info(void)
 
 #define init_thread_info       (init_thread_union.thread_info)
 
-#endif /* CONFIG_MMU */
-
 /* entry.S relies on these definitions!
  * bits 0-7 are tested at every exception exit
  * bits 8-15 are also tested at syscall exit
index a6b4ed4fc90faf9acdb31262fca3623f1f09cc40..965ea35c9a4059dcda1d5975fdad3ac5af94a954 100644 (file)
@@ -5,10 +5,13 @@
 #ifndef CONFIG_SUN3
 
 #include <asm/current.h>
+#include <asm/mcfmmu.h>
 
 static inline void flush_tlb_kernel_page(void *addr)
 {
-       if (CPU_IS_040_OR_060) {
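+       /* ColdFire: simply clear every non-locked TLB entry (MMUOR_CNL);
+        * the other flush variants below do the same.
+        */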
+       if (CPU_IS_COLDFIRE) {
+               mmu_write(MMUOR, MMUOR_CNL);
+       } else if (CPU_IS_040_OR_060) {
                mm_segment_t old_fs = get_fs();
                set_fs(KERNEL_DS);
                __asm__ __volatile__(".chip 68040\n\t"
@@ -25,12 +28,15 @@ static inline void flush_tlb_kernel_page(void *addr)
  */
 static inline void __flush_tlb(void)
 {
-       if (CPU_IS_040_OR_060)
+       if (CPU_IS_COLDFIRE) {
+               mmu_write(MMUOR, MMUOR_CNL);
+       } else if (CPU_IS_040_OR_060) {
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflushan\n\t"
                                     ".chip 68k");
-       else if (CPU_IS_020_OR_030)
+       } else if (CPU_IS_020_OR_030) {
                __asm__ __volatile__("pflush #0,#4");
+       }
 }
 
 static inline void __flush_tlb040_one(unsigned long addr)
@@ -43,7 +49,9 @@ static inline void __flush_tlb040_one(unsigned long addr)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-       if (CPU_IS_040_OR_060)
+       if (CPU_IS_COLDFIRE)
+               mmu_write(MMUOR, MMUOR_CNL);
+       else if (CPU_IS_040_OR_060)
                __flush_tlb040_one(addr);
        else if (CPU_IS_020_OR_030)
                __asm__ __volatile__("pflush #0,#4,(%0)" : : "a" (addr));
@@ -56,12 +64,15 @@ static inline void __flush_tlb_one(unsigned long addr)
  */
 static inline void flush_tlb_all(void)
 {
-       if (CPU_IS_040_OR_060)
+       if (CPU_IS_COLDFIRE) {
+               mmu_write(MMUOR, MMUOR_CNL);
+       } else if (CPU_IS_040_OR_060) {
                __asm__ __volatile__(".chip 68040\n\t"
                                     "pflusha\n\t"
                                     ".chip 68k");
-       else if (CPU_IS_020_OR_030)
+       } else if (CPU_IS_020_OR_030) {
                __asm__ __volatile__("pflusha");
+       }
 }
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
index 151068f64f44f088b03339d57d13f6bb394a04a3..4aff3358fbaff75eb80952b2ea05977845614421 100644 (file)
@@ -18,6 +18,7 @@
 
 typedef void (*e_vector)(void);
 extern e_vector vectors[];
+extern e_vector *_ramvec;
 
 asmlinkage void auto_inthandler(void);
 asmlinkage void user_inthandler(void);
index 7107f3fbdbb66885bd000ec206648b5013bbcaf9..9c80cd515b2069cab1a28b2b54a16f70d9a19028 100644 (file)
@@ -20,6 +20,22 @@ static inline int access_ok(int type, const void __user *addr,
        return 1;
 }
 
+/*
+ * Not all variants of the 68k family support the notion of address spaces.
+ * The traditional 680x0 parts do, and they use the sfc/dfc registers and
+ * the "moves" instruction to access user space from kernel space. Other
+ * family members like ColdFire don't support this; they have a single
+ * address space and use the usual "move" instruction for user space access.
+ *
+ * Outside of this difference the user space access functions are the same.
+ * So let's keep the code simple and just define what we need to use.
+ */
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+#define        MOVES   "moves"
+#else
+#define        MOVES   "move"
+#endif
+
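+/*
+ * For example, __put_user_asm() below then emits "moves.l %2,%1" on
+ * parts with separate address spaces and "move.l %2,%1" on single
+ * address space parts such as ColdFire.
+ */
+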
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
@@ -43,7 +59,7 @@ extern int __get_user_bad(void);
 
 #define __put_user_asm(res, x, ptr, bwl, reg, err)     \
 asm volatile ("\n"                                     \
-       "1:     moves."#bwl"    %2,%1\n"                \
+       "1:     "MOVES"."#bwl"  %2,%1\n"                \
        "2:\n"                                          \
        "       .section .fixup,\"ax\"\n"               \
        "       .even\n"                                \
@@ -83,8 +99,8 @@ asm volatile ("\n"                                    \
            {                                                           \
                const void __user *__pu_ptr = (ptr);                    \
                asm volatile ("\n"                                      \
-                       "1:     moves.l %2,(%1)+\n"                     \
-                       "2:     moves.l %R2,(%1)\n"                     \
+                       "1:     "MOVES".l       %2,(%1)+\n"             \
+                       "2:     "MOVES".l       %R2,(%1)\n"             \
                        "3:\n"                                          \
                        "       .section .fixup,\"ax\"\n"               \
                        "       .even\n"                                \
@@ -115,12 +131,12 @@ asm volatile ("\n"                                        \
 #define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({    \
        type __gu_val;                                          \
        asm volatile ("\n"                                      \
-               "1:     moves."#bwl"    %2,%1\n"                \
+               "1:     "MOVES"."#bwl"  %2,%1\n"                \
                "2:\n"                                          \
                "       .section .fixup,\"ax\"\n"               \
                "       .even\n"                                \
                "10:    move.l  %3,%0\n"                        \
-               "       sub."#bwl"      %1,%1\n"                \
+               "       sub.l   %1,%1\n"                        \
                "       jra     2b\n"                           \
                "       .previous\n"                            \
                "\n"                                            \
@@ -152,8 +168,8 @@ asm volatile ("\n"                                  \
                const void *__gu_ptr = (ptr);                           \
                u64 __gu_val;                                           \
                asm volatile ("\n"                                      \
-                       "1:     moves.l (%2)+,%1\n"                     \
-                       "2:     moves.l (%2),%R1\n"                     \
+                       "1:     "MOVES".l       (%2)+,%1\n"             \
+                       "2:     "MOVES".l       (%2),%R1\n"             \
                        "3:\n"                                          \
                        "       .section .fixup,\"ax\"\n"               \
                        "       .even\n"                                \
@@ -188,12 +204,12 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned
 
 #define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
        asm volatile ("\n"                                              \
-               "1:     moves."#s1"     (%2)+,%3\n"                     \
+               "1:     "MOVES"."#s1"   (%2)+,%3\n"                     \
                "       move."#s1"      %3,(%1)+\n"                     \
-               "2:     moves."#s2"     (%2)+,%3\n"                     \
+               "2:     "MOVES"."#s2"   (%2)+,%3\n"                     \
                "       move."#s2"      %3,(%1)+\n"                     \
                "       .ifnc   \""#s3"\",\"\"\n"                       \
-               "3:     moves."#s3"     (%2)+,%3\n"                     \
+               "3:     "MOVES"."#s3"   (%2)+,%3\n"                     \
                "       move."#s3"      %3,(%1)+\n"                     \
                "       .endif\n"                                       \
                "4:\n"                                                  \
@@ -269,13 +285,13 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 #define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
        asm volatile ("\n"                                              \
                "       move."#s1"      (%2)+,%3\n"                     \
-               "11:    moves."#s1"     %3,(%1)+\n"                     \
+               "11:    "MOVES"."#s1"   %3,(%1)+\n"                     \
                "12:    move."#s2"      (%2)+,%3\n"                     \
-               "21:    moves."#s2"     %3,(%1)+\n"                     \
+               "21:    "MOVES"."#s2"   %3,(%1)+\n"                     \
                "22:\n"                                                 \
                "       .ifnc   \""#s3"\",\"\"\n"                       \
                "       move."#s3"      (%2)+,%3\n"                     \
-               "31:    moves."#s3"     %3,(%1)+\n"                     \
+               "31:    "MOVES"."#s3"   %3,(%1)+\n"                     \
                "32:\n"                                                 \
                "       .endif\n"                                       \
                "4:\n"                                                  \
index 00dcc5176c577ba82d505b297f1447981fc7dac4..e4e22669edc0669c55c1edc9a1947664b3c1d4bc 100644 (file)
@@ -7,11 +7,7 @@ typedef greg_t gregset_t[NGREG];
 
 typedef struct fpregset {
        int f_fpcntl[3];
-#ifdef __mcoldfire__
-       int f_fpregs[8][2];
-#else
        int f_fpregs[8*3];
-#endif
 } fpregset_t;
 
 struct mcontext {
index c5696193281a16a405be19906d649091c273de42..40d29a788b05e24aa6885ba65639c8bcb5073480 100644 (file)
@@ -2,19 +2,24 @@
 # Makefile for the linux kernel.
 #
 
-extra-$(CONFIG_MMU)    := head.o
+extra-$(CONFIG_AMIGA)  := head.o
+extra-$(CONFIG_ATARI)  := head.o
+extra-$(CONFIG_MAC)    := head.o
+extra-$(CONFIG_APOLLO) := head.o
+extra-$(CONFIG_VME)    := head.o
+extra-$(CONFIG_HP300)  := head.o
+extra-$(CONFIG_Q40)    := head.o
+extra-$(CONFIG_SUN3X)  := head.o
 extra-$(CONFIG_SUN3)   := sun3-head.o
 extra-y                        += vmlinux.lds
 
-obj-y  := entry.o irq.o m68k_ksyms.o module.o process.o ptrace.o setup.o \
-          signal.o sys_m68k.o syscalltable.o time.o traps.o
+obj-y  := entry.o init_task.o irq.o m68k_ksyms.o module.o process.o ptrace.o
+obj-y  += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
 
-obj-$(CONFIG_MMU)      += ints.o vectors.o
+obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
+obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
 
 ifndef CONFIG_MMU_SUN3
-obj-y                  += dma.o
-endif
-ifndef CONFIG_MMU
-obj-y                  += init_task.o
+obj-y  += dma.o
 endif
 
index 983fed9d469b3979ed8f5e251631434e6ad1f61b..a972b00cd77d7e699ae0c0fcb5f053bfac19f78c 100644 (file)
@@ -24,8 +24,7 @@ int main(void)
        /* offsets into the task struct */
        DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
        DEFINE(TASK_MM, offsetof(struct task_struct, mm));
-       DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
-       DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
+       DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
 
        /* offsets into the thread struct */
        DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
index 081cf96f243b01ee4cbea14b415f7364c08fd0fe..b8daf64e347de81443488a007658ac057f17f56e 100644 (file)
@@ -1,4 +1,4 @@
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 #include "entry_mm.S"
 #else
 #include "entry_no.S"
index c713f514843dbbea4ef6b42dcbec445e53847def..675a854966a62df047045c8dc8fc8016530e77c6 100644 (file)
@@ -99,7 +99,8 @@ do_trace_exit:
        jra     .Lret_from_exception
 
 ENTRY(ret_from_signal)
-       tstb    %curptr@(TASK_INFO+TINFO_FLAGS+2)
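+       | thread_info now lives at the base of the kernel stack, so
+       | reach it through task->stack (TASK_STACK) rather than a
+       | fixed offset into the task structure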
+       movel   %curptr@(TASK_STACK),%a1
+       tstb    %a1@(TINFO_FLAGS+2)
        jge     1f
        jbsr    syscall_trace
 1:     RESTORE_SWITCH_STACK
@@ -120,11 +121,13 @@ ENTRY(system_call)
        SAVE_ALL_SYS
 
        GET_CURRENT(%d1)
+       movel   %d1,%a1
+
        | save top of frame
        movel   %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
 
        | syscall trace?
-       tstb    %curptr@(TASK_INFO+TINFO_FLAGS+2)
+       tstb    %a1@(TINFO_FLAGS+2)
        jmi     do_trace_entry
        cmpl    #NR_syscalls,%d0
        jcc     badsys
@@ -133,7 +136,8 @@ syscall:
        movel   %d0,%sp@(PT_OFF_D0)     | save the return value
 ret_from_syscall:
        |oriw   #0x0700,%sr
-       movew   %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
+       movel   %curptr@(TASK_STACK),%a1
+       movew   %a1@(TINFO_FLAGS+2),%d0
        jne     syscall_exit_work
 1:     RESTORE_ALL
 
@@ -159,7 +163,8 @@ ENTRY(ret_from_exception)
        andw    #ALLOWINT,%sr
 
 resume_userspace:
-       moveb   %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
+       movel   %curptr@(TASK_STACK),%a1
+       moveb   %a1@(TINFO_FLAGS+3),%d0
        jne     exit_work
 1:     RESTORE_ALL
 
@@ -199,7 +204,8 @@ do_delayed_trace:
 ENTRY(auto_inthandler)
        SAVE_ALL_INT
        GET_CURRENT(%d0)
-       addqb   #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+       movel   %d0,%a1
+       addqb   #1,%a1@(TINFO_PREEMPT+1)
                                        |  put exception # in d0
        bfextu  %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
        subw    #VEC_SPUR,%d0
@@ -211,7 +217,8 @@ auto_irqhandler_fixup = . + 2
        addql   #8,%sp                  |  pop parameters off stack
 
 ret_from_interrupt:
-       subqb   #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+       movel   %curptr@(TASK_STACK),%a1
+       subqb   #1,%a1@(TINFO_PREEMPT+1)
        jeq     ret_from_last_interrupt
 2:     RESTORE_ALL
 
@@ -232,7 +239,8 @@ ret_from_last_interrupt:
 ENTRY(user_inthandler)
        SAVE_ALL_INT
        GET_CURRENT(%d0)
-       addqb   #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+       movel   %d0,%a1
+       addqb   #1,%a1@(TINFO_PREEMPT+1)
                                        |  put exception # in d0
        bfextu  %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
 user_irqvec_fixup = . + 2
@@ -243,7 +251,8 @@ user_irqvec_fixup = . + 2
        jsr     do_IRQ                  |  process the IRQ
        addql   #8,%sp                  |  pop parameters off stack
 
-       subqb   #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+       movel   %curptr@(TASK_STACK),%a1
+       subqb   #1,%a1@(TINFO_PREEMPT+1)
        jeq     ret_from_last_interrupt
        RESTORE_ALL
 
@@ -252,13 +261,15 @@ user_irqvec_fixup = . + 2
 ENTRY(bad_inthandler)
        SAVE_ALL_INT
        GET_CURRENT(%d0)
-       addqb   #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+       movel   %d0,%a1
+       addqb   #1,%a1@(TINFO_PREEMPT+1)
 
        movel   %sp,%sp@-
        jsr     handle_badint
        addql   #4,%sp
 
-       subqb   #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
+       movel   %curptr@(TASK_STACK),%a1
+       subqb   #1,%a1@(TINFO_PREEMPT+1)
        jeq     ret_from_last_interrupt
        RESTORE_ALL
 
index 1b4289061a64a6ea261e2bce71eed7f2fb573e87..d80cba45589f4b7c337f5c9a2cdff9b430bcd514 100644 (file)
@@ -44,8 +44,7 @@
 
 ENTRY(buserr)
        SAVE_ALL_INT
-       moveq   #-1,%d0
-       movel   %d0,%sp@(PT_OFF_ORIG_D0)
+       GET_CURRENT(%d0)
        movel   %sp,%sp@-               /* stack frame pointer argument */
        jsr     buserr_c
        addql   #4,%sp
@@ -53,8 +52,7 @@ ENTRY(buserr)
 
 ENTRY(trap)
        SAVE_ALL_INT
-       moveq   #-1,%d0
-       movel   %d0,%sp@(PT_OFF_ORIG_D0)
+       GET_CURRENT(%d0)
        movel   %sp,%sp@-               /* stack frame pointer argument */
        jsr     trap_c
        addql   #4,%sp
@@ -65,8 +63,7 @@ ENTRY(trap)
 .globl dbginterrupt
 ENTRY(dbginterrupt)
        SAVE_ALL_INT
-       moveq   #-1,%d0
-       movel   %d0,%sp@(PT_OFF_ORIG_D0)
+       GET_CURRENT(%d0)
        movel   %sp,%sp@-               /* stack frame pointer argument */
        jsr     dbginterrupt_c
        addql   #4,%sp
index cbf9dc3cc51dc6a8a8d297fa99d7b9d349100a03..c744cfc6bfa1ceb0d4efb5828c270e76dc893700 100644 (file)
@@ -19,7 +19,6 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  *
  * All other task structs will be allocated on slabs in fork.c
  */
-__asm__(".align 4");
 struct task_struct init_task = INIT_TASK(init_task);
 
 EXPORT_SYMBOL(init_task);
@@ -27,7 +26,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * Initial thread structure.
  *
- * We need to make sure that this is 8192-byte aligned due to the
+ * We need to make sure that this is THREAD_SIZE aligned due to the
  * way process stacks are handled. This is done by having a special
  * "init_task" linker map entry..
  */
index 1b7a14d1a00070af31fe62299f6dbe3b8e03d4be..774c1bd59c3655adea34251d77cb7792effcd688 100644 (file)
@@ -14,7 +14,7 @@ EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__muldi3);
 
-#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
+#if defined(CONFIG_CPU_HAS_NO_MULDIV64)
 /*
  * Simpler 68k and ColdFire parts also need a few other gcc functions.
  */
index 1bc223aa07ec297a6ed923bbc49168b89eedfec3..125f34e00bf01e8409634a1c3dbfd1d2d294f123 100644 (file)
 #include <asm/setup.h>
 #include <asm/pgtable.h>
 
-/*
- * Initial task/thread structure. Make this a per-architecture thing,
- * because different architectures tend to have different
- * alignment requirements and potentially different initial
- * setup.
- */
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-union thread_union init_thread_union __init_task_data
-       __attribute__((aligned(THREAD_SIZE))) =
-               { INIT_THREAD_INFO(init_task) };
-
-/* initial task structure */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
 
 asmlinkage void ret_from_fork(void);
 
@@ -188,9 +172,7 @@ void flush_thread(void)
 
        current->thread.fs = __USER_DS;
        if (!FPU_IS_EMU)
-               asm volatile (".chip 68k/68881\n\t"
-                             "frestore %0@\n\t"
-                             ".chip 68k" : : "a" (&zero));
+               asm volatile ("frestore %0@" : : "a" (&zero) : "memory");
 }
 
 /*
@@ -264,11 +246,28 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
                /* Copy the current fpu state */
                asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
 
-               if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
-                 asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
-                               "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
-                               : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
-                               : "memory");
+               if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
+                       if (CPU_IS_COLDFIRE) {
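+                               /* ColdFire fp registers are 64-bit, so
+                                * save them as doubles; the control
+                                * registers are moved one at a time.
+                                */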
+                               asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
+                                             "fmovel %/fpiar,%1\n\t"
+                                             "fmovel %/fpcr,%2\n\t"
+                                             "fmovel %/fpsr,%3"
+                                             :
+                                             : "m" (p->thread.fp[0]),
+                                               "m" (p->thread.fpcntl[0]),
+                                               "m" (p->thread.fpcntl[1]),
+                                               "m" (p->thread.fpcntl[2])
+                                             : "memory");
+                       } else {
+                               asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
+                                             "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
+                                             :
+                                             : "m" (p->thread.fp[0]),
+                                               "m" (p->thread.fpcntl[0])
+                                             : "memory");
+                       }
+               }
+
                /* Restore the state in case the fpu was busy */
                asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
        }
@@ -301,12 +300,28 @@ int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
        if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
                return 0;
 
-       asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
-               :: "m" (fpu->fpcntl[0])
-               : "memory");
-       asm volatile ("fmovemx %/fp0-%/fp7,%0"
-               :: "m" (fpu->fpregs[0])
-               : "memory");
+       if (CPU_IS_COLDFIRE) {
+               asm volatile ("fmovel %/fpiar,%0\n\t"
+                             "fmovel %/fpcr,%1\n\t"
+                             "fmovel %/fpsr,%2\n\t"
+                             "fmovemd %/fp0-%/fp7,%3"
+                             :
+                             : "m" (fpu->fpcntl[0]),
+                               "m" (fpu->fpcntl[1]),
+                               "m" (fpu->fpcntl[2]),
+                               "m" (fpu->fpregs[0])
+                             : "memory");
+       } else {
+               asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
+                             :
+                             : "m" (fpu->fpcntl[0])
+                             : "memory");
+               asm volatile ("fmovemx %/fp0-%/fp7,%0"
+                             :
+                             : "m" (fpu->fpregs[0])
+                             : "memory");
+       }
+
        return 1;
 }
 EXPORT_SYMBOL(dump_fpu);
index 0b252683cefb21a3d057987833a22ce1ea8b17be..7bc999b7352966e0d00545f1c93966518e0047fd 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/ptrace.h>
 #include <linux/user.h>
 #include <linux/signal.h>
+#include <linux/tracehook.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -275,3 +276,20 @@ asmlinkage void syscall_trace(void)
                current->exit_code = 0;
        }
 }
+
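+/*
+ * The ColdFire entry code calls these tracehook based helpers
+ * directly, in place of the older syscall_trace() above.
+ */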
+#ifdef CONFIG_COLDFIRE
+asmlinkage int syscall_trace_enter(void)
+{
+       int ret = 0;
+
+       if (test_thread_flag(TIF_SYSCALL_TRACE))
+               ret = tracehook_report_syscall_entry(task_pt_regs(current));
+       return ret;
+}
+
+asmlinkage void syscall_trace_leave(void)
+{
+       if (test_thread_flag(TIF_SYSCALL_TRACE))
+               tracehook_report_syscall_exit(task_pt_regs(current), 0);
+}
+#endif /* CONFIG_COLDFIRE */
index c3b45061dd08d3f6981f3c19e0867f5d16ee62fe..d872ce4807c96f36f81de1f60adc67c99de9627b 100644 (file)
@@ -221,7 +221,8 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
        /* The bootinfo is located right after the kernel bss */
-       m68k_parse_bootinfo((const struct bi_record *)_end);
+       if (!CPU_IS_COLDFIRE)
+               m68k_parse_bootinfo((const struct bi_record *)_end);
 
        if (CPU_IS_040)
                m68k_is040or060 = 4;
@@ -235,7 +236,7 @@ void __init setup_arch(char **cmdline_p)
         *  with them, we should add a test to check_bugs() below] */
 #ifndef CONFIG_M68KFPU_EMU_ONLY
        /* clear the fpu if we have one */
-       if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
+       if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
                volatile int zero = 0;
                asm volatile ("frestore %0" : : "m" (zero));
        }
@@ -258,6 +259,10 @@ void __init setup_arch(char **cmdline_p)
        init_mm.end_data = (unsigned long)_edata;
        init_mm.brk = (unsigned long)_end;
 
+#if defined(CONFIG_BOOTPARAM)
+       strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
+       m68k_command_line[CL_SIZE - 1] = 0;
+#endif /* CONFIG_BOOTPARAM */
        *cmdline_p = m68k_command_line;
        memcpy(boot_command_line, *cmdline_p, CL_SIZE);
 
@@ -322,6 +327,11 @@ void __init setup_arch(char **cmdline_p)
        case MACH_SUN3X:
                config_sun3x();
                break;
+#endif
+#ifdef CONFIG_COLDFIRE
+       case MACH_M54XX:
+               config_BSP(NULL, 0);
+               break;
 #endif
        default:
                panic("No configuration setup");
@@ -384,6 +394,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #define LOOP_CYCLES_68030      (8)
 #define LOOP_CYCLES_68040      (3)
 #define LOOP_CYCLES_68060      (1)
+#define LOOP_CYCLES_COLDFIRE   (2)
 
        if (CPU_IS_020) {
                cpu = "68020";
@@ -397,6 +408,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        } else if (CPU_IS_060) {
                cpu = "68060";
                clockfactor = LOOP_CYCLES_68060;
+       } else if (CPU_IS_COLDFIRE) {
+               cpu = "ColdFire";
+               clockfactor = LOOP_CYCLES_COLDFIRE;
        } else {
                cpu = "680x0";
                clockfactor = 0;
@@ -415,6 +429,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                fpu = "68060";
        else if (m68k_fputype & FPU_SUNFPA)
                fpu = "Sun FPA";
+       else if (m68k_fputype & FPU_COLDFIRE)
+               fpu = "ColdFire";
        else
                fpu = "none";
 #endif
@@ -431,6 +447,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                mmu = "Sun-3";
        else if (m68k_mmutype & MMU_APOLLO)
                mmu = "Apollo";
+       else if (m68k_mmutype & MMU_COLDFIRE)
+               mmu = "ColdFire";
        else
                mmu = "unknown";
 
index 2ed8c0fb1517defe078c1aa38b8f4b7fbf9bf865..ca3df0dc7e8842b25104e90eaad6c290bb71dc7d 100644 (file)
@@ -47,7 +47,6 @@ EXPORT_SYMBOL(memory_end);
 char __initdata command_line[COMMAND_LINE_SIZE];
 
 /* machine dependent timer functions */
-void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
 int (*mach_set_clock_mmss)(unsigned long);
 
 /* machine dependent reboot functions */
index a0afc239304eb9eb40fcbcbffdf21c005cced264..cb856f9da655b5aa4141841db122a020876a21cd 100644 (file)
@@ -56,7 +56,11 @@ static const int frame_extra_sizes[16] = {
   [1]  = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
   [2]  = sizeof(((struct frame *)0)->un.fmt2),
   [3]  = sizeof(((struct frame *)0)->un.fmt3),
+#ifdef CONFIG_COLDFIRE
+  [4]  = 0,
+#else
   [4]  = sizeof(((struct frame *)0)->un.fmt4),
+#endif
   [5]  = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
   [6]  = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
   [7]  = sizeof(((struct frame *)0)->un.fmt7),
@@ -84,7 +88,11 @@ int handle_kernel_fault(struct pt_regs *regs)
        regs->stkadj = frame_extra_sizes[regs->format];
        tregs = (struct pt_regs *)((long)regs + regs->stkadj);
        tregs->vector = regs->vector;
+#ifdef CONFIG_COLDFIRE
+       tregs->format = 4;
+#else
        tregs->format = 0;
+#endif
        tregs->pc = fixup->fixup;
        tregs->sr = regs->sr;
 
@@ -195,7 +203,8 @@ static inline int restore_fpu_state(struct sigcontext *sc)
 
        if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
            /* Verify the frame format.  */
-           if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
+           if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+                (sc->sc_fpstate[0] != fpu_version))
                goto out;
            if (CPU_IS_020_OR_030) {
                if (m68k_fputype & FPU_68881 &&
@@ -214,19 +223,43 @@ static inline int restore_fpu_state(struct sigcontext *sc)
                       sc->sc_fpstate[3] == 0x60 ||
                      sc->sc_fpstate[3] == 0xe0))
                    goto out;
+           } else if (CPU_IS_COLDFIRE) {
+               if (!(sc->sc_fpstate[0] == 0x00 ||
+                     sc->sc_fpstate[0] == 0x05 ||
+                     sc->sc_fpstate[0] == 0xe5))
+                   goto out;
            } else
                goto out;
 
-           __asm__ volatile (".chip 68k/68881\n\t"
-                             "fmovemx %0,%%fp0-%%fp1\n\t"
-                             "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-                             ".chip 68k"
-                             : /* no outputs */
-                             : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
+           if (CPU_IS_COLDFIRE) {
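+               /* reload the two saved fp data registers as doubles,
+                * and each control register individually */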
+               __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
+                                 "fmovel %1,%%fpcr\n\t"
+                                 "fmovel %2,%%fpsr\n\t"
+                                 "fmovel %3,%%fpiar"
+                                 : /* no outputs */
+                                 : "m" (sc->sc_fpregs[0]),
+                                   "m" (sc->sc_fpcntl[0]),
+                                   "m" (sc->sc_fpcntl[1]),
+                                   "m" (sc->sc_fpcntl[2]));
+           } else {
+               __asm__ volatile (".chip 68k/68881\n\t"
+                                 "fmovemx %0,%%fp0-%%fp1\n\t"
+                                 "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+                                 ".chip 68k"
+                                 : /* no outputs */
+                                 : "m" (*sc->sc_fpregs),
+                                   "m" (*sc->sc_fpcntl));
+           }
+       }
+
+       if (CPU_IS_COLDFIRE) {
+               __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
+       } else {
+               __asm__ volatile (".chip 68k/68881\n\t"
+                                 "frestore %0\n\t"
+                                 ".chip 68k"
+                                 : : "m" (*sc->sc_fpstate));
        }
-       __asm__ volatile (".chip 68k/68881\n\t"
-                         "frestore %0\n\t"
-                         ".chip 68k" : : "m" (*sc->sc_fpstate));
        err = 0;
 
 out:
@@ -241,7 +274,7 @@ out:
 static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 {
        unsigned char fpstate[FPCONTEXT_SIZE];
-       int context_size = CPU_IS_060 ? 8 : 0;
+       int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
        fpregset_t fpregs;
        int err = 1;
 
@@ -260,10 +293,11 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
        if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
                goto out;
        if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
-               if (!CPU_IS_060)
+               if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                        context_size = fpstate[1];
                /* Verify the frame format.  */
-               if (!CPU_IS_060 && (fpstate[0] != fpu_version))
+               if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+                    (fpstate[0] != fpu_version))
                        goto out;
                if (CPU_IS_020_OR_030) {
                        if (m68k_fputype & FPU_68881 &&
@@ -282,26 +316,50 @@ static inline int rt_restore_fpu_state(struct ucontext __user *uc)
                              fpstate[3] == 0x60 ||
                              fpstate[3] == 0xe0))
                                goto out;
+               } else if (CPU_IS_COLDFIRE) {
+                       if (!(fpstate[3] == 0x00 ||
+                             fpstate[3] == 0x05 ||
+                             fpstate[3] == 0xe5))
+                               goto out;
                } else
                        goto out;
                if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
                                     sizeof(fpregs)))
                        goto out;
-               __asm__ volatile (".chip 68k/68881\n\t"
-                                 "fmovemx %0,%%fp0-%%fp7\n\t"
-                                 "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
-                                 ".chip 68k"
-                                 : /* no outputs */
-                                 : "m" (*fpregs.f_fpregs),
-                                   "m" (*fpregs.f_fpcntl));
+
+               if (CPU_IS_COLDFIRE) {
+                       __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
+                                         "fmovel %1,%%fpcr\n\t"
+                                         "fmovel %2,%%fpsr\n\t"
+                                         "fmovel %3,%%fpiar"
+                                         : /* no outputs */
+                                         : "m" (fpregs.f_fpregs[0]),
+                                           "m" (fpregs.f_fpcntl[0]),
+                                           "m" (fpregs.f_fpcntl[1]),
+                                           "m" (fpregs.f_fpcntl[2]));
+               } else {
+                       __asm__ volatile (".chip 68k/68881\n\t"
+                                         "fmovemx %0,%%fp0-%%fp7\n\t"
+                                         "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+                                         ".chip 68k"
+                                         : /* no outputs */
+                                         : "m" (*fpregs.f_fpregs),
+                                           "m" (*fpregs.f_fpcntl));
+               }
        }
        if (context_size &&
            __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
                             context_size))
                goto out;
-       __asm__ volatile (".chip 68k/68881\n\t"
-                         "frestore %0\n\t"
-                         ".chip 68k" : : "m" (*fpstate));
+
+       if (CPU_IS_COLDFIRE) {
+               __asm__ volatile ("frestore %0" : : "m" (*fpstate));
+       } else {
+               __asm__ volatile (".chip 68k/68881\n\t"
+                                 "frestore %0\n\t"
+                                 ".chip 68k"
+                                 : : "m" (*fpstate));
+       }
        err = 0;
 
 out:
@@ -336,8 +394,12 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
                regs->format = formatvec >> 12;
                regs->vector = formatvec & 0xfff;
 #define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
-               __asm__ __volatile__
-                       ("   movel %0,%/a0\n\t"
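+               /*
+                * On ColdFire the exception frame has no extra words to
+                * move, so this reduces to resetting the stack pointer
+                * and leaving through ret_from_signal.
+                */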
+               __asm__ __volatile__ (
+#ifdef CONFIG_COLDFIRE
+                        "   movel %0,%/sp\n\t"
+                        "   bra ret_from_signal\n"
+#else
+                        "   movel %0,%/a0\n\t"
                         "   subl %1,%/a0\n\t"     /* make room on stack */
                         "   movel %/a0,%/sp\n\t"  /* set stack pointer */
                         /* move switch_stack and pt_regs */
@@ -350,6 +412,7 @@ static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
                         "2: movel %4@+,%/a0@+\n\t"
                         "   dbra %1,2b\n\t"
                         "   bral ret_from_signal\n"
+#endif
                         : /* no outputs, it doesn't ever return */
                         : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
                           "n" (frame_offset), "a" (buf + fsize/4)
@@ -516,10 +579,15 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
                return;
        }
 
-       __asm__ volatile (".chip 68k/68881\n\t"
-                         "fsave %0\n\t"
-                         ".chip 68k"
-                         : : "m" (*sc->sc_fpstate) : "memory");
+       if (CPU_IS_COLDFIRE) {
+               __asm__ volatile ("fsave %0"
+                                 : : "m" (*sc->sc_fpstate) : "memory");
+       } else {
+               __asm__ volatile (".chip 68k/68881\n\t"
+                                 "fsave %0\n\t"
+                                 ".chip 68k"
+                                 : : "m" (*sc->sc_fpstate) : "memory");
+       }
 
        if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
                fpu_version = sc->sc_fpstate[0];
@@ -530,21 +598,35 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
                        if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
                                sc->sc_fpstate[0x38] |= 1 << 3;
                }
-               __asm__ volatile (".chip 68k/68881\n\t"
-                                 "fmovemx %%fp0-%%fp1,%0\n\t"
-                                 "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-                                 ".chip 68k"
-                                 : "=m" (*sc->sc_fpregs),
-                                   "=m" (*sc->sc_fpcntl)
-                                 : /* no inputs */
-                                 : "memory");
+
+               if (CPU_IS_COLDFIRE) {
+                       __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
+                                         "fmovel %%fpcr,%1\n\t"
+                                         "fmovel %%fpsr,%2\n\t"
+                                         "fmovel %%fpiar,%3"
+                                         : "=m" (sc->sc_fpregs[0]),
+                                           "=m" (sc->sc_fpcntl[0]),
+                                           "=m" (sc->sc_fpcntl[1]),
+                                           "=m" (sc->sc_fpcntl[2])
+                                         : /* no inputs */
+                                         : "memory");
+               } else {
+                       __asm__ volatile (".chip 68k/68881\n\t"
+                                         "fmovemx %%fp0-%%fp1,%0\n\t"
+                                         "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+                                         ".chip 68k"
+                                         : "=m" (*sc->sc_fpregs),
+                                           "=m" (*sc->sc_fpcntl)
+                                         : /* no inputs */
+                                         : "memory");
+               }
        }
 }
 
 static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 {
        unsigned char fpstate[FPCONTEXT_SIZE];
-       int context_size = CPU_IS_060 ? 8 : 0;
+       int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
        int err = 0;
 
        if (FPU_IS_EMU) {
@@ -557,15 +639,19 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
                return err;
        }
 
-       __asm__ volatile (".chip 68k/68881\n\t"
-                         "fsave %0\n\t"
-                         ".chip 68k"
-                         : : "m" (*fpstate) : "memory");
+       if (CPU_IS_COLDFIRE) {
+               __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
+       } else {
+               __asm__ volatile (".chip 68k/68881\n\t"
+                                 "fsave %0\n\t"
+                                 ".chip 68k"
+                                 : : "m" (*fpstate) : "memory");
+       }
 
        err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
        if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
                fpregset_t fpregs;
-               if (!CPU_IS_060)
+               if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                        context_size = fpstate[1];
                fpu_version = fpstate[0];
                if (CPU_IS_020_OR_030 &&
@@ -575,14 +661,27 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
                        if (*(unsigned short *) fpstate == 0x1f38)
                                fpstate[0x38] |= 1 << 3;
                }
-               __asm__ volatile (".chip 68k/68881\n\t"
-                                 "fmovemx %%fp0-%%fp7,%0\n\t"
-                                 "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
-                                 ".chip 68k"
-                                 : "=m" (*fpregs.f_fpregs),
-                                   "=m" (*fpregs.f_fpcntl)
-                                 : /* no inputs */
-                                 : "memory");
+               if (CPU_IS_COLDFIRE) {
+                       __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
+                                         "fmovel %%fpcr,%1\n\t"
+                                         "fmovel %%fpsr,%2\n\t"
+                                         "fmovel %%fpiar,%3"
+                                         : "=m" (fpregs.f_fpregs[0]),
+                                           "=m" (fpregs.f_fpcntl[0]),
+                                           "=m" (fpregs.f_fpcntl[1]),
+                                           "=m" (fpregs.f_fpcntl[2])
+                                         : /* no inputs */
+                                         : "memory");
+               } else {
+                       __asm__ volatile (".chip 68k/68881\n\t"
+                                         "fmovemx %%fp0-%%fp7,%0\n\t"
+                                         "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+                                         ".chip 68k"
+                                         : "=m" (*fpregs.f_fpregs),
+                                           "=m" (*fpregs.f_fpcntl)
+                                         : /* no inputs */
+                                         : "memory");
+               }
                err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
                                    sizeof(fpregs));
        }
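
Both save paths above capture the same logical registers, but with
different element sizes: the 68881/68882 fmovemx writes 96-bit
extended-precision images, while the ColdFire V4e FPU holds 64-bit
doubles and its fsave frame carries 12 bytes of state past the header,
matching the context_size change above. A standalone C sketch of the
two register images (struct names are illustrative, not kernel types;
the sigcontext path stores fp0-fp1 only, the rt path all eight):

	#include <stdint.h>

	struct fpimage_68881 {
		uint32_t fpregs[8][3];	/* fmovemx: 96-bit extended per register */
		uint32_t fpcntl[3];	/* fmoveml: fpcr, fpsr, fpiar in one move */
	};

	struct fpimage_coldfire {
		uint64_t fpregs[8];	/* fmovemd: 64-bit double per register */
		uint32_t fpcntl[3];	/* three separate fmovel transfers */
	};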
@@ -679,8 +778,7 @@ static inline void push_cache (unsigned long vaddr)
                                      "cpushl %%bc,(%0)\n\t"
                                      ".chip 68k"
                                      : : "a" (temp));
-       }
-       else {
+       } else if (!CPU_IS_COLDFIRE) {
                /*
                 * 68030/68020 have no writeback cache;
                 * still need to clear icache.
index a5cf40c26de58abc6c89caa9d800e51ed0095c07..75ab79b3bdeb76b7439f0337c30634115c7d7125 100644 (file)
@@ -1,4 +1,4 @@
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
 #include "time_mm.c"
 #else
 #include "time_no.c"
index 6623909f70e6f1d00facb0047865c12aa0510509..3ef0f7768dcd52a482acf04636a88fff3bfd335a 100644 (file)
@@ -26,6 +26,9 @@
 
 #define        TICK_SIZE (tick_nsec / 1000)
 
+/* machine dependent timer functions */
+void (*mach_gettod)(int*, int*, int*, int*, int*, int*);
+
 static inline int set_rtc_mmss(unsigned long nowtime)
 {
        if (mach_set_clock_mmss)
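
mach_gettod is the board-supplied wall-clock hook; by m68k machdep
convention the six out-parameters are year, month, day, hour, minute
and second. A hypothetical platform implementation (name and values
are placeholders, not part of this series):

	/* Placeholder board hook; parameter order follows the m68k
	 * machdep convention: year, month, day, hour, minute, second. */
	static void myboard_gettod(int *year, int *mon, int *day,
				   int *hour, int *min, int *sec)
	{
		*year = 2012;
		*mon  = 1;
		*day  = 6;
		*hour = 17;
		*min  = 59;
		*sec  = 33;
	}

	/* wired up in platform setup code: mach_gettod = myboard_gettod; */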
index 89362f2bb56a3e8bae88a364a1b06de7a68e8a61..a76452ca964ef6e538ee6d5181b4341c7679dd48 100644 (file)
@@ -706,6 +706,88 @@ create_atc_entry:
 #endif /* CPU_M68020_OR_M68030 */
 #endif /* !CONFIG_SUN3 */
 
+#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+#include <asm/mcfmmu.h>
+
+/*
+ *     The following table converts the FS encoding of a ColdFire
+ *     exception stack frame into the error_code value needed by
+ *     do_page_fault.
+ */
+static const unsigned char fs_err_code[] = {
+       0,  /* 0000 */
+       0,  /* 0001 */
+       0,  /* 0010 */
+       0,  /* 0011 */
+       1,  /* 0100 */
+       0,  /* 0101 */
+       0,  /* 0110 */
+       0,  /* 0111 */
+       2,  /* 1000 */
+       3,  /* 1001 */
+       2,  /* 1010 */
+       0,  /* 1011 */
+       1,  /* 1100 */
+       1,  /* 1101 */
+       0,  /* 1110 */
+       0   /* 1111 */
+};
+
+static inline void access_errorcf(unsigned int fs, struct frame *fp)
+{
+       unsigned long mmusr, addr;
+       unsigned int err_code;
+       int need_page_fault;
+
+       mmusr = mmu_read(MMUSR);
+       addr = mmu_read(MMUAR);
+
+       /*
+        * error_code:
+        *      bit 0 == 0 means no page found, 1 means protection fault
+        *      bit 1 == 0 means read, 1 means write
+        */
+       switch (fs) {
+       case  5:  /* 0101 TLB opword X miss */
+               need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
+               addr = fp->ptregs.pc;
+               break;
+       case  6:  /* 0110 TLB extension word X miss */
+               need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
+               addr = fp->ptregs.pc + sizeof(long);
+               break;
+       case 10:  /* 1010 TLB W miss */
+               need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
+               break;
+       case 14: /* 1110 TLB R miss */
+               need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
+               break;
+       default:
+               /* 0000 Normal */
+               /* 0001 Reserved */
+               /* 0010 Interrupt during debug service routine */
+               /* 0011 Reserved */
+               /* 0100 X Protection */
+               /* 0111 IFP in emulator mode */
+               /* 1000 W Protection */
+               /* 1001 Write error */
+               /* 1011 Reserved */
+               /* 1100 R Protection */
+               /* 1101 R Protection */
+               /* 1111 OEP in emulator mode */
+               need_page_fault = 1;
+               break;
+       }
+
+       if (need_page_fault) {
+               err_code = fs_err_code[fs];
+               if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
+                       err_code |= 2; /* bit1 - write, bit0 - protection */
+               do_page_fault(&fp->ptregs, addr, err_code);
+       }
+}
+#endif /* CONFIG_COLDFIRE && CONFIG_MMU */
+
 asmlinkage void buserr_c(struct frame *fp)
 {
        /* Only set esp0 if coming from user mode */
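
The table plus access_errorcf() above reduce every ColdFire fault
status to the two error_code bits that do_page_fault() consumes. A
standalone sketch that reuses the table verbatim and prints the
decoding (illustration only):

	#include <stdio.h>

	/* same table as above: bit 1 = write access, bit 0 = protection */
	static const unsigned char fs_err_code[16] = {
		0, 0, 0, 0, 1, 0, 0, 0, 2, 3, 2, 0, 1, 1, 0, 0
	};

	int main(void)
	{
		unsigned int fs;

		for (fs = 0; fs < 16; fs++)
			printf("fs=%2u -> err_code=%u (%s, %s)\n", fs,
			       fs_err_code[fs],
			       (fs_err_code[fs] & 2) ? "write" : "read",
			       (fs_err_code[fs] & 1) ? "protection" : "no page");
		return 0;
	}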
@@ -716,6 +798,28 @@ asmlinkage void buserr_c(struct frame *fp)
        printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
 #endif
 
+#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+       if (CPU_IS_COLDFIRE) {
+               unsigned int fs;
+               fs = (fp->ptregs.vector & 0x3) |
+                       ((fp->ptregs.vector & 0xc00) >> 8);
+               switch (fs) {
+               case 0x5:
+               case 0x6:
+               case 0x7:
+               case 0x9:
+               case 0xa:
+               case 0xd:
+               case 0xe:
+               case 0xf:
+                       access_errorcf(fs, fp);
+                       return;
+               default:
+                       break;
+               }
+       }
+#endif /* CONFIG_COLDFIRE && CONFIG_MMU */
+
        switch (fp->ptregs.format) {
 #if defined (CONFIG_M68060)
        case 4:                         /* 68060 access error */
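
The fault-status nibble is split across the frame's format/vector
word: FS[1:0] live in bits 1-0 and FS[3:2] in bits 11-10, which the
masking above reassembles before dispatching the TLB-miss cases. A
minimal worked example with an illustrative frame word:

	#include <stdio.h>

	int main(void)
	{
		unsigned int vector = 0x0802;	/* illustrative: bits 11-10 = 10, bits 1-0 = 10 */
		unsigned int fs = (vector & 0x3) | ((vector & 0xc00) >> 8);

		printf("fs = 0x%x\n", fs);	/* prints 0xa: 1010, TLB miss on write */
		return 0;
	}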
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
new file mode 100644 (file)
index 0000000..8e66ccb
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ *     vmlinux-nommu.lds -- master linker script for m68knommu arch
+ *
+ *     (C) Copyright 2002-2006, Greg Ungerer <gerg@snapgear.com>
+ *
+ *     This linker script is equipped to build either ROM loaded or RAM
+ *     run kernels.
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+#if defined(CONFIG_RAMKERNEL)
+#define        RAM_START       CONFIG_KERNELBASE
+#define        RAM_LENGTH      (CONFIG_RAMBASE + CONFIG_RAMSIZE - CONFIG_KERNELBASE)
+#define        TEXT            ram
+#define        DATA            ram
+#define        INIT            ram
+#define        BSSS            ram
+#endif
+#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL)
+#define        RAM_START       CONFIG_RAMBASE
+#define        RAM_LENGTH      CONFIG_RAMSIZE
+#define        ROMVEC_START    CONFIG_ROMVEC
+#define        ROMVEC_LENGTH   CONFIG_ROMVECSIZE
+#define        ROM_START       CONFIG_ROMSTART
+#define        ROM_LENGTH      CONFIG_ROMSIZE
+#define        TEXT            rom
+#define        DATA            ram
+#define        INIT            ram
+#define        BSSS            ram
+#endif
+
+#ifndef DATA_ADDR
+#define        DATA_ADDR
+#endif
+
+
+OUTPUT_ARCH(m68k)
+ENTRY(_start)
+
+MEMORY {
+       ram     : ORIGIN = RAM_START, LENGTH = RAM_LENGTH
+#ifdef ROM_START
+       romvec  : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH
+       rom     : ORIGIN = ROM_START, LENGTH = ROM_LENGTH
+#endif
+}
+
+jiffies = jiffies_64 + 4;
+
+SECTIONS {
+
+#ifdef ROMVEC_START
+       . = ROMVEC_START ;
+       .romvec : {
+               __rom_start = . ;
+               _romvec = .;
+               *(.data..initvect)
+       } > romvec
+#endif
+
+       .text : {
+               _text = .;
+               _stext = . ;
+               HEAD_TEXT
+               TEXT_TEXT
+               SCHED_TEXT
+               LOCK_TEXT
+               *(.text..lock)
+               *(.fixup)
+
+               . = ALIGN(16);          /* Exception table              */
+               __start___ex_table = .;
+               *(__ex_table)
+               __stop___ex_table = .;
+
+               *(.rodata) *(.rodata.*)
+               *(__vermagic)           /* Kernel version magic */
+               *(.rodata1)
+               *(.rodata.str1.1)
+
+               /* Kernel symbol table: Normal symbols */
+               . = ALIGN(4);
+               __start___ksymtab = .;
+               *(SORT(___ksymtab+*))
+               __stop___ksymtab = .;
+
+               /* Kernel symbol table: GPL-only symbols */
+               __start___ksymtab_gpl = .;
+               *(SORT(___ksymtab_gpl+*))
+               __stop___ksymtab_gpl = .;
+
+               /* Kernel symbol table: Normal unused symbols */
+               __start___ksymtab_unused = .;
+               *(SORT(___ksymtab_unused+*))
+               __stop___ksymtab_unused = .;
+
+               /* Kernel symbol table: GPL-only unused symbols */
+               __start___ksymtab_unused_gpl = .;
+               *(SORT(___ksymtab_unused_gpl+*))
+               __stop___ksymtab_unused_gpl = .;
+
+               /* Kernel symbol table: GPL-future symbols */
+               __start___ksymtab_gpl_future = .;
+               *(SORT(___ksymtab_gpl_future+*))
+               __stop___ksymtab_gpl_future = .;
+
+               /* Kernel symbol table: Normal symbols */
+               __start___kcrctab = .;
+               *(SORT(___kcrctab+*))
+               __stop___kcrctab = .;
+
+               /* Kernel symbol table: GPL-only symbols */
+               __start___kcrctab_gpl = .;
+               *(SORT(___kcrctab_gpl+*))
+               __stop___kcrctab_gpl = .;
+
+               /* Kernel symbol table: Normal unused symbols */
+               __start___kcrctab_unused = .;
+               *(SORT(___kcrctab_unused+*))
+               __stop___kcrctab_unused = .;
+
+               /* Kernel symbol table: GPL-only unused symbols */
+               __start___kcrctab_unused_gpl = .;
+               *(SORT(___kcrctab_unused_gpl+*))
+               __stop___kcrctab_unused_gpl = .;
+
+               /* Kernel symbol table: GPL-future symbols */
+               __start___kcrctab_gpl_future = .;
+               *(SORT(___kcrctab_gpl_future+*))
+               __stop___kcrctab_gpl_future = .;
+
+               /* Kernel symbol table: strings */
+               *(__ksymtab_strings)
+
+               /* Built-in module parameters */
+               . = ALIGN(4) ;
+               __start___param = .;
+               *(__param)
+               __stop___param = .;
+
+               /* Built-in module versions */
+               . = ALIGN(4) ;
+               __start___modver = .;
+               *(__modver)
+               __stop___modver = .;
+
+               . = ALIGN(4) ;
+               _etext = . ;
+       } > TEXT
+
+       .data DATA_ADDR : {
+               . = ALIGN(4);
+               _sdata = . ;
+               DATA_DATA
+               CACHELINE_ALIGNED_DATA(32)
+               PAGE_ALIGNED_DATA(PAGE_SIZE)
+               *(.data..shared_aligned)
+               INIT_TASK_DATA(THREAD_SIZE)
+               _edata = . ;
+       } > DATA
+
+       .m68k_fixup : {
+               __start_fixup = .;
+               *(.m68k_fixup)
+               __stop_fixup = .;
+       } > DATA
+       NOTES > DATA
+
+       .init.text : {
+               . = ALIGN(PAGE_SIZE);
+               __init_begin = .;
+       } > INIT
+       INIT_TEXT_SECTION(PAGE_SIZE) > INIT
+       INIT_DATA_SECTION(16) > INIT
+       .init.data : {
+               . = ALIGN(PAGE_SIZE);
+               __init_end = .;
+       } > INIT
+
+       .bss : {
+               . = ALIGN(4);
+               _sbss = . ;
+               *(.bss)
+               *(COMMON)
+               . = ALIGN(4) ;
+               _ebss = . ;
+               _end = . ;
+       } > BSSS
+
+       DISCARDS
+}
+
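
For a concrete picture of the MEMORY setup, take a hypothetical
CONFIG_RAMKERNEL build with CONFIG_RAMBASE=0x00000000,
CONFIG_RAMSIZE=0x01000000 and CONFIG_KERNELBASE=0x00020000 (values
for illustration only); the preprocessed script reduces to a single
region, and TEXT, DATA, INIT and BSSS all resolve to it:

	MEMORY {
		ram	: ORIGIN = 0x00020000, LENGTH = (0x00000000 + 0x01000000 - 0x00020000)
	}

The jiffies = jiffies_64 + 4 assignment aliases the 32-bit jiffies
symbol onto the low word of the 64-bit counter, which sits at byte
offset 4 on big-endian m68k.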
index d0993594f558b3408317aede57c634c407ea580f..63407c836826842a9dc2648c84fb3c50ba5c4f79 100644 (file)
@@ -31,7 +31,9 @@ SECTIONS
 
   RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
 
+  _sbss = .;
   BSS_SECTION(0, 0, 0)
+  _ebss = .;
 
   _edata = .;                  /* End of data section */
 
index 8080469ee6c11c3e86ab59febaa4773790a2ffc7..ad0f46d64c0b66a8d6b6cf4f82ffeaeeabbda084 100644 (file)
@@ -44,7 +44,9 @@ __init_begin = .;
        . = ALIGN(PAGE_SIZE);
        __init_end = .;
 
+  _sbss = .;
   BSS_SECTION(0, 0, 0)
+  _ebss = .;
 
   _end = . ;
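
Both MMU linker scripts now bracket the BSS with _sbss/_ebss, the
symbols the non-MMU script already exported, so code shared between
the two builds can reference the BSS bounds on any configuration. A
sketch of the kind of consumer this enables (illustrative helper, not
a function added by this series):

	extern char _sbss[], _ebss[];

	static void clear_bss(void)
	{
		char *p;

		for (p = _sbss; p < _ebss; p++)	/* zero the whole BSS range */
			*p = 0;
	}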
 
index 030dabf0bc538ddca7fd7ba6bdee96432f2bde56..69ec796388706bbda7dbccfaf6e5ffa1e6f9d4a8 100644 (file)
@@ -1,5 +1,14 @@
-#ifdef CONFIG_MMU
-#include "vmlinux.lds_mm.S"
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+PHDRS
+{
+  text PT_LOAD FILEHDR PHDRS FLAGS (7);
+  data PT_LOAD FLAGS (7);
+}
+#ifdef CONFIG_SUN3
+#include "vmlinux-sun3.lds"
 #else
-#include "vmlinux.lds_no.S"
+#include "vmlinux-std.lds"
+#endif
+#else
+#include "vmlinux-nommu.lds"
 #endif
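
FLAGS (7) on both PT_LOAD headers is the ELF segment permission mask,
the OR of the three standard p_flags bits (shown for reference):

	#define PF_X	0x1	/* segment is executable */
	#define PF_W	0x2	/* segment is writable */
	#define PF_R	0x4	/* segment is readable */
	/* 7 == PF_R | PF_W | PF_X: both segments are mapped rwx */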
diff --git a/arch/m68k/kernel/vmlinux.lds_mm.S b/arch/m68k/kernel/vmlinux.lds_mm.S
deleted file mode 100644 (file)
index 99ba315..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-PHDRS
-{
-  text PT_LOAD FILEHDR PHDRS FLAGS (7);
-  data PT_LOAD FLAGS (7);
-}
-#ifdef CONFIG_SUN3
-#include "vmlinux-sun3.lds"
-#else
-#include "vmlinux-std.lds"
-#endif
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
deleted file mode 100644 (file)
index 4e23893..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-/*
- *     vmlinux.lds.S -- master linker script for m68knommu arch
- *
- *     (C) Copyright 2002-2006, Greg Ungerer <gerg@snapgear.com>
- *
- *     This linker script is equipped to build either ROM loaded or RAM
- *     run kernels.
- */
-
-#include <asm-generic/vmlinux.lds.h>
-#include <asm/page.h>
-#include <asm/thread_info.h>
-
-#if defined(CONFIG_RAMKERNEL)
-#define        RAM_START       CONFIG_KERNELBASE
-#define        RAM_LENGTH      (CONFIG_RAMBASE + CONFIG_RAMSIZE - CONFIG_KERNELBASE)
-#define        TEXT            ram
-#define        DATA            ram
-#define        INIT            ram
-#define        BSSS            ram
-#endif
-#if defined(CONFIG_ROMKERNEL) || defined(CONFIG_HIMEMKERNEL)
-#define        RAM_START       CONFIG_RAMBASE
-#define        RAM_LENGTH      CONFIG_RAMSIZE
-#define        ROMVEC_START    CONFIG_ROMVEC
-#define        ROMVEC_LENGTH   CONFIG_ROMVECSIZE
-#define        ROM_START       CONFIG_ROMSTART
-#define        ROM_LENGTH      CONFIG_ROMSIZE
-#define        TEXT            rom
-#define        DATA            ram
-#define        INIT            ram
-#define        BSSS            ram
-#endif
-
-#ifndef DATA_ADDR
-#define        DATA_ADDR
-#endif
-
-
-OUTPUT_ARCH(m68k)
-ENTRY(_start)
-
-MEMORY {
-       ram     : ORIGIN = RAM_START, LENGTH = RAM_LENGTH
-#ifdef ROM_START
-       romvec  : ORIGIN = ROMVEC_START, LENGTH = ROMVEC_LENGTH
-       rom     : ORIGIN = ROM_START, LENGTH = ROM_LENGTH
-#endif
-}
-
-jiffies = jiffies_64 + 4;
-
-SECTIONS {
-
-#ifdef ROMVEC_START
-       . = ROMVEC_START ;
-       .romvec : {
-               __rom_start = . ;
-               _romvec = .;
-               *(.data..initvect)
-       } > romvec
-#endif
-
-       .text : {
-               _text = .;
-               _stext = . ;
-               HEAD_TEXT
-               TEXT_TEXT
-               SCHED_TEXT
-               LOCK_TEXT
-               *(.text..lock)
-
-               . = ALIGN(16);          /* Exception table              */
-               __start___ex_table = .;
-               *(__ex_table)
-               __stop___ex_table = .;
-
-               *(.rodata) *(.rodata.*)
-               *(__vermagic)           /* Kernel version magic */
-               *(.rodata1)
-               *(.rodata.str1.1)
-
-               /* Kernel symbol table: Normal symbols */
-               . = ALIGN(4);
-               __start___ksymtab = .;
-               *(SORT(___ksymtab+*))
-               __stop___ksymtab = .;
-
-               /* Kernel symbol table: GPL-only symbols */
-               __start___ksymtab_gpl = .;
-               *(SORT(___ksymtab_gpl+*))
-               __stop___ksymtab_gpl = .;
-
-               /* Kernel symbol table: Normal unused symbols */
-               __start___ksymtab_unused = .;
-               *(SORT(___ksymtab_unused+*))
-               __stop___ksymtab_unused = .;
-
-               /* Kernel symbol table: GPL-only unused symbols */
-               __start___ksymtab_unused_gpl = .;
-               *(SORT(___ksymtab_unused_gpl+*))
-               __stop___ksymtab_unused_gpl = .;
-
-               /* Kernel symbol table: GPL-future symbols */
-               __start___ksymtab_gpl_future = .;
-               *(SORT(___ksymtab_gpl_future+*))
-               __stop___ksymtab_gpl_future = .;
-
-               /* Kernel symbol table: Normal symbols */
-               __start___kcrctab = .;
-               *(SORT(___kcrctab+*))
-               __stop___kcrctab = .;
-
-               /* Kernel symbol table: GPL-only symbols */
-               __start___kcrctab_gpl = .;
-               *(SORT(___kcrctab_gpl+*))
-               __stop___kcrctab_gpl = .;
-
-               /* Kernel symbol table: Normal unused symbols */
-               __start___kcrctab_unused = .;
-               *(SORT(___kcrctab_unused+*))
-               __stop___kcrctab_unused = .;
-
-               /* Kernel symbol table: GPL-only unused symbols */
-               __start___kcrctab_unused_gpl = .;
-               *(SORT(___kcrctab_unused_gpl+*))
-               __stop___kcrctab_unused_gpl = .;
-
-               /* Kernel symbol table: GPL-future symbols */
-               __start___kcrctab_gpl_future = .;
-               *(SORT(___kcrctab_gpl_future+*))
-               __stop___kcrctab_gpl_future = .;
-
-               /* Kernel symbol table: strings */
-               *(__ksymtab_strings)
-
-               /* Built-in module parameters */
-               . = ALIGN(4) ;
-               __start___param = .;
-               *(__param)
-               __stop___param = .;
-
-               /* Built-in module versions */
-               . = ALIGN(4) ;
-               __start___modver = .;
-               *(__modver)
-               __stop___modver = .;
-
-               . = ALIGN(4) ;
-               _etext = . ;
-       } > TEXT
-
-       .data DATA_ADDR : {
-               . = ALIGN(4);
-               _sdata = . ;
-               DATA_DATA
-               CACHELINE_ALIGNED_DATA(32)
-               PAGE_ALIGNED_DATA(PAGE_SIZE)
-               *(.data..shared_aligned)
-               INIT_TASK_DATA(THREAD_SIZE)
-               _edata = . ;
-       } > DATA
-
-       .init.text : {
-               . = ALIGN(PAGE_SIZE);
-               __init_begin = .;
-       } > INIT
-       INIT_TEXT_SECTION(PAGE_SIZE) > INIT
-       INIT_DATA_SECTION(16) > INIT
-       .init.data : {
-               . = ALIGN(PAGE_SIZE);
-               __init_end = .;
-       } > INIT
-
-       .bss : {
-               . = ALIGN(4);
-               _sbss = . ;
-               *(.bss)
-               *(COMMON)
-               . = ALIGN(4) ;
-               _ebss = . ;
-               _end = . ;
-       } > BSSS
-
-       DISCARDS
-}
-
index 1a1bd9067e90a3ec284f240f42a041b2b28c2863..a9d782d34276c427cac22538cccefa47d8d24d11 100644 (file)
@@ -6,9 +6,11 @@
 lib-y  := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
           memcpy.o memset.o memmove.o
 
-ifdef CONFIG_MMU
-lib-y  += string.o uaccess.o checksum_mm.o
-else
-lib-y  += mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o checksum_no.o
+lib-$(CONFIG_MMU) += string.o uaccess.o
+lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += mulsi3.o divsi3.o udivsi3.o
+lib-$(CONFIG_CPU_HAS_NO_MULDIV64) += modsi3.o umodsi3.o
+
+ifndef CONFIG_GENERIC_CSUM
+lib-y  += checksum.o
 endif
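
The lib-$(CONFIG_...) lines use the usual kbuild idiom: a =y symbol
expands the variable name to lib-y and the objects get built, while an
unset symbol expands to the list "lib-", which kbuild ignores. Sketch
of both expansions:

	# CONFIG_MMU=y:
	lib-y += string.o uaccess.o	# built into lib.a
	# CONFIG_MMU unset:
	lib-  += string.o uaccess.o	# "lib-" is never linked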
 
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
new file mode 100644 (file)
index 0000000..6216f12
--- /dev/null
@@ -0,0 +1,425 @@
+/*
+ * INET                An implementation of the TCP/IP protocol suite for the LINUX
+ *             operating system.  INET is implemented using the  BSD Socket
+ *             interface as the means of communication with the user level.
+ *
+ *             IP/TCP/UDP checksumming routines
+ *
+ * Authors:    Jorge Cwik, <jorge@laser.satlink.net>
+ *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *             Tom May, <ftom@netcom.com>
+ *             Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
+ *             Lots of code moved from tcp.c and ip.c; see those files
+ *             for more names.
+ *
+ * 03/02/96    Jes Sorensen, Andreas Schwab, Roman Hodek:
+ *             Fixed some nasty bugs, causing some horrible crashes.
+ *             A: At some points, the sum (%0) was used as
+ *             length-counter instead of the length counter
+ *             (%1). Thanks to Roman Hodek for pointing this out.
+ *             B: GCC seems to mess up if one uses too many
+ *             data-registers to hold input values and one tries to
+ *             specify d0 and d1 as scratch registers. Letting gcc
+ *             choose these registers itself solves the problem.
+ *
+ *             This program is free software; you can redistribute it and/or
+ *             modify it under the terms of the GNU General Public License
+ *             as published by the Free Software Foundation; either version
+ *             2 of the License, or (at your option) any later version.
+ *
+ * 1998/8/31   Andreas Schwab:
+ *             Zero out rest of buffer on exception in
+ *             csum_partial_copy_from_user.
+ */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+
+/*
+ * computes a partial checksum, e.g. for TCP/UDP fragments
+ */
+
+__wsum csum_partial(const void *buff, int len, __wsum sum)
+{
+       unsigned long tmp1, tmp2;
+       /*
+        * Experiments with ethernet and slip connections show that buff
+        * is aligned on either a 2-byte or 4-byte boundary.
+        */
+       __asm__("movel %2,%3\n\t"
+               "btst #1,%3\n\t"        /* Check alignment */
+               "jeq 2f\n\t"
+               "subql #2,%1\n\t"       /* buff%4==2: treat first word */
+               "jgt 1f\n\t"
+               "addql #2,%1\n\t"       /* len was == 2, treat only rest */
+               "jra 4f\n"
+            "1:\t"
+               "addw %2@+,%0\n\t"      /* add first word to sum */
+               "clrl %3\n\t"
+               "addxl %3,%0\n"         /* add X bit */
+            "2:\t"
+               /* unrolled loop for the main part: do 8 longs at once */
+               "movel %1,%3\n\t"       /* save len in tmp1 */
+               "lsrl #5,%1\n\t"        /* len/32 */
+               "jeq 2f\n\t"            /* not enough... */
+               "subql #1,%1\n"
+            "1:\t"
+               "movel %2@+,%4\n\t"
+               "addxl %4,%0\n\t"
+               "movel %2@+,%4\n\t"
+               "addxl %4,%0\n\t"
+               "movel %2@+,%4\n\t"
+               "addxl %4,%0\n\t"
+               "movel %2@+,%4\n\t"
+               "addxl %4,%0\n\t"
+               "movel %2@+,%4\n\t"
+               "addxl %4,%0\n\t"
+               "movel %2@+,%4\n\t"
+               "addxl %4,%0\n\t"
+               "movel %2@+,%4\n\t"
+               "addxl %4,%0\n\t"
+               "movel %2@+,%4\n\t"
+               "addxl %4,%0\n\t"
+               "dbra %1,1b\n\t"
+               "clrl %4\n\t"
+               "addxl %4,%0\n\t"       /* add X bit */
+               "clrw %1\n\t"
+               "subql #1,%1\n\t"
+               "jcc 1b\n"
+            "2:\t"
+               "movel %3,%1\n\t"       /* restore len from tmp1 */
+               "andw #0x1c,%3\n\t"     /* number of rest longs */
+               "jeq 4f\n\t"
+               "lsrw #2,%3\n\t"
+               "subqw #1,%3\n"
+            "3:\t"
+               /* loop for rest longs */
+               "movel %2@+,%4\n\t"
+               "addxl %4,%0\n\t"
+               "dbra %3,3b\n\t"
+               "clrl %4\n\t"
+               "addxl %4,%0\n"         /* add X bit */
+            "4:\t"
+               /* now check for rest bytes that do not fit into longs */
+               "andw #3,%1\n\t"
+               "jeq 7f\n\t"
+               "clrl %4\n\t"           /* clear tmp2 for rest bytes */
+               "subqw #2,%1\n\t"
+               "jlt 5f\n\t"
+               "movew %2@+,%4\n\t"     /* have rest >= 2: get word */
+               "swap %4\n\t"           /* into bits 16..31 */
+               "tstw %1\n\t"           /* another byte? */
+               "jeq 6f\n"
+            "5:\t"
+               "moveb %2@,%4\n\t"      /* have odd rest: get byte */
+               "lslw #8,%4\n\t"        /* into bits 8..15; 16..31 untouched */
+            "6:\t"
+               "addl %4,%0\n\t"        /* now add rest long to sum */
+               "clrl %4\n\t"
+               "addxl %4,%0\n"         /* add X bit */
+            "7:\t"
+               : "=d" (sum), "=d" (len), "=a" (buff),
+                 "=&d" (tmp1), "=&d" (tmp2)
+               : "0" (sum), "1" (len), "2" (buff)
+           );
+       return(sum);
+}
+
+EXPORT_SYMBOL(csum_partial);
+
+
+/*
+ * copy from user space while checksumming, with exception handling.
+ */
+
+__wsum
+csum_partial_copy_from_user(const void __user *src, void *dst,
+                           int len, __wsum sum, int *csum_err)
+{
+       /*
+        * GCC doesn't like more than 10 operands for the asm
+        * statements so we have to use tmp2 for the error
+        * code.
+        */
+       unsigned long tmp1, tmp2;
+
+       __asm__("movel %2,%4\n\t"
+               "btst #1,%4\n\t"        /* Check alignment */
+               "jeq 2f\n\t"
+               "subql #2,%1\n\t"       /* buff%4==2: treat first word */
+               "jgt 1f\n\t"
+               "addql #2,%1\n\t"       /* len was == 2, treat only rest */
+               "jra 4f\n"
+            "1:\n"
+            "10:\t"
+               "movesw %2@+,%4\n\t"    /* add first word to sum */
+               "addw %4,%0\n\t"
+               "movew %4,%3@+\n\t"
+               "clrl %4\n\t"
+               "addxl %4,%0\n"         /* add X bit */
+            "2:\t"
+               /* unrolled loop for the main part: do 8 longs at once */
+               "movel %1,%4\n\t"       /* save len in tmp1 */
+               "lsrl #5,%1\n\t"        /* len/32 */
+               "jeq 2f\n\t"            /* not enough... */
+               "subql #1,%1\n"
+            "1:\n"
+            "11:\t"
+               "movesl %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+            "12:\t"
+               "movesl %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+            "13:\t"
+               "movesl %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+            "14:\t"
+               "movesl %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+            "15:\t"
+               "movesl %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+            "16:\t"
+               "movesl %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+            "17:\t"
+               "movesl %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+            "18:\t"
+               "movesl %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "dbra %1,1b\n\t"
+               "clrl %5\n\t"
+               "addxl %5,%0\n\t"       /* add X bit */
+               "clrw %1\n\t"
+               "subql #1,%1\n\t"
+               "jcc 1b\n"
+            "2:\t"
+               "movel %4,%1\n\t"       /* restore len from tmp1 */
+               "andw #0x1c,%4\n\t"     /* number of rest longs */
+               "jeq 4f\n\t"
+               "lsrw #2,%4\n\t"
+               "subqw #1,%4\n"
+            "3:\n"
+               /* loop for rest longs */
+            "19:\t"
+               "movesl %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "dbra %4,3b\n\t"
+               "clrl %5\n\t"
+               "addxl %5,%0\n"         /* add X bit */
+            "4:\t"
+               /* now check for rest bytes that do not fit into longs */
+               "andw #3,%1\n\t"
+               "jeq 7f\n\t"
+               "clrl %5\n\t"           /* clear tmp2 for rest bytes */
+               "subqw #2,%1\n\t"
+               "jlt 5f\n\t"
+            "20:\t"
+               "movesw %2@+,%5\n\t"    /* have rest >= 2: get word */
+               "movew %5,%3@+\n\t"
+               "swap %5\n\t"           /* into bits 16..31 */
+               "tstw %1\n\t"           /* another byte? */
+               "jeq 6f\n"
+            "5:\n"
+            "21:\t"
+               "movesb %2@,%5\n\t"     /* have odd rest: get byte */
+               "moveb %5,%3@+\n\t"
+               "lslw #8,%5\n\t"        /* into bits 8..15; 16..31 untouched */
+            "6:\t"
+               "addl %5,%0\n\t"        /* now add rest long to sum */
+               "clrl %5\n\t"
+               "addxl %5,%0\n\t"       /* add X bit */
+            "7:\t"
+               "clrl %5\n"             /* no error - clear return value */
+            "8:\n"
+               ".section .fixup,\"ax\"\n"
+               ".even\n"
+               /* If any exception occurs zero out the rest.
+                  Similarities with the code above are intentional :-) */
+            "90:\t"
+               "clrw %3@+\n\t"
+               "movel %1,%4\n\t"
+               "lsrl #5,%1\n\t"
+               "jeq 1f\n\t"
+               "subql #1,%1\n"
+            "91:\t"
+               "clrl %3@+\n"
+            "92:\t"
+               "clrl %3@+\n"
+            "93:\t"
+               "clrl %3@+\n"
+            "94:\t"
+               "clrl %3@+\n"
+            "95:\t"
+               "clrl %3@+\n"
+            "96:\t"
+               "clrl %3@+\n"
+            "97:\t"
+               "clrl %3@+\n"
+            "98:\t"
+               "clrl %3@+\n\t"
+               "dbra %1,91b\n\t"
+               "clrw %1\n\t"
+               "subql #1,%1\n\t"
+               "jcc 91b\n"
+            "1:\t"
+               "movel %4,%1\n\t"
+               "andw #0x1c,%4\n\t"
+               "jeq 1f\n\t"
+               "lsrw #2,%4\n\t"
+               "subqw #1,%4\n"
+            "99:\t"
+               "clrl %3@+\n\t"
+               "dbra %4,99b\n\t"
+            "1:\t"
+               "andw #3,%1\n\t"
+               "jeq 9f\n"
+            "100:\t"
+               "clrw %3@+\n\t"
+               "tstw %1\n\t"
+               "jeq 9f\n"
+            "101:\t"
+               "clrb %3@+\n"
+            "9:\t"
+#define STR(X) STR1(X)
+#define STR1(X) #X
+               "moveq #-" STR(EFAULT) ",%5\n\t"
+               "jra 8b\n"
+               ".previous\n"
+               ".section __ex_table,\"a\"\n"
+               ".long 10b,90b\n"
+               ".long 11b,91b\n"
+               ".long 12b,92b\n"
+               ".long 13b,93b\n"
+               ".long 14b,94b\n"
+               ".long 15b,95b\n"
+               ".long 16b,96b\n"
+               ".long 17b,97b\n"
+               ".long 18b,98b\n"
+               ".long 19b,99b\n"
+               ".long 20b,100b\n"
+               ".long 21b,101b\n"
+               ".previous"
+               : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
+                 "=&d" (tmp1), "=d" (tmp2)
+               : "0" (sum), "1" (len), "2" (src), "3" (dst)
+           );
+
+       *csum_err = tmp2;
+
+       return(sum);
+}
+
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+
+
+/*
+ * copy from kernel space while checksumming, otherwise like csum_partial
+ */
+
+__wsum
+csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+{
+       unsigned long tmp1, tmp2;
+       __asm__("movel %2,%4\n\t"
+               "btst #1,%4\n\t"        /* Check alignment */
+               "jeq 2f\n\t"
+               "subql #2,%1\n\t"       /* buff%4==2: treat first word */
+               "jgt 1f\n\t"
+               "addql #2,%1\n\t"       /* len was == 2, treat only rest */
+               "jra 4f\n"
+            "1:\t"
+               "movew %2@+,%4\n\t"     /* add first word to sum */
+               "addw %4,%0\n\t"
+               "movew %4,%3@+\n\t"
+               "clrl %4\n\t"
+               "addxl %4,%0\n"         /* add X bit */
+            "2:\t"
+               /* unrolled loop for the main part: do 8 longs at once */
+               "movel %1,%4\n\t"       /* save len in tmp1 */
+               "lsrl #5,%1\n\t"        /* len/32 */
+               "jeq 2f\n\t"            /* not enough... */
+               "subql #1,%1\n"
+            "1:\t"
+               "movel %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "movel %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "movel %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "movel %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "movel %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "movel %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "movel %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "movel %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "dbra %1,1b\n\t"
+               "clrl %5\n\t"
+               "addxl %5,%0\n\t"       /* add X bit */
+               "clrw %1\n\t"
+               "subql #1,%1\n\t"
+               "jcc 1b\n"
+            "2:\t"
+               "movel %4,%1\n\t"       /* restore len from tmp1 */
+               "andw #0x1c,%4\n\t"     /* number of rest longs */
+               "jeq 4f\n\t"
+               "lsrw #2,%4\n\t"
+               "subqw #1,%4\n"
+            "3:\t"
+               /* loop for rest longs */
+               "movel %2@+,%5\n\t"
+               "addxl %5,%0\n\t"
+               "movel %5,%3@+\n\t"
+               "dbra %4,3b\n\t"
+               "clrl %5\n\t"
+               "addxl %5,%0\n"         /* add X bit */
+            "4:\t"
+               /* now check for rest bytes that do not fit into longs */
+               "andw #3,%1\n\t"
+               "jeq 7f\n\t"
+               "clrl %5\n\t"           /* clear tmp2 for rest bytes */
+               "subqw #2,%1\n\t"
+               "jlt 5f\n\t"
+               "movew %2@+,%5\n\t"     /* have rest >= 2: get word */
+               "movew %5,%3@+\n\t"
+               "swap %5\n\t"           /* into bits 16..31 */
+               "tstw %1\n\t"           /* another byte? */
+               "jeq 6f\n"
+            "5:\t"
+               "moveb %2@,%5\n\t"      /* have odd rest: get byte */
+               "moveb %5,%3@+\n\t"
+               "lslw #8,%5\n"          /* into bits 8..15; 16..31 untouched */
+            "6:\t"
+               "addl %5,%0\n\t"        /* now add rest long to sum */
+               "clrl %5\n\t"
+               "addxl %5,%0\n"         /* add X bit */
+            "7:\t"
+               : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
+                 "=&d" (tmp1), "=&d" (tmp2)
+               : "0" (sum), "1" (len), "2" (src), "3" (dst)
+           );
+       return(sum);
+}
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
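
The unrolled asm in csum_partial() above is the classic Internet
one's-complement accumulator: add 32-bit words and fold the carry back
in with addxl. A portable C sketch of the same arithmetic
(illustration only, not the kernel implementation; assumes big-endian
word order as on m68k):

	#include <stddef.h>
	#include <stdint.h>

	static uint32_t csum_partial_sketch(const uint8_t *buf, size_t len,
					    uint32_t sum)
	{
		uint64_t acc = sum;

		while (len >= 2) {	/* accumulate 16-bit big-endian words */
			acc += (uint32_t)((buf[0] << 8) | buf[1]);
			buf += 2;
			len -= 2;
		}
		if (len)		/* odd trailing byte fills the high half */
			acc += (uint32_t)buf[0] << 8;
		while (acc >> 32)	/* end-around carry fold */
			acc = (acc & 0xffffffffu) + (acc >> 32);
		return (uint32_t)acc;
	}

Callers later fold the 32-bit partial sum to 16 bits and invert it to
obtain the final checksum; csum_partial_copy_from_user() adds the
movesl/.fixup/__ex_table machinery so a fault mid-copy zero-fills the
rest of the destination and returns -EFAULT through tmp2.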
diff --git a/arch/m68k/lib/checksum_mm.c b/arch/m68k/lib/checksum_mm.c
deleted file mode 100644 (file)
index 6216f12..0000000
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             IP/TCP/UDP checksumming routines
- *
- * Authors:    Jorge Cwik, <jorge@laser.satlink.net>
- *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *             Tom May, <ftom@netcom.com>
- *             Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
- *             Lots of code moved from tcp.c and ip.c; see those files
- *             for more names.
- *
- * 03/02/96    Jes Sorensen, Andreas Schwab, Roman Hodek:
- *             Fixed some nasty bugs, causing some horrible crashes.
- *             A: At some points, the sum (%0) was used as
- *             length-counter instead of the length counter
- *             (%1). Thanks to Roman Hodek for pointing this out.
- *             B: GCC seems to mess up if one uses too many
- *             data-registers to hold input values and one tries to
- *             specify d0 and d1 as scratch registers. Letting gcc
- *             choose these registers itself solves the problem.
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- *
- * 1998/8/31   Andreas Schwab:
- *             Zero out rest of buffer on exception in
- *             csum_partial_copy_from_user.
- */
-
-#include <linux/module.h>
-#include <net/checksum.h>
-
-/*
- * computes a partial checksum, e.g. for TCP/UDP fragments
- */
-
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
-       unsigned long tmp1, tmp2;
-         /*
-          * Experiments with ethernet and slip connections show that buff
-          * is aligned on either a 2-byte or 4-byte boundary.
-          */
-       __asm__("movel %2,%3\n\t"
-               "btst #1,%3\n\t"        /* Check alignment */
-               "jeq 2f\n\t"
-               "subql #2,%1\n\t"       /* buff%4==2: treat first word */
-               "jgt 1f\n\t"
-               "addql #2,%1\n\t"       /* len was == 2, treat only rest */
-               "jra 4f\n"
-            "1:\t"
-               "addw %2@+,%0\n\t"      /* add first word to sum */
-               "clrl %3\n\t"
-               "addxl %3,%0\n"         /* add X bit */
-            "2:\t"
-               /* unrolled loop for the main part: do 8 longs at once */
-               "movel %1,%3\n\t"       /* save len in tmp1 */
-               "lsrl #5,%1\n\t"        /* len/32 */
-               "jeq 2f\n\t"            /* not enough... */
-               "subql #1,%1\n"
-            "1:\t"
-               "movel %2@+,%4\n\t"
-               "addxl %4,%0\n\t"
-               "movel %2@+,%4\n\t"
-               "addxl %4,%0\n\t"
-               "movel %2@+,%4\n\t"
-               "addxl %4,%0\n\t"
-               "movel %2@+,%4\n\t"
-               "addxl %4,%0\n\t"
-               "movel %2@+,%4\n\t"
-               "addxl %4,%0\n\t"
-               "movel %2@+,%4\n\t"
-               "addxl %4,%0\n\t"
-               "movel %2@+,%4\n\t"
-               "addxl %4,%0\n\t"
-               "movel %2@+,%4\n\t"
-               "addxl %4,%0\n\t"
-               "dbra %1,1b\n\t"
-               "clrl %4\n\t"
-               "addxl %4,%0\n\t"       /* add X bit */
-               "clrw %1\n\t"
-               "subql #1,%1\n\t"
-               "jcc 1b\n"
-            "2:\t"
-               "movel %3,%1\n\t"       /* restore len from tmp1 */
-               "andw #0x1c,%3\n\t"     /* number of rest longs */
-               "jeq 4f\n\t"
-               "lsrw #2,%3\n\t"
-               "subqw #1,%3\n"
-            "3:\t"
-               /* loop for rest longs */
-               "movel %2@+,%4\n\t"
-               "addxl %4,%0\n\t"
-               "dbra %3,3b\n\t"
-               "clrl %4\n\t"
-               "addxl %4,%0\n"         /* add X bit */
-            "4:\t"
-               /* now check for rest bytes that do not fit into longs */
-               "andw #3,%1\n\t"
-               "jeq 7f\n\t"
-               "clrl %4\n\t"           /* clear tmp2 for rest bytes */
-               "subqw #2,%1\n\t"
-               "jlt 5f\n\t"
-               "movew %2@+,%4\n\t"     /* have rest >= 2: get word */
-               "swap %4\n\t"           /* into bits 16..31 */
-               "tstw %1\n\t"           /* another byte? */
-               "jeq 6f\n"
-            "5:\t"
-               "moveb %2@,%4\n\t"      /* have odd rest: get byte */
-               "lslw #8,%4\n\t"        /* into bits 8..15; 16..31 untouched */
-            "6:\t"
-               "addl %4,%0\n\t"        /* now add rest long to sum */
-               "clrl %4\n\t"
-               "addxl %4,%0\n"         /* add X bit */
-            "7:\t"
-               : "=d" (sum), "=d" (len), "=a" (buff),
-                 "=&d" (tmp1), "=&d" (tmp2)
-               : "0" (sum), "1" (len), "2" (buff)
-           );
-       return(sum);
-}
-
-EXPORT_SYMBOL(csum_partial);
-
-
-/*
- * copy from user space while checksumming, with exception handling.
- */
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
-                           int len, __wsum sum, int *csum_err)
-{
-       /*
-        * GCC doesn't like more than 10 operands for the asm
-        * statements so we have to use tmp2 for the error
-        * code.
-        */
-       unsigned long tmp1, tmp2;
-
-       __asm__("movel %2,%4\n\t"
-               "btst #1,%4\n\t"        /* Check alignment */
-               "jeq 2f\n\t"
-               "subql #2,%1\n\t"       /* buff%4==2: treat first word */
-               "jgt 1f\n\t"
-               "addql #2,%1\n\t"       /* len was == 2, treat only rest */
-               "jra 4f\n"
-            "1:\n"
-            "10:\t"
-               "movesw %2@+,%4\n\t"    /* add first word to sum */
-               "addw %4,%0\n\t"
-               "movew %4,%3@+\n\t"
-               "clrl %4\n\t"
-               "addxl %4,%0\n"         /* add X bit */
-            "2:\t"
-               /* unrolled loop for the main part: do 8 longs at once */
-               "movel %1,%4\n\t"       /* save len in tmp1 */
-               "lsrl #5,%1\n\t"        /* len/32 */
-               "jeq 2f\n\t"            /* not enough... */
-               "subql #1,%1\n"
-            "1:\n"
-            "11:\t"
-               "movesl %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-            "12:\t"
-               "movesl %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-            "13:\t"
-               "movesl %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-            "14:\t"
-               "movesl %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-            "15:\t"
-               "movesl %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-            "16:\t"
-               "movesl %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-            "17:\t"
-               "movesl %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-            "18:\t"
-               "movesl %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "dbra %1,1b\n\t"
-               "clrl %5\n\t"
-               "addxl %5,%0\n\t"       /* add X bit */
-               "clrw %1\n\t"
-               "subql #1,%1\n\t"
-               "jcc 1b\n"
-            "2:\t"
-               "movel %4,%1\n\t"       /* restore len from tmp1 */
-               "andw #0x1c,%4\n\t"     /* number of rest longs */
-               "jeq 4f\n\t"
-               "lsrw #2,%4\n\t"
-               "subqw #1,%4\n"
-            "3:\n"
-               /* loop for rest longs */
-            "19:\t"
-               "movesl %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "dbra %4,3b\n\t"
-               "clrl %5\n\t"
-               "addxl %5,%0\n"         /* add X bit */
-            "4:\t"
-               /* now check for rest bytes that do not fit into longs */
-               "andw #3,%1\n\t"
-               "jeq 7f\n\t"
-               "clrl %5\n\t"           /* clear tmp2 for rest bytes */
-               "subqw #2,%1\n\t"
-               "jlt 5f\n\t"
-            "20:\t"
-               "movesw %2@+,%5\n\t"    /* have rest >= 2: get word */
-               "movew %5,%3@+\n\t"
-               "swap %5\n\t"           /* into bits 16..31 */
-               "tstw %1\n\t"           /* another byte? */
-               "jeq 6f\n"
-            "5:\n"
-            "21:\t"
-               "movesb %2@,%5\n\t"     /* have odd rest: get byte */
-               "moveb %5,%3@+\n\t"
-               "lslw #8,%5\n\t"        /* into bits 8..15; 16..31 untouched */
-            "6:\t"
-               "addl %5,%0\n\t"        /* now add rest long to sum */
-               "clrl %5\n\t"
-               "addxl %5,%0\n\t"       /* add X bit */
-            "7:\t"
-               "clrl %5\n"             /* no error - clear return value */
-            "8:\n"
-               ".section .fixup,\"ax\"\n"
-               ".even\n"
-               /* If any exception occurs zero out the rest.
-                  Similarities with the code above are intentional :-) */
-            "90:\t"
-               "clrw %3@+\n\t"
-               "movel %1,%4\n\t"
-               "lsrl #5,%1\n\t"
-               "jeq 1f\n\t"
-               "subql #1,%1\n"
-            "91:\t"
-               "clrl %3@+\n"
-            "92:\t"
-               "clrl %3@+\n"
-            "93:\t"
-               "clrl %3@+\n"
-            "94:\t"
-               "clrl %3@+\n"
-            "95:\t"
-               "clrl %3@+\n"
-            "96:\t"
-               "clrl %3@+\n"
-            "97:\t"
-               "clrl %3@+\n"
-            "98:\t"
-               "clrl %3@+\n\t"
-               "dbra %1,91b\n\t"
-               "clrw %1\n\t"
-               "subql #1,%1\n\t"
-               "jcc 91b\n"
-            "1:\t"
-               "movel %4,%1\n\t"
-               "andw #0x1c,%4\n\t"
-               "jeq 1f\n\t"
-               "lsrw #2,%4\n\t"
-               "subqw #1,%4\n"
-            "99:\t"
-               "clrl %3@+\n\t"
-               "dbra %4,99b\n\t"
-            "1:\t"
-               "andw #3,%1\n\t"
-               "jeq 9f\n"
-            "100:\t"
-               "clrw %3@+\n\t"
-               "tstw %1\n\t"
-               "jeq 9f\n"
-            "101:\t"
-               "clrb %3@+\n"
-            "9:\t"
-#define STR(X) STR1(X)
-#define STR1(X) #X
-               "moveq #-" STR(EFAULT) ",%5\n\t"
-               "jra 8b\n"
-               ".previous\n"
-               ".section __ex_table,\"a\"\n"
-               ".long 10b,90b\n"
-               ".long 11b,91b\n"
-               ".long 12b,92b\n"
-               ".long 13b,93b\n"
-               ".long 14b,94b\n"
-               ".long 15b,95b\n"
-               ".long 16b,96b\n"
-               ".long 17b,97b\n"
-               ".long 18b,98b\n"
-               ".long 19b,99b\n"
-               ".long 20b,100b\n"
-               ".long 21b,101b\n"
-               ".previous"
-               : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
-                 "=&d" (tmp1), "=d" (tmp2)
-               : "0" (sum), "1" (len), "2" (src), "3" (dst)
-           );
-
-       *csum_err = tmp2;
-
-       return(sum);
-}
-
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
-
-/*
- * copy from kernel space while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
-       unsigned long tmp1, tmp2;
-       __asm__("movel %2,%4\n\t"
-               "btst #1,%4\n\t"        /* Check alignment */
-               "jeq 2f\n\t"
-               "subql #2,%1\n\t"       /* buff%4==2: treat first word */
-               "jgt 1f\n\t"
-               "addql #2,%1\n\t"       /* len was == 2, treat only rest */
-               "jra 4f\n"
-            "1:\t"
-               "movew %2@+,%4\n\t"     /* add first word to sum */
-               "addw %4,%0\n\t"
-               "movew %4,%3@+\n\t"
-               "clrl %4\n\t"
-               "addxl %4,%0\n"         /* add X bit */
-            "2:\t"
-               /* unrolled loop for the main part: do 8 longs at once */
-               "movel %1,%4\n\t"       /* save len in tmp1 */
-               "lsrl #5,%1\n\t"        /* len/32 */
-               "jeq 2f\n\t"            /* not enough... */
-               "subql #1,%1\n"
-            "1:\t"
-               "movel %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "movel %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "movel %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "movel %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "movel %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "movel %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "movel %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "movel %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "dbra %1,1b\n\t"
-               "clrl %5\n\t"
-               "addxl %5,%0\n\t"       /* add X bit */
-               "clrw %1\n\t"
-               "subql #1,%1\n\t"
-               "jcc 1b\n"
-            "2:\t"
-               "movel %4,%1\n\t"       /* restore len from tmp1 */
-               "andw #0x1c,%4\n\t"     /* number of rest longs */
-               "jeq 4f\n\t"
-               "lsrw #2,%4\n\t"
-               "subqw #1,%4\n"
-            "3:\t"
-               /* loop for rest longs */
-               "movel %2@+,%5\n\t"
-               "addxl %5,%0\n\t"
-               "movel %5,%3@+\n\t"
-               "dbra %4,3b\n\t"
-               "clrl %5\n\t"
-               "addxl %5,%0\n"         /* add X bit */
-            "4:\t"
-               /* now check for rest bytes that do not fit into longs */
-               "andw #3,%1\n\t"
-               "jeq 7f\n\t"
-               "clrl %5\n\t"           /* clear tmp2 for rest bytes */
-               "subqw #2,%1\n\t"
-               "jlt 5f\n\t"
-               "movew %2@+,%5\n\t"     /* have rest >= 2: get word */
-               "movew %5,%3@+\n\t"
-               "swap %5\n\t"           /* into bits 16..31 */
-               "tstw %1\n\t"           /* another byte? */
-               "jeq 6f\n"
-            "5:\t"
-               "moveb %2@,%5\n\t"      /* have odd rest: get byte */
-               "moveb %5,%3@+\n\t"
-               "lslw #8,%5\n"          /* into bits 8..15; 16..31 untouched */
-            "6:\t"
-               "addl %5,%0\n\t"        /* now add rest long to sum */
-               "clrl %5\n\t"
-               "addxl %5,%0\n"         /* add X bit */
-            "7:\t"
-               : "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
-                 "=&d" (tmp1), "=&d" (tmp2)
-               : "0" (sum), "1" (len), "2" (src), "3" (dst)
-           );
-    return(sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/m68k/lib/checksum_no.c b/arch/m68k/lib/checksum_no.c
deleted file mode 100644 (file)
index e4c6354..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             IP/TCP/UDP checksumming routines
- *
- * Authors:    Jorge Cwik, <jorge@laser.satlink.net>
- *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *             Tom May, <ftom@netcom.com>
- *             Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
- *             Lots of code moved from tcp.c and ip.c; see those files
- *             for more names.
- *
- * 03/02/96    Jes Sorensen, Andreas Schwab, Roman Hodek:
- *             Fixed some nasty bugs, causing some horrible crashes.
- *             A: At some points, the sum (%0) was used as
- *             length-counter instead of the length counter
- *             (%1). Thanks to Roman Hodek for pointing this out.
- *             B: GCC seems to mess up if one uses too many
- *             data-registers to hold input values and one tries to
- *             specify d0 and d1 as scratch registers. Letting gcc choose these
- *      registers itself solves the problem.
- *
- *             This program is free software; you can redistribute it and/or
- *             modify it under the terms of the GNU General Public License
- *             as published by the Free Software Foundation; either version
- *             2 of the License, or (at your option) any later version.
- */
-/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access kills, so most
-   of the assembly has to go. */
-
-#include <linux/module.h>
-#include <net/checksum.h>
-
-static inline unsigned short from32to16(unsigned long x)
-{
-       /* add up 16-bit and 16-bit for 16+c bit */
-       x = (x & 0xffff) + (x >> 16);
-       /* add up carry.. */
-       x = (x & 0xffff) + (x >> 16);
-       return x;
-}
-
-static unsigned long do_csum(const unsigned char * buff, int len)
-{
-       int odd, count;
-       unsigned long result = 0;
-
-       if (len <= 0)
-               goto out;
-       odd = 1 & (unsigned long) buff;
-       if (odd) {
-               result = *buff;
-               len--;
-               buff++;
-       }
-       count = len >> 1;               /* nr of 16-bit words.. */
-       if (count) {
-               if (2 & (unsigned long) buff) {
-                       result += *(unsigned short *) buff;
-                       count--;
-                       len -= 2;
-                       buff += 2;
-               }
-               count >>= 1;            /* nr of 32-bit words.. */
-               if (count) {
-                       unsigned long carry = 0;
-                       do {
-                               unsigned long w = *(unsigned long *) buff;
-                               count--;
-                               buff += 4;
-                               result += carry;
-                               result += w;
-                               carry = (w > result);
-                       } while (count);
-                       result += carry;
-                       result = (result & 0xffff) + (result >> 16);
-               }
-               if (len & 2) {
-                       result += *(unsigned short *) buff;
-                       buff += 2;
-               }
-       }
-       if (len & 1)
-               result += (*buff << 8);
-       result = from32to16(result);
-       if (odd)
-               result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
-out:
-       return result;
-}
-
-#ifdef CONFIG_COLDFIRE
-/*
- *     This is a version of ip_compute_csum() optimized for IP headers,
- *     which always checksum on 4 octet boundaries.
- */
-__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
-{
-       return (__force __sum16)~do_csum(iph,ihl*4);
-}
-EXPORT_SYMBOL(ip_fast_csum);
-#endif
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
-       unsigned int result = do_csum(buff, len);
-
-       /* add in old sum, and carry.. */
-       result += (__force u32)sum;
-       if ((__force u32)sum > result)
-               result += 1;
-       return (__force __wsum)result;
-}
-
-EXPORT_SYMBOL(csum_partial);
-
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
-                           int len, __wsum sum, int *csum_err)
-{
-       if (csum_err) *csum_err = 0;
-       memcpy(dst, (__force const void *)src, len);
-       return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
-/*
- * copy from ds while checksumming, otherwise like csum_partial
- */
-
-__wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
-{
-       memcpy(dst, src, len);
-       return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
index 13854ed8cd9ad226497e5ab6fe79d4636138a7a9..5664386338da851094d48191ac1f717aa5dfff69 100644 (file)
@@ -15,17 +15,17 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from,
        asm volatile ("\n"
                "       tst.l   %0\n"
                "       jeq     2f\n"
-               "1:     moves.l (%1)+,%3\n"
+               "1:     "MOVES".l       (%1)+,%3\n"
                "       move.l  %3,(%2)+\n"
                "       subq.l  #1,%0\n"
                "       jne     1b\n"
                "2:     btst    #1,%5\n"
                "       jeq     4f\n"
-               "3:     moves.w (%1)+,%3\n"
+               "3:     "MOVES".w       (%1)+,%3\n"
                "       move.w  %3,(%2)+\n"
                "4:     btst    #0,%5\n"
                "       jeq     6f\n"
-               "5:     moves.b (%1)+,%3\n"
+               "5:     "MOVES".b       (%1)+,%3\n"
                "       move.b  %3,(%2)+\n"
                "6:\n"
                "       .section .fixup,\"ax\"\n"
@@ -68,17 +68,17 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
                "       tst.l   %0\n"
                "       jeq     4f\n"
                "1:     move.l  (%1)+,%3\n"
-               "2:     moves.l %3,(%2)+\n"
+               "2:     "MOVES".l       %3,(%2)+\n"
                "3:     subq.l  #1,%0\n"
                "       jne     1b\n"
                "4:     btst    #1,%5\n"
                "       jeq     6f\n"
                "       move.w  (%1)+,%3\n"
-               "5:     moves.w %3,(%2)+\n"
+               "5:     "MOVES".w       %3,(%2)+\n"
                "6:     btst    #0,%5\n"
                "       jeq     8f\n"
                "       move.b  (%1)+,%3\n"
-               "7:     moves.b  %3,(%2)+\n"
+               "7:     "MOVES".b  %3,(%2)+\n"
                "8:\n"
                "       .section .fixup,\"ax\"\n"
                "       .even\n"
@@ -115,7 +115,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
                return count;
 
        asm volatile ("\n"
-               "1:     moves.b (%2)+,%4\n"
+               "1:     "MOVES".b       (%2)+,%4\n"
                "       move.b  %4,(%1)+\n"
                "       jeq     2f\n"
                "       subq.l  #1,%3\n"
@@ -152,7 +152,7 @@ long strnlen_user(const char __user *src, long n)
        asm volatile ("\n"
                "1:     subq.l  #1,%1\n"
                "       jmi     3f\n"
-               "2:     moves.b (%0)+,%2\n"
+               "2:     "MOVES".b       (%0)+,%2\n"
                "       tst.b   %2\n"
                "       jne     1b\n"
                "       jra     4f\n"
@@ -188,15 +188,15 @@ unsigned long __clear_user(void __user *to, unsigned long n)
        asm volatile ("\n"
                "       tst.l   %0\n"
                "       jeq     3f\n"
-               "1:     moves.l %2,(%1)+\n"
+               "1:     "MOVES".l       %2,(%1)+\n"
                "2:     subq.l  #1,%0\n"
                "       jne     1b\n"
                "3:     btst    #1,%4\n"
                "       jeq     5f\n"
-               "4:     moves.w %2,(%1)+\n"
+               "4:     "MOVES".w       %2,(%1)+\n"
                "5:     btst    #0,%4\n"
                "       jeq     7f\n"
-               "6:     moves.b %2,(%1)\n"
+               "6:     "MOVES".b       %2,(%1)\n"
                "7:\n"
                "       .section .fixup,\"ax\"\n"
                "       .even\n"
index 09cadf1058d5c6b948a8dd3b1016c6f6bc79068f..cfbf3205724a03c011c3f809716a16830f8c80b0 100644 (file)
@@ -4,6 +4,8 @@
 
 obj-y  := init.o
 
-obj-$(CONFIG_MMU)              += cache.o fault.o hwtest.o
-obj-$(CONFIG_MMU_MOTOROLA)     += kmap.o memory.o motorola.o
-obj-$(CONFIG_MMU_SUN3)         += sun3kmap.o sun3mmu.o
+obj-$(CONFIG_MMU)              += cache.o fault.o
+obj-$(CONFIG_MMU_MOTOROLA)     += kmap.o memory.o motorola.o hwtest.o
+obj-$(CONFIG_MMU_SUN3)         += sun3kmap.o sun3mmu.o hwtest.o
+obj-$(CONFIG_MMU_COLDFIRE)     += kmap.o memory.o mcfmmu.o
+
index 5437fff5fe076958bf296d4b378b8da0bca97df1..95d0bf66e2e22e72b272e11b48f5d41914b5be38 100644 (file)
@@ -74,8 +74,16 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
 /* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
 void flush_icache_range(unsigned long address, unsigned long endaddr)
 {
-
-       if (CPU_IS_040_OR_060) {
+       if (CPU_IS_COLDFIRE) {
+               unsigned long start, end;
+               start = address & ICACHE_SET_MASK;
+               end = endaddr & ICACHE_SET_MASK;
+               if (start > end) {
+                       flush_cf_icache(0, end);
+                       end = ICACHE_MAX_ADDR;
+               }
+               flush_cf_icache(start, end);
+       } else if (CPU_IS_040_OR_060) {
                address &= PAGE_MASK;
 
                do {
@@ -100,7 +108,17 @@ EXPORT_SYMBOL(flush_icache_range);
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
 {
-       if (CPU_IS_040_OR_060) {
+       if (CPU_IS_COLDFIRE) {
+               unsigned long start, end;
+               start = addr & ICACHE_SET_MASK;
+               end = (addr + len) & ICACHE_SET_MASK;
+               if (start > end) {
+                       flush_cf_icache(0, end);
+                       end = ICACHE_MAX_ADDR;
+               }
+               flush_cf_icache(start, end);
+
+       } else if (CPU_IS_040_OR_060) {
                asm volatile ("nop\n\t"
                              ".chip 68040\n\t"
                              "cpushp %%bc,(%0)\n\t"
index bbe525434ccb677fe19af015a1b21b3d057095c4..89f3b203814b5cc4cf51038429271cb4c27df408 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/system.h>
+#include <asm/traps.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
 #ifdef CONFIG_ATARI
@@ -75,6 +76,38 @@ extern void init_pointer_table(unsigned long ptable);
 
 extern pmd_t *zero_pgtable;
 
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+#define VECTORS        &vectors[0]
+#else
+#define VECTORS        _ramvec
+#endif
+
+void __init print_memmap(void)
+{
+#define UL(x) ((unsigned long) (x))
+#define MLK(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 10
+#define MLM(b, t) UL(b), UL(t), (UL(t) - UL(b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), 1024)
+
+       pr_notice("Virtual kernel memory layout:\n"
+               "    vector  : 0x%08lx - 0x%08lx   (%4ld KiB)\n"
+               "    kmap    : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
+               "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
+               "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MiB)\n"
+               "      .init : 0x%p" " - 0x%p" "   (%4d KiB)\n"
+               "      .text : 0x%p" " - 0x%p" "   (%4d KiB)\n"
+               "      .data : 0x%p" " - 0x%p" "   (%4d KiB)\n"
+               "      .bss  : 0x%p" " - 0x%p" "   (%4d KiB)\n",
+               MLK(VECTORS, VECTORS + 256),
+               MLM(KMAP_START, KMAP_END),
+               MLM(VMALLOC_START, VMALLOC_END),
+               MLM(PAGE_OFFSET, (unsigned long)high_memory),
+               MLK_ROUNDUP(__init_begin, __init_end),
+               MLK_ROUNDUP(_stext, _etext),
+               MLK_ROUNDUP(_sdata, _edata),
+               MLK_ROUNDUP(_sbss, _ebss));
+}
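
The helper macros pass the bounds through and convert the span with an integer shift, >> 10 for KiB and >> 20 for MiB, so sub-unit spans round down to zero. A worked expansion with hypothetical addresses:

	/* MLM(0xd0000000, 0xe0000000) -> 0xd0000000, 0xe0000000, 256 MiB	*/
	/* MLK(VECTORS, VECTORS + 256) -> the 256-byte table prints as 0 KiB	*/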
+
 void __init mem_init(void)
 {
        pg_data_t *pgdat;
@@ -106,7 +139,7 @@ void __init mem_init(void)
                }
        }
 
-#ifndef CONFIG_SUN3
+#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
        /* insert pointer tables allocated so far into the tablelist */
        init_pointer_table((unsigned long)kernel_pg_dir);
        for (i = 0; i < PTRS_PER_PGD; i++) {
@@ -125,6 +158,7 @@ void __init mem_init(void)
               codepages << (PAGE_SHIFT-10),
               datapages << (PAGE_SHIFT-10),
               initpages << (PAGE_SHIFT-10));
+       print_memmap();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
index 69345849454b9c03991258aa1eff409472896e2b..1cc2bed4c3ddd00f746869cd6e164e6cd12ea914 100644 (file)
@@ -171,7 +171,8 @@ void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cachefla
                        break;
                }
        } else {
-               physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+               physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED |
+                            _PAGE_DIRTY | _PAGE_READWRITE);
                switch (cacheflag) {
                case IOMAP_NOCACHE_SER:
                case IOMAP_NOCACHE_NONSER:
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
new file mode 100644 (file)
index 0000000..babd5a9
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Based upon linux/arch/m68k/mm/sun3mmu.c
+ * Based upon linux/arch/ppc/mm/mmu_context.c
+ *
+ * Implementations of mm routines specific to the ColdFire MMU.
+ *
+ * Copyright (c) 2008 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/mcf_pgalloc.h>
+#include <asm/tlbflush.h>
+
+#define KMAPAREA(x)    ((x >= VMALLOC_START) && (x < KMAP_END))
+
+mm_context_t next_mmu_context;
+unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+atomic_t nr_free_contexts;
+struct mm_struct *context_mm[LAST_CONTEXT+1];
+extern unsigned long num_pages;
+
+void free_initmem(void)
+{
+}
+
+/*
+ * ColdFire paging_init derived from sun3.
+ */
+void __init paging_init(void)
+{
+       pgd_t *pg_dir;
+       pte_t *pg_table;
+       unsigned long address, size;
+       unsigned long next_pgtable, bootmem_end;
+       unsigned long zones_size[MAX_NR_ZONES];
+       enum zone_type zone;
+       int i;
+
+       empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+       memset((void *) empty_zero_page, 0, PAGE_SIZE);
+
+       pg_dir = swapper_pg_dir;
+       memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+       size = num_pages * sizeof(pte_t);
+       size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
+       next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+
+       bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
+       pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
+
+       address = PAGE_OFFSET;
+       while (address < (unsigned long)high_memory) {
+               pg_table = (pte_t *) next_pgtable;
+               next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
+               pgd_val(*pg_dir) = (unsigned long) pg_table;
+               pg_dir++;
+
+               /* now change pg_table to kernel virtual addresses */
+               for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
+                       pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
+                       if (address >= (unsigned long) high_memory)
+                               pte_val(pte) = 0;
+
+                       set_pte(pg_table, pte);
+                       address += PAGE_SIZE;
+               }
+       }
+
+       current->mm = NULL;
+
+       for (zone = 0; zone < MAX_NR_ZONES; zone++)
+               zones_size[zone] = 0x0;
+       zones_size[ZONE_DMA] = num_pages;
+       free_area_init(zones_size);
+}
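
The pg_dir += PAGE_OFFSET >> PGDIR_SHIFT step above skips straight to the kernel's slice of the page directory. Worked under assumed values for this port (8 KiB pages, so PAGE_SHIFT == 13 and 512-entry pte tables, hence PGDIR_SHIFT == 22, with a hypothetical PAGE_OFFSET of 0xc0000000):

	/* 0xc0000000 >> 22 == 768: kernel mappings start at pgd slot 768 of 1024 */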
+
+int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
+{
+       unsigned long flags, mmuar;
+       struct mm_struct *mm;
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+       int asid;
+
+       local_irq_save(flags);
+
+       mmuar = (dtlb) ? mmu_read(MMUAR) :
+               regs->pc + (extension_word * sizeof(long));
+
+       mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
+       if (!mm) {
+               local_irq_restore(flags);
+               return -1;
+       }
+
+       pgd = pgd_offset(mm, mmuar);
+       if (pgd_none(*pgd))  {
+               local_irq_restore(flags);
+               return -1;
+       }
+
+       pmd = pmd_offset(pgd, mmuar);
+       if (pmd_none(*pmd)) {
+               local_irq_restore(flags);
+               return -1;
+       }
+
+       pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
+                               : pte_offset_map(pmd, mmuar);
+       if (pte_none(*pte) || !pte_present(*pte)) {
+               local_irq_restore(flags);
+               return -1;
+       }
+
+       if (write) {
+               if (!pte_write(*pte)) {
+                       local_irq_restore(flags);
+                       return -1;
+               }
+               set_pte(pte, pte_mkdirty(*pte));
+       }
+
+       set_pte(pte, pte_mkyoung(*pte));
+       asid = mm->context & 0xff;
+       if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
+               set_pte(pte, pte_wrprotect(*pte));
+
+       mmu_write(MMUTR, (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) |
+               (((int)(pte->pte) & (int)CF_PAGE_MMUTR_MASK)
+               >> CF_PAGE_MMUTR_SHIFT) | MMUTR_V);
+
+       mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
+               ((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);
+
+       if (dtlb)
+               mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
+       else
+               mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
+
+       local_irq_restore(flags);
+       return 0;
+}
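
Each bail-out path restores interrupts and returns -1, leaving it to the exception entry code to raise a real fault; a hedged sketch of that assumed calling convention (the helper name is hypothetical):

	/* sketch: escalate an unsatisfiable miss to the generic fault path */
	if (cf_tlb_miss(regs, write, dtlb, ext) < 0)
		report_page_fault(regs);	/* hypothetical escalation helper */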
+
+/*
+ * Initialize the context management stuff.
+ * The following was taken from arch/ppc/mmu_context.c
+ */
+void __init mmu_context_init(void)
+{
+       /*
+        * Some processors have too few contexts to reserve one for
+        * init_mm, and require using context 0 for a normal task.
+        * Other processors reserve the use of context zero for the kernel.
+        * This code assumes FIRST_CONTEXT < 32.
+        */
+       context_map[0] = (1 << FIRST_CONTEXT) - 1;
+       next_mmu_context = FIRST_CONTEXT;
+       atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
+}
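
A worked expansion of the bitmap seeding, under the stated FIRST_CONTEXT < 32 assumption and with FIRST_CONTEXT == 1 (a value assumed here, not taken from the patch):

	/* context_map[0] = (1 << 1) - 1 == 0x1: context 0 reserved,	*/
	/* contexts 1..LAST_CONTEXT left allocatable			*/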
+
+/*
+ * Steal a context from a task that has one at the moment.
+ * This is only used on 8xx and 4xx and we presently assume that
+ * they don't do SMP.  If they do then this code will have to check
+ * whether the MM we steal is in use.
+ * We also assume that this is only used on systems that don't
+ * use an MMU hash table - this is true for 8xx and 4xx.
+ * This isn't an LRU system, it just frees up each context in
+ * turn (sort-of pseudo-random replacement :).  This would be the
+ * place to implement an LRU scheme if anyone was motivated to do it.
+ *  -- paulus
+ */
+void steal_context(void)
+{
+       struct mm_struct *mm;
+       /*
+        * free up context `next_mmu_context'
+        * if we shouldn't free context 0, don't...
+        */
+       if (next_mmu_context < FIRST_CONTEXT)
+               next_mmu_context = FIRST_CONTEXT;
+       mm = context_mm[next_mmu_context];
+       flush_tlb_mm(mm);
+       destroy_context(mm);
+}
+
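
steal_context() is only half of the scheme; the arch/ppc code credited above pairs it with a get_mmu_context() that claims the next free bit. A hedged sketch of that counterpart, adapted to the declarations at the top of this file (not part of this patch, and it assumes LAST_CONTEXT + 1 is a power of two, as the ppc original did):

	static void sketch_get_mmu_context(struct mm_struct *mm)
	{
		mm_context_t ctx;

		while (atomic_dec_if_positive(&nr_free_contexts) < 0)
			steal_context();	/* frees one slot, round robin */
		ctx = next_mmu_context;
		while (test_and_set_bit(ctx, context_map)) {
			ctx = find_next_zero_bit(context_map, LAST_CONTEXT + 1, ctx);
			if (ctx > LAST_CONTEXT)
				ctx = 0;
		}
		next_mmu_context = (ctx + 1) & LAST_CONTEXT;
		mm->context = ctx;
		context_mm[ctx] = mm;
	}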
index 34c77ce24fba5b54a46c63449620259a2a9fdd93..a5dbb74fe1de289ed696dfbe100dfbcb328343bb 100644 (file)
@@ -203,7 +203,9 @@ static inline void pushcl040(unsigned long paddr)
 
 void cache_clear (unsigned long paddr, int len)
 {
-    if (CPU_IS_040_OR_060) {
+    if (CPU_IS_COLDFIRE) {
+       flush_cf_bcache(0, DCACHE_MAX_ADDR);
+    } else if (CPU_IS_040_OR_060) {
        int tmp;
 
        /*
@@ -250,7 +252,9 @@ EXPORT_SYMBOL(cache_clear);
 
 void cache_push (unsigned long paddr, int len)
 {
-    if (CPU_IS_040_OR_060) {
+    if (CPU_IS_COLDFIRE) {
+       flush_cf_bcache(0, DCACHE_MAX_ADDR);
+    } else if (CPU_IS_040_OR_060) {
        int tmp = PAGE_SIZE;
 
        /*
index 78130984db956045c4e3371d138f8c7e70070344..ee043540bfa2af220c89e47ec640e196c1f2444c 100644 (file)
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <asm/pgalloc.h>
 #include <asm/machdep.h>
 #include <asm/coldfire.h>
 #include <asm/m54xxsim.h>
 #include <asm/mcfuart.h>
 #include <asm/m54xxgpt.h>
+#ifdef CONFIG_MMU
+#include <asm/mmu_context.h>
+#endif
 
 /***************************************************************************/
 
@@ -95,8 +101,49 @@ static void mcf54xx_reset(void)
 
 /***************************************************************************/
 
+#ifdef CONFIG_MMU
+
+unsigned long num_pages;
+
+static void __init mcf54xx_bootmem_alloc(void)
+{
+       unsigned long start_pfn;
+       unsigned long memstart;
+
+       /* _rambase and _ramend will be naturally page aligned */
+       m68k_memory[0].addr = _rambase;
+       m68k_memory[0].size = _ramend - _rambase;
+
+       /* compute total pages in system */
+       num_pages = (_ramend - _rambase) >> PAGE_SHIFT;
+
+       /* page numbers */
+       memstart = PAGE_ALIGN(_ramstart);
+       min_low_pfn = _rambase >> PAGE_SHIFT;
+       start_pfn = memstart >> PAGE_SHIFT;
+       max_low_pfn = _ramend >> PAGE_SHIFT;
+       high_memory = (void *)_ramend;
+
+       m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
+       module_fixup(NULL, __start_fixup, __stop_fixup);
+
+       /* setup bootmem data */
+       m68k_setup_node(0);
+       memstart += init_bootmem_node(NODE_DATA(0), start_pfn,
+               min_low_pfn, max_low_pfn);
+       free_bootmem_node(NODE_DATA(0), memstart, _ramend - memstart);
+}
+
+#endif /* CONFIG_MMU */
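
The pfn arithmetic above, worked through for a hypothetical board (addresses assumed, 8 KiB pages so PAGE_SHIFT == 13):

	/* _rambase = 0x40000000, _ramend = 0x44000000 (64 MiB, assumed)	*/
	/* num_pages   = 0x04000000 >> 13 = 8192 pages				*/
	/* min_low_pfn = 0x40000000 >> 13 = 0x20000				*/
	/* max_low_pfn = 0x44000000 >> 13 = 0x22000				*/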
+
+/***************************************************************************/
+
 void __init config_BSP(char *commandp, int size)
 {
+#ifdef CONFIG_MMU
+       mcf54xx_bootmem_alloc();
+       mmu_context_init();
+#endif
        mach_reset = mcf54xx_reset;
        m54xx_uarts_init();
 }
index e4dfd8fde068b9ed9738590d13db02ddb1bdc0f0..ee61bf84d4a0a32fbf9bbbe80328c309d4fe22ef 100644 (file)
@@ -14,12 +14,8 @@ obj-$(CONFIG_M68328) += config.o
 obj-$(CONFIG_ROM)      += romvec.o
 
 extra-y                        := head.o
-extra-$(CONFIG_M68328) += bootlogo.rh head.o
-
-$(obj)/bootlogo.rh: $(src)/bootlogo.h
-       perl $(src)/bootlogo.pl < $(src)/bootlogo.h > $(obj)/bootlogo.rh
 
 $(obj)/head.o: $(obj)/$(head-y)
        ln -sf $(head-y) $(obj)/head.o
 
-clean-files := $(obj)/bootlogo.rh $(obj)/head.o $(head-y)
+clean-files := $(obj)/head.o $(head-y)
index 67bc2c17386edfb609aa5bf0970ad1685e5f7139..b896c933fafce4182b3af263690587f72aba2336 100644 (file)
@@ -1,6 +1,6 @@
 #define bootlogo_width 160
 #define bootlogo_height 160
-static unsigned char bootlogo_bits[] = {
+unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = {
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x01, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x40, 0x55, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/arch/m68k/platform/68328/bootlogo.pl b/arch/m68k/platform/68328/bootlogo.pl
deleted file mode 100644 (file)
index b04ae3f..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-
-$_ = join("", <>);
-
-s/(0x[0-9a-f]{2})/sprintf("0x%.2x",ord(pack("b8",unpack("B8",chr(hex($1))))))/gei;
-
-s/^ /  .byte /gm;
-s/[,};]+$//gm;
-s/^static.*//gm;
-
-print $_;
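
For reference, the deleted script's pack("b8", unpack("B8", ...)) round trip reversed the bit order within each byte while turning the C array into .byte directives; with the logo now carried as an aligned C array, that conversion step disappears. A hedged C equivalent of the per-byte reversal it performed:

	/* sketch: mirror bit i into bit 7 - i, as the perl substitution did */
	static unsigned char revbits(unsigned char b)
	{
		unsigned char r = 0;
		int i;

		for (i = 0; i < 8; i++)
			r |= ((b >> i) & 1) << (7 - i);
		return r;
	}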
index a7bd21deb00fd2366358185875c97277e57fe8d3..d70bf2623db11316f560e159572a1bb4597e4fb6 100644 (file)
@@ -20,6 +20,9 @@
 #include <asm/system.h>
 #include <asm/machdep.h>
 #include <asm/MC68328.h>
+#if defined(CONFIG_PILOT) || defined(CONFIG_INIT_LCD)
+#include "bootlogo.h"
+#endif
 
 /***************************************************************************/
 
index aecff532b3432cfa731e69cc525522f42e147bfc..2ebfd642081855acb3af3e35dac9066ed886038c 100644 (file)
 .global _ramstart
 .global _ramend
 
-.global penguin_bits
-
-#ifdef CONFIG_PILOT
-
-#define IMR 0xFFFFF304
-
-       .data
-       .align 16
-
-penguin_bits:  
-#include "bootlogo.rh"
-
-#endif
+.global bootlogo_bits
 
 /*****************************************************************************/
 
@@ -185,9 +173,6 @@ L3:
        moveq   #79, %d7
        movel   %d0, _ramend
 
-       movel   %a3, %d0
-       movel   %d0, rom_length
-
        pea     0
        pea     env
        pea     %sp@(4)
@@ -196,7 +181,7 @@ L3:
        DBG_PUTC('H')
 
 #ifdef CONFIG_PILOT
-       movel   #penguin_bits, 0xFFFFFA00
+       movel   #bootlogo_bits, 0xFFFFFA00
        moveb   #10, 0xFFFFFA05
        movew   #160, 0xFFFFFA08
        movew   #160, 0xFFFFFA0A
index 6ec77d3ea0b384d981cc7012b90ee3fe6fb71a0a..a5ff96d0295f069e28ded27ace22a5d1296e6cff 100644 (file)
@@ -8,7 +8,7 @@
        .global _ramend
 
 #ifdef CONFIG_INIT_LCD
-       .global splash_bits
+       .global bootlogo_bits
 #endif
 
        .data
@@ -29,16 +29,11 @@ _ramend:
 
 #define        RAMEND  (CONFIG_RAMBASE + CONFIG_RAMSIZE)
 
-#ifdef CONFIG_INIT_LCD
-splash_bits:
-#include "bootlogo.rh"
-#endif
-       
        .text
 _start:
 _stext:        movew   #0x2700,%sr
 #ifdef CONFIG_INIT_LCD
-       movel   #splash_bits, 0xfffffA00 /* LSSA */
+       movel   #bootlogo_bits, 0xfffffA00 /* LSSA */
        moveb   #0x28,   0xfffffA05     /* LVPW */
        movew   #0x280,  0xFFFFFa08     /* LXMAX */
        movew   #0x1df,  0xFFFFFa0a     /* LYMAX */
index 3157461a8d1da8c6cd07af7709abd17ba03b3a3b..863889fc31c9c1fe2b0ad7a9b460a8627a14a56c 100644 (file)
@@ -54,7 +54,6 @@ sw_usp:
 .globl ret_from_signal
 .globl sys_call_table
 .globl inthandler
-.globl fasthandler
 
 enosys:
        mov.l   #sys_ni_syscall,%d3
@@ -63,6 +62,7 @@ enosys:
 ENTRY(system_call)
        SAVE_ALL_SYS
        move    #0x2000,%sr             /* enable intrs again */
+       GET_CURRENT(%d2)
 
        cmpl    #NR_syscalls,%d0
        jcc     enosys
@@ -166,6 +166,7 @@ Lsignal_return:
  */
 ENTRY(inthandler)
        SAVE_ALL_INT
+       GET_CURRENT(%d2)
 
        movew   %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */
        andl    #0x03fc,%d0             /* mask out vector only */
@@ -191,7 +192,9 @@ ENTRY(resume)
        movel   %sp,%a0@(TASK_THREAD+THREAD_KSP) /* save kernel stack pointer */
        RDUSP                                    /* movel %usp,%a3 */
        movel   %a3,%a0@(TASK_THREAD+THREAD_USP) /* save thread user stack */
-
+#ifdef CONFIG_MMU
+       movel   %a1,%a2                          /* set new current */
+#endif
        movel   %a1@(TASK_THREAD+THREAD_USP),%a3 /* restore thread user stack */
        WRUSP                                    /* movel %a3,%usp */
        movel   %a1@(TASK_THREAD+THREAD_KSP),%sp /* restore new kernel stack */
index c33483824a2eed3573c6d1c8b1c07545b136a800..38f04a3f620713e5ea14cc5a100b5f41ad299d51 100644 (file)
@@ -3,7 +3,7 @@
 /*
  *     head.S -- common startup code for ColdFire CPUs.
  *
- *     (C) Copyright 1999-2010, Greg Ungerer <gerg@snapgear.com>.
+ *     (C) Copyright 1999-2011, Greg Ungerer <gerg@snapgear.com>.
  */
 
 /*****************************************************************************/
@@ -13,6 +13,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/coldfire.h>
 #include <asm/mcfsim.h>
+#include <asm/mcfmmu.h>
 #include <asm/thread_info.h>
 
 /*****************************************************************************/
@@ -135,6 +136,14 @@ _init_sp:
 
 __HEAD
 
+#ifdef CONFIG_MMU
+_start0:
+       jmp     _start
+.global kernel_pg_dir
+.equ   kernel_pg_dir,_start0
+.equ   .,_start0+0x1000
+#endif
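
The .equ on the location counter reserves 0x1000 bytes at the head of the image, so kernel_pg_dir occupies the first 4 KiB in front of _start proper. That size matches a 32-bit page directory (the entry count is assumed from the usual 4 GiB / PGDIR-span layout):

	/* sketch: what C code sees after the reservation above */
	extern pgd_t kernel_pg_dir[1024];	/* 1024 entries * 4 bytes == 0x1000 */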
+
 /*
 *	This is the code's first entry point. This is where it all
  *     begins...
@@ -143,6 +152,9 @@ __HEAD
 _start:
        nop                                     /* filler */
        movew   #0x2700, %sr                    /* no interrupts */
+       movel   #CACHE_INIT,%d0                 /* disable cache */
+       movec   %d0,%CACR
+       nop
 #if defined(CONFIG_UBOOT)
        movel   %sp,_init_sp                    /* save initial stack pointer */
 #endif
@@ -176,9 +188,6 @@ _start:
         *      it is very similar. Define the exact settings in the headers
         *      then the code here is the same for all.
         */
-       movel   #CACHE_INIT,%d0                 /* invalidate whole cache */
-       movec   %d0,%CACR
-       nop
        movel   #ACR0_MODE,%d0                  /* set RAM region for caching */
        movec   %d0,%ACR0
        movel   #ACR1_MODE,%d0                  /* anything else to cache? */
@@ -193,6 +202,26 @@ _start:
        movec   %d0,%CACR
        nop
 
+#ifdef CONFIG_MMU
+       /*
+        *      Identity mapping for the kernel region.
+        */
+       movel   #(MMUBASE+1),%d0                /* enable MMUBAR registers */
+       movec   %d0,%MMUBAR
+       movel   #MMUOR_CA,%d0                   /* clear TLB entries */
+       movel   %d0,MMUOR
+       movel   #0,%d0                          /* set ASID to 0 */
+       movec   %d0,%asid
+
+       movel   #MMUCR_EN,%d0                   /* Enable the identity map */
+       movel   %d0,MMUCR
+       nop                                     /* sync i-pipeline */
+
+       movel   #_vstart,%a0                    /* jump to "virtual" space */
+       jmp     %a0@
+_vstart:
+#endif /* CONFIG_MMU */
+
 #ifdef CONFIG_ROMFS_FS
        /*
         *      Move ROM filesystem above bss :-)
@@ -238,6 +267,22 @@ _clear_bss:
        lea     init_thread_union,%a0
        lea     THREAD_SIZE(%a0),%sp
 
+#ifdef CONFIG_MMU
+.global m68k_cputype
+.global m68k_mmutype
+.global m68k_fputype
+.global m68k_machtype
+       movel   #CPU_COLDFIRE,%d0
+       movel   %d0,m68k_cputype                /* Mark us as a ColdFire */
+       movel   #MMU_COLDFIRE,%d0
+       movel   %d0,m68k_mmutype
+       movel   #FPU_COLDFIRE,%d0
+       movel   %d0,m68k_fputype
+       movel   #MACH_M54XX,%d0
+       movel   %d0,m68k_machtype               /* Mark us as a 54xx machine */
+       lea     init_task,%a2                   /* Set "current" init task */
+#endif
+
        /*
	 *	Assembler start up done, start code proper.
         */
index b7f822b552bbdba06474d78dfa2b89832d15995c..54e1452f853a76162f26ca4f100a51b4982db611 100644 (file)
@@ -98,16 +98,19 @@ static struct irqaction mcfslt_timer_irq = {
 static cycle_t mcfslt_read_clk(struct clocksource *cs)
 {
        unsigned long flags;
-       u32 cycles;
-       u16 scnt;
+       u32 cycles, scnt;
 
        local_irq_save(flags);
        scnt = __raw_readl(TA(MCFSLT_SCNT));
        cycles = mcfslt_cnt;
+       if (__raw_readl(TA(MCFSLT_SSR)) & MCFSLT_SSR_TE) {
+               cycles += mcfslt_cycles_per_jiffy;
+               scnt = __raw_readl(TA(MCFSLT_SCNT));
+       }
        local_irq_restore(flags);
 
        /* subtract because slice timers count down */
-       return cycles - scnt;
+       return cycles + ((mcfslt_cycles_per_jiffy - 1) - scnt);
 }
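
The slice timer counts down, so cycles elapsed inside the current jiffy are (mcfslt_cycles_per_jiffy - 1) - scnt, and the new MCFSLT_SSR_TE test catches an expiry that fired after interrupts were masked but before the counter was sampled, crediting the jiffy that mcfslt_cnt has not yet absorbed. A worked example with hypothetical numbers:

	/* mcfslt_cycles_per_jiffy == 1000: the counter runs 999..0	*/
	/* scnt == 700  ->  (1000 - 1) - 700 == 299 cycles this jiffy	*/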
 
 static struct clocksource mcfslt_clk = {