'm' all linux/synclink.h conflict!
'm' 00-1F net/irda/irmod.h conflict!
'n' 00-7F linux/ncp_fs.h
+'n' 80-8F linux/nilfs2_fs.h NILFS2
'n' E0-FF video/matrox.h matroxfb
'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2
'o' 00-03 include/mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps)
libata.dma=4 Compact Flash DMA only
Combinations also work, so libata.dma=3 enables DMA
for disks and CDROMs, but not CFs.
+
+ libata.ignore_hpa= [LIBATA] Ignore HPA limit
+ libata.ignore_hpa=0 keep BIOS limits (default)
+ libata.ignore_hpa=1 ignore limits, using full disk
libata.noacpi [LIBATA] Disables use of ACPI in libata suspend/resume
when set.
0 -> Unknown EM2800 video grabber (em2800) [eb1a:2800]
- 1 -> Unknown EM2750/28xx video grabber (em2820/em2840) [eb1a:2820,eb1a:2821,eb1a:2860,eb1a:2861,eb1a:2870,eb1a:2881,eb1a:2883]
+ 1 -> Unknown EM2750/28xx video grabber (em2820/em2840) [eb1a:2710,eb1a:2820,eb1a:2821,eb1a:2860,eb1a:2861,eb1a:2870,eb1a:2881,eb1a:2883]
2 -> Terratec Cinergy 250 USB (em2820/em2840) [0ccd:0036]
3 -> Pinnacle PCTV USB 2 (em2820/em2840) [2304:0208]
4 -> Hauppauge WinTV USB 2 (em2820/em2840) [2040:4200,2040:4201]
152 -> Asus Tiger Rev:1.00 [1043:4857]
153 -> Kworld Plus TV Analog Lite PCI [17de:7128]
154 -> Avermedia AVerTV GO 007 FM Plus [1461:f31d]
-155 -> Hauppauge WinTV-HVR1120 ATSC/QAM-Hybrid [0070:6706,0070:6708]
-156 -> Hauppauge WinTV-HVR1110r3 DVB-T/Hybrid [0070:6707,0070:6709,0070:670a]
+155 -> Hauppauge WinTV-HVR1150 ATSC/QAM-Hybrid [0070:6706,0070:6708]
+156 -> Hauppauge WinTV-HVR1120 DVB-T/Hybrid [0070:6707,0070:6709,0070:670a]
157 -> Avermedia AVerTV Studio 507UA [1461:a11b]
158 -> AVerMedia Cardbus TV/Radio (E501R) [1461:b7e9]
159 -> Beholder BeholdTV 505 RDS [0000:505B]
S: Maintained
F: drivers/media/video/gspca/pac207.c
+GSPCA SN9C20X SUBDRIVER
+P: Brian Johnson
+M: brijohn@gmail.com
+L: linux-media@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+S: Maintained
+F: drivers/media/video/gspca/sn9c20x.c
+
GSPCA T613 SUBDRIVER
M: Leandro Costantino <lcostantino@gmail.com>
L: linux-media@vger.kernel.org
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 31
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
#
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="init=/sbin/preinit ubi.mtd=rootfs root=ubi0:rootfs rootfstype=ubifs rootflags=bulk_read,no_chk_data_crc rw console=ttyMTD,log console=tty0"
+CONFIG_CMDLINE="init=/sbin/preinit ubi.mtd=rootfs root=ubi0:rootfs rootfstype=ubifs rootflags=bulk_read,no_chk_data_crc rw console=ttyMTD,log console=tty0 console=ttyS2,115200n8"
# CONFIG_XIP_KERNEL is not set
# CONFIG_KEXEC is not set
# CONFIG_USB_GPIO_VBUS is not set
# CONFIG_ISP1301_OMAP is not set
CONFIG_TWL4030_USB=y
-CONFIG_MMC=m
+CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_UNSAFE_RESUME is not set
# on-CPU RTC drivers
#
# CONFIG_DMADEVICES is not set
-# CONFIG_REGULATOR is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_TWL4030=y
# CONFIG_UIO is not set
# CONFIG_STAGING is not set
struct membank {
unsigned long start;
unsigned long size;
- int node;
+ unsigned short node;
+ unsigned short highmem;
};
struct meminfo {
#include <mach/hardware.h>
-#define IO_SPACE_LIMIT 0xffff0000
+#define IO_SPACE_LIMIT 0x0000ffff
extern int (*ixp4xx_pci_read)(u32 addr, u32 cmd, u32* data);
extern int ixp4xx_pci_write(u32 addr, u32 cmd, u32 data);
static int devboard_sdhc2_get_ro(struct device *dev)
{
- return gpio_get_value(SDHC2_WP);
+ return !gpio_get_value(SDHC2_WP);
}
static int devboard_sdhc2_init(struct device *dev, irq_handler_t detect_irq,
static int marxbot_sdhc2_get_ro(struct device *dev)
{
- return gpio_get_value(SDHC2_WP);
+ return !gpio_get_value(SDHC2_WP);
}
static int marxbot_sdhc2_init(struct device *dev, irq_handler_t detect_irq,
static int moboard_sdhc1_get_ro(struct device *dev)
{
- return gpio_get_value(SDHC1_WP);
+ return !gpio_get_value(SDHC1_WP);
}
static int moboard_sdhc1_init(struct device *dev, irq_handler_t detect_irq,
#include "devices.h"
static unsigned int pcm037_eet_pins[] = {
- /* SPI #1 */
- MX31_PIN_CSPI1_MISO__MISO,
- MX31_PIN_CSPI1_MOSI__MOSI,
- MX31_PIN_CSPI1_SCLK__SCLK,
- MX31_PIN_CSPI1_SPI_RDY__SPI_RDY,
- MX31_PIN_CSPI1_SS0__SS0,
- MX31_PIN_CSPI1_SS1__SS1,
- MX31_PIN_CSPI1_SS2__SS2,
-
/* Reserve and hardwire GPIO 57 high - S6E63D6 chipselect */
IOMUX_MODE(MX31_PIN_KEY_COL7, IOMUX_CONFIG_GPIO),
/* GPIO keys */
static void __init omap_2430sdp_init_irq(void)
{
- omap2_init_common_hw(NULL);
+ omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
}
static void __init omap_3430sdp_init_irq(void)
{
- omap2_init_common_hw(hyb18m512160af6_sdrc_params);
+ omap2_init_common_hw(hyb18m512160af6_sdrc_params, NULL);
omap_init_irq();
omap_gpio_init();
}
static void __init omap_4430sdp_init_irq(void)
{
- omap2_init_common_hw(NULL);
+ omap2_init_common_hw(NULL, NULL);
#ifdef CONFIG_OMAP_32K_TIMER
omap2_gp_clockevent_set_gptimer(1);
#endif
static void __init omap_apollon_init_irq(void)
{
- omap2_init_common_hw(NULL);
+ omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
apollon_init_smc91x();
static void __init omap_generic_init_irq(void)
{
- omap2_init_common_hw(NULL);
+ omap2_init_common_hw(NULL, NULL);
omap_init_irq();
}
static void __init omap_h4_init_irq(void)
{
- omap2_init_common_hw(NULL);
+ omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
h4_init_flash();
static void __init omap_ldp_init_irq(void)
{
- omap2_init_common_hw(NULL);
+ omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
ldp_init_smsc911x();
static void __init omap3_beagle_init_irq(void)
{
- omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
+ omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+ mt46h32m32lf6_sdrc_params);
omap_init_irq();
#ifdef CONFIG_OMAP_32K_TIMER
omap2_gp_clockevent_set_gptimer(12);
usb_musb_init();
omap3beagle_flash_init();
+
+ /* Ensure SDRC pins are mux'd for self-refresh */
+ omap_cfg_reg(H16_34XX_SDRC_CKE0);
+ omap_cfg_reg(H17_34XX_SDRC_CKE1);
}
static void __init omap3_beagle_map_io(void)
static void __init omap3_evm_init_irq(void)
{
- omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
+ omap2_init_common_hw(mt46h32m32lf6_sdrc_params, NULL);
omap_init_irq();
omap_gpio_init();
omap3evm_init_smc911x();
#include <mach/mcspi.h>
#include <mach/usb.h>
#include <mach/keypad.h>
+#include <mach/mux.h>
#include "sdram-micron-mt46h32m32lf-6.h"
#include "mmc-twl4030.h"
static void __init omap3pandora_init_irq(void)
{
- omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
+ omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+ mt46h32m32lf6_sdrc_params);
omap_init_irq();
omap_gpio_init();
}
omap3pandora_ads7846_init();
pandora_keys_gpio_init();
usb_musb_init();
+
+ /* Ensure SDRC pins are mux'd for self-refresh */
+ omap_cfg_reg(H16_34XX_SDRC_CKE0);
+ omap_cfg_reg(H17_34XX_SDRC_CKE1);
}
static void __init omap3pandora_map_io(void)
#include <mach/gpmc.h>
#include <mach/hardware.h>
#include <mach/nand.h>
+#include <mach/mux.h>
#include <mach/usb.h>
#include "sdram-micron-mt46h32m32lf-6.h"
#define OVERO_GPIO_BT_XGATE 15
#define OVERO_GPIO_W2W_NRESET 16
+#define OVERO_GPIO_PENDOWN 114
#define OVERO_GPIO_BT_NRESET 164
#define OVERO_GPIO_USBH_CPEN 168
#define OVERO_GPIO_USBH_NRESET 183
.name = "smsc911x",
.id = -1,
.num_resources = ARRAY_SIZE(overo_smsc911x_resources),
- .resource = &overo_smsc911x_resources,
+ .resource = overo_smsc911x_resources,
.dev = {
.platform_data = &overo_smsc911x_config,
},
static void __init overo_init_irq(void)
{
- omap2_init_common_hw(mt46h32m32lf6_sdrc_params);
+ omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+ mt46h32m32lf6_sdrc_params);
omap_init_irq();
omap_gpio_init();
}
overo_ads7846_init();
overo_init_smsc911x();
+ /* Ensure SDRC pins are mux'd for self-refresh */
+ omap_cfg_reg(H16_34XX_SDRC_CKE0);
+ omap_cfg_reg(H17_34XX_SDRC_CKE1);
+
if ((gpio_request(OVERO_GPIO_W2W_NRESET,
"OVERO_GPIO_W2W_NRESET") == 0) &&
(gpio_direction_output(OVERO_GPIO_W2W_NRESET, 1) == 0)) {
.setup = rx51_twlgpio_setup,
};
+static struct twl4030_usb_data rx51_usb_data = {
+ .usb_mode = T2_USB_MODE_ULPI,
+};
+
static struct twl4030_platform_data rx51_twldata = {
.irq_base = TWL4030_IRQ_BASE,
.irq_end = TWL4030_IRQ_END,
.gpio = &rx51_gpio_data,
.keypad = &rx51_kp_data,
.madc = &rx51_madc_data,
+ .usb = &rx51_usb_data,
.vaux1 = &rx51_vaux1,
.vaux2 = &rx51_vaux2,
static void __init rx51_init_irq(void)
{
- omap2_init_common_hw(NULL);
+ omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
}
omap_serial_init();
usb_musb_init();
rx51_peripherals_init();
+
+ /* Ensure SDRC pins are mux'd for self-refresh */
+ omap_cfg_reg(H16_34XX_SDRC_CKE0);
+ omap_cfg_reg(H17_34XX_SDRC_CKE1);
}
static void __init rx51_map_io(void)
static void __init omap_zoom2_init_irq(void)
{
- omap2_init_common_hw(NULL);
+ omap2_init_common_hw(NULL, NULL);
omap_init_irq();
omap_gpio_init();
}
#include <mach/clock.h>
#include <mach/clockdomain.h>
#include <mach/cpu.h>
+#include <mach/prcm.h>
#include <asm/div64.h>
#include <mach/sdrc.h>
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
-#define MAX_CLOCK_ENABLE_WAIT 100000
-
/* DPLL rate rounding: minimum DPLL multiplier, divider values */
#define DPLL_MIN_MULTIPLIER 1
#define DPLL_MIN_DIVIDER 1
}
/**
- * omap2_wait_clock_ready - wait for clock to enable
- * @reg: physical address of clock IDLEST register
- * @mask: value to mask against to determine if the clock is active
- * @name: name of the clock (for printk)
+ * omap2_clk_dflt_find_companion - find companion clock to @clk
+ * @clk: struct clk * to find the companion clock of
+ * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
+ * @other_bit: u8 ** to return the companion clock bit shift in
+ *
+ * Note: We don't need special code here for INVERT_ENABLE for the
+ * time being since INVERT_ENABLE only applies to clocks enabled by
+ * CM_CLKEN_PLL
*
- * Returns 1 if the clock enabled in time, or 0 if it failed to enable
- * in roughly MAX_CLOCK_ENABLE_WAIT microseconds.
+ * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes it's
+ * just a matter of XORing the bits.
+ *
+ * Some clocks don't have companion clocks. For example, modules with
+ * only an interface clock (such as MAILBOXES) don't have a companion
+ * clock. Right now, this code relies on the hardware exporting a bit
+ * in the correct companion register that indicates that the
+ * nonexistent 'companion clock' is active. Future patches will
+ * associate this type of code with per-module data structures to
+ * avoid this issue, and remove the casts. No return value.
*/
-int omap2_wait_clock_ready(void __iomem *reg, u32 mask, const char *name)
+void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
+ u8 *other_bit)
{
- int i = 0;
- int ena = 0;
+ u32 r;
/*
- * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
- * 34xx reverses this, just to keep us on our toes
+ * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes
+ * it's just a matter of XORing the bits.
*/
- if (cpu_mask & (RATE_IN_242X | RATE_IN_243X))
- ena = mask;
- else if (cpu_mask & RATE_IN_343X)
- ena = 0;
-
- /* Wait for lock */
- while (((__raw_readl(reg) & mask) != ena) &&
- (i++ < MAX_CLOCK_ENABLE_WAIT)) {
- udelay(1);
- }
-
- if (i <= MAX_CLOCK_ENABLE_WAIT)
- pr_debug("Clock %s stable after %d loops\n", name, i);
- else
- printk(KERN_ERR "Clock %s didn't enable in %d tries\n",
- name, MAX_CLOCK_ENABLE_WAIT);
-
-
- return (i < MAX_CLOCK_ENABLE_WAIT) ? 1 : 0;
-};
+ r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));
+ *other_reg = (__force void __iomem *)r;
+ *other_bit = clk->enable_bit;
+}
-/*
- * Note: We don't need special code here for INVERT_ENABLE
- * for the time being since INVERT_ENABLE only applies to clocks enabled by
- * CM_CLKEN_PLL
+/**
+ * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
+ * @clk: struct clk * to find IDLEST info for
+ * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
+ * @idlest_bit: u8 ** to return the CM_IDLEST bit shift in
+ *
+ * Return the CM_IDLEST register address and bit shift corresponding
+ * to the module that "owns" this clock. This default code assumes
+ * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
+ * the IDLEST register address ID corresponds to the CM_*CLKEN
+ * register address ID (e.g., that CM_FCLKEN2 corresponds to
+ * CM_IDLEST2). This is not true for all modules. No return value.
*/
-static void omap2_clk_wait_ready(struct clk *clk)
+void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
+ u8 *idlest_bit)
{
- void __iomem *reg, *other_reg, *st_reg;
- u32 bit;
+ u32 r;
- /*
- * REVISIT: This code is pretty ugly. It would be nice to generalize
- * it and pull it into struct clk itself somehow.
- */
- reg = clk->enable_reg;
+ r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+ *idlest_reg = (__force void __iomem *)r;
+ *idlest_bit = clk->enable_bit;
+}
- /*
- * Convert CM_ICLKEN* <-> CM_FCLKEN*. This conversion assumes
- * it's just a matter of XORing the bits.
- */
- other_reg = (void __iomem *)((u32)reg ^ (CM_FCLKEN ^ CM_ICLKEN));
+/**
+ * omap2_module_wait_ready - wait for an OMAP module to leave IDLE
+ * @clk: struct clk * belonging to the module
+ *
+ * If the necessary clocks for the OMAP hardware IP block that
+ * corresponds to clock @clk are enabled, then wait for the module to
+ * indicate readiness (i.e., to leave IDLE). This code does not
+ * belong in the clock code and will be moved in the medium term to
+ * module-dependent code. No return value.
+ */
+static void omap2_module_wait_ready(struct clk *clk)
+{
+ void __iomem *companion_reg, *idlest_reg;
+ u8 other_bit, idlest_bit;
+
+ /* Not all modules have multiple clocks that their IDLEST depends on */
+ if (clk->ops->find_companion) {
+ clk->ops->find_companion(clk, &companion_reg, &other_bit);
+ if (!(__raw_readl(companion_reg) & (1 << other_bit)))
+ return;
+ }
- /* Check if both functional and interface clocks
- * are running. */
- bit = 1 << clk->enable_bit;
- if (!(__raw_readl(other_reg) & bit))
- return;
- st_reg = (void __iomem *)(((u32)other_reg & ~0xf0) | 0x20); /* CM_IDLEST* */
+ clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit);
- omap2_wait_clock_ready(st_reg, bit, clk->name);
+ omap2_cm_wait_idlest(idlest_reg, (1 << idlest_bit), clk->name);
}
-static int omap2_dflt_clk_enable(struct clk *clk)
+int omap2_dflt_clk_enable(struct clk *clk)
{
u32 v;
if (unlikely(clk->enable_reg == NULL)) {
- printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
+ pr_err("clock.c: Enable for %s without enable code\n",
clk->name);
return 0; /* REVISIT: -EINVAL */
}
__raw_writel(v, clk->enable_reg);
v = __raw_readl(clk->enable_reg); /* OCP barrier */
- return 0;
-}
+ if (clk->ops->find_idlest)
+ omap2_module_wait_ready(clk);
-static int omap2_dflt_clk_enable_wait(struct clk *clk)
-{
- int ret;
-
- if (!clk->enable_reg) {
- printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
- clk->name);
- return 0; /* REVISIT: -EINVAL */
- }
-
- ret = omap2_dflt_clk_enable(clk);
- if (ret == 0)
- omap2_clk_wait_ready(clk);
- return ret;
+ return 0;
}
-static void omap2_dflt_clk_disable(struct clk *clk)
+void omap2_dflt_clk_disable(struct clk *clk)
{
u32 v;
}
const struct clkops clkops_omap2_dflt_wait = {
- .enable = omap2_dflt_clk_enable_wait,
+ .enable = omap2_dflt_clk_enable,
.disable = omap2_dflt_clk_disable,
+ .find_companion = omap2_clk_dflt_find_companion,
+ .find_idlest = omap2_clk_dflt_find_idlest,
};
const struct clkops clkops_omap2_dflt = {
u32 omap2_get_dpll_rate(struct clk *clk);
int omap2_wait_clock_ready(void __iomem *reg, u32 cval, const char *name);
void omap2_clk_prepare_for_reboot(void);
+int omap2_dflt_clk_enable(struct clk *clk);
+void omap2_dflt_clk_disable(struct clk *clk);
+void omap2_clk_dflt_find_companion(struct clk *clk, void __iomem **other_reg,
+ u8 *other_bit);
+void omap2_clk_dflt_find_idlest(struct clk *clk, void __iomem **idlest_reg,
+ u8 *idlest_bit);
extern const struct clkops clkops_omap2_dflt_wait;
extern const struct clkops clkops_omap2_dflt;
#include <mach/clock.h>
#include <mach/sram.h>
+#include <mach/prcm.h>
#include <asm/div64.h>
#include <asm/clkdev.h>
static const struct clkops clkops_oscck;
static const struct clkops clkops_fixed;
+static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit);
+
+/* 2430 I2CHS has non-standard IDLEST register */
+static const struct clkops clkops_omap2430_i2chs_wait = {
+ .enable = omap2_dflt_clk_enable,
+ .disable = omap2_dflt_clk_disable,
+ .find_idlest = omap2430_clk_i2chs_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
#include "clock24xx.h"
struct omap_clk {
* Omap24xx specific clock functions
*-------------------------------------------------------------------------*/
+/**
+ * omap2430_clk_i2chs_find_idlest - return CM_IDLEST info for 2430 I2CHS
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ *
+ * OMAP2430 I2CHS CM_IDLEST bits are in CM_IDLEST1_CORE, but the
+ * CM_*CLKEN bits are in CM_{I,F}CLKEN2_CORE. This custom function
+ * passes back the correct CM_IDLEST register address for I2CHS
+ * modules. No return value.
+ */
+static void omap2430_clk_i2chs_find_idlest(struct clk *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit)
+{
+ *idlest_reg = OMAP_CM_REGADDR(CORE_MOD, CM_IDLEST);
+ *idlest_bit = clk->enable_bit;
+}
+
+
/**
* omap2xxx_clk_get_core_rate - return the CORE_CLK rate
* @clk: pointer to the combined dpll_ck + core_ck (currently "dpll_ck")
else if (clk == &apll54_ck)
cval = OMAP24XX_ST_54M_APLL;
- omap2_wait_clock_ready(OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), cval,
- clk->name);
+ omap2_cm_wait_idlest(OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), cval,
+ clk->name);
/*
* REVISIT: Should we return an error code if omap2_wait_clock_ready()
static struct clk i2chs2_fck = {
.name = "i2c_fck",
- .ops = &clkops_omap2_dflt_wait,
+ .ops = &clkops_omap2430_i2chs_wait,
.id = 2,
.parent = &func_96m_ck,
.clkdm_name = "core_l4_clkdm",
static struct clk i2chs1_fck = {
.name = "i2c_fck",
- .ops = &clkops_omap2_dflt_wait,
+ .ops = &clkops_omap2430_i2chs_wait,
.id = 1,
.parent = &func_96m_ck,
.clkdm_name = "core_l4_clkdm",
* OMAP3-specific clock framework functions
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
- * Copyright (C) 2007-2008 Nokia Corporation
+ * Copyright (C) 2007-2009 Nokia Corporation
*
* Written by Paul Walmsley
* Testing and integration fixes by Jouni Högander
static const struct clkops clkops_noncore_dpll_ops;
+static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit);
+static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit);
+static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit);
+
+static const struct clkops clkops_omap3430es2_ssi_wait = {
+ .enable = omap2_dflt_clk_enable,
+ .disable = omap2_dflt_clk_disable,
+ .find_idlest = omap3430es2_clk_ssi_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+static const struct clkops clkops_omap3430es2_hsotgusb_wait = {
+ .enable = omap2_dflt_clk_enable,
+ .disable = omap2_dflt_clk_disable,
+ .find_idlest = omap3430es2_clk_hsotgusb_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
+static const struct clkops clkops_omap3430es2_dss_usbhost_wait = {
+ .enable = omap2_dflt_clk_enable,
+ .disable = omap2_dflt_clk_disable,
+ .find_idlest = omap3430es2_clk_dss_usbhost_find_idlest,
+ .find_companion = omap2_clk_dflt_find_companion,
+};
+
#include "clock34xx.h"
struct omap_clk {
CLK(NULL, "fshostusb_fck", &fshostusb_fck, CK_3430ES1),
CLK(NULL, "core_12m_fck", &core_12m_fck, CK_343X),
CLK("omap_hdq.0", "fck", &hdq_fck, CK_343X),
- CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck, CK_343X),
- CLK(NULL, "ssi_sst_fck", &ssi_sst_fck, CK_343X),
+ CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1, CK_3430ES1),
+ CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2, CK_3430ES2),
+ CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1, CK_3430ES1),
+ CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2, CK_3430ES2),
CLK(NULL, "core_l3_ick", &core_l3_ick, CK_343X),
- CLK("musb_hdrc", "ick", &hsotgusb_ick, CK_343X),
+ CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es1, CK_3430ES1),
+ CLK("musb_hdrc", "ick", &hsotgusb_ick_3430es2, CK_3430ES2),
CLK(NULL, "sdrc_ick", &sdrc_ick, CK_343X),
CLK(NULL, "gpmc_fck", &gpmc_fck, CK_343X),
CLK(NULL, "security_l3_ick", &security_l3_ick, CK_343X),
CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_343X),
CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_343X),
CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_343X),
- CLK(NULL, "ssi_ick", &ssi_ick, CK_343X),
+ CLK(NULL, "ssi_ick", &ssi_ick_3430es1, CK_3430ES1),
+ CLK(NULL, "ssi_ick", &ssi_ick_3430es2, CK_3430ES2),
CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_3430ES1),
CLK(NULL, "security_l4_ick2", &security_l4_ick2, CK_343X),
CLK(NULL, "aes1_ick", &aes1_ick, CK_343X),
CLK("omap_rng", "ick", &rng_ick, CK_343X),
CLK(NULL, "sha11_ick", &sha11_ick, CK_343X),
CLK(NULL, "des1_ick", &des1_ick, CK_343X),
- CLK("omapfb", "dss1_fck", &dss1_alwon_fck, CK_343X),
+ CLK("omapfb", "dss1_fck", &dss1_alwon_fck_3430es1, CK_3430ES1),
+ CLK("omapfb", "dss1_fck", &dss1_alwon_fck_3430es2, CK_3430ES2),
CLK("omapfb", "tv_fck", &dss_tv_fck, CK_343X),
CLK("omapfb", "video_fck", &dss_96m_fck, CK_343X),
CLK("omapfb", "dss2_fck", &dss2_alwon_fck, CK_343X),
- CLK("omapfb", "ick", &dss_ick, CK_343X),
+ CLK("omapfb", "ick", &dss_ick_3430es1, CK_3430ES1),
+ CLK("omapfb", "ick", &dss_ick_3430es2, CK_3430ES2),
CLK(NULL, "cam_mclk", &cam_mclk, CK_343X),
CLK(NULL, "cam_ick", &cam_ick, CK_343X),
CLK(NULL, "csi2_96m_fck", &csi2_96m_fck, CK_343X),
*/
#define SDRC_MPURATE_LOOPS 96
+/**
+ * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ *
+ * The OMAP3430ES2 SSI target CM_IDLEST bit is at a different shift
+ * from the CM_{I,F}CLKEN bit. Pass back the correct info via
+ * @idlest_reg and @idlest_bit. No return value.
+ */
+static void omap3430es2_clk_ssi_find_idlest(struct clk *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit)
+{
+ u32 r;
+
+ r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+ *idlest_reg = (__force void __iomem *)r;
+ *idlest_bit = OMAP3430ES2_ST_SSI_IDLE_SHIFT;
+}
+
+/**
+ * omap3430es2_clk_dss_usbhost_find_idlest - CM_IDLEST info for DSS, USBHOST
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ *
+ * Some OMAP modules on OMAP3 ES2+ chips have both initiator and
+ * target IDLEST bits. For our purposes, we are concerned with the
+ * target IDLEST bits, which exist at a different bit position than
+ * the *CLKEN bit position for these modules (DSS and USBHOST) (The
+ * default find_idlest code assumes that they are at the same
+ * position.) No return value.
+ */
+static void omap3430es2_clk_dss_usbhost_find_idlest(struct clk *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit)
+{
+ u32 r;
+
+ r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+ *idlest_reg = (__force void __iomem *)r;
+ /* USBHOST_IDLE has same shift */
+ *idlest_bit = OMAP3430ES2_ST_DSS_IDLE_SHIFT;
+}
+
+/**
+ * omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
+ * @clk: struct clk * being enabled
+ * @idlest_reg: void __iomem ** to store CM_IDLEST reg address into
+ * @idlest_bit: pointer to a u8 to store the CM_IDLEST bit shift into
+ *
+ * The OMAP3430ES2 HSOTGUSB target CM_IDLEST bit is at a different
+ * shift from the CM_{I,F}CLKEN bit. Pass back the correct info via
+ * @idlest_reg and @idlest_bit. No return value.
+ */
+static void omap3430es2_clk_hsotgusb_find_idlest(struct clk *clk,
+ void __iomem **idlest_reg,
+ u8 *idlest_bit)
+{
+ u32 r;
+
+ r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
+ *idlest_reg = (__force void __iomem *)r;
+ *idlest_bit = OMAP3430ES2_ST_HSOTGUSB_IDLE_SHIFT;
+}
+
/**
* omap3_dpll_recalc - recalculate DPLL rate
* @clk: DPLL struct clk
u32 unlock_dll = 0;
u32 c;
unsigned long validrate, sdrcrate, mpurate;
- struct omap_sdrc_params *sp;
+ struct omap_sdrc_params *sdrc_cs0;
+ struct omap_sdrc_params *sdrc_cs1;
+ int ret;
if (!clk || !rate)
return -EINVAL;
else
sdrcrate >>= ((clk->rate / rate) >> 1);
- sp = omap2_sdrc_get_params(sdrcrate);
- if (!sp)
+ ret = omap2_sdrc_get_params(sdrcrate, &sdrc_cs0, &sdrc_cs1);
+ if (ret)
return -EINVAL;
if (sdrcrate < MIN_SDRC_DLL_LOCK_FREQ) {
pr_debug("clock: changing CORE DPLL rate from %lu to %lu\n", clk->rate,
validrate);
- pr_debug("clock: SDRC timing params used: %08x %08x %08x\n",
- sp->rfr_ctrl, sp->actim_ctrla, sp->actim_ctrlb);
-
- omap3_configure_core_dpll(sp->rfr_ctrl, sp->actim_ctrla,
- sp->actim_ctrlb, new_div, unlock_dll, c,
- sp->mr, rate > clk->rate);
+ pr_debug("clock: SDRC CS0 timing params used:"
+ " RFR %08x CTRLA %08x CTRLB %08x MR %08x\n",
+ sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
+ sdrc_cs0->actim_ctrlb, sdrc_cs0->mr);
+ if (sdrc_cs1)
+ pr_debug("clock: SDRC CS1 timing params used: "
+ " RFR %08x CTRLA %08x CTRLB %08x MR %08x\n",
+ sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla,
+ sdrc_cs1->actim_ctrlb, sdrc_cs1->mr);
+
+ if (sdrc_cs1)
+ omap3_configure_core_dpll(
+ new_div, unlock_dll, c, rate > clk->rate,
+ sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
+ sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
+ sdrc_cs1->rfr_ctrl, sdrc_cs1->actim_ctrla,
+ sdrc_cs1->actim_ctrlb, sdrc_cs1->mr);
+ else
+ omap3_configure_core_dpll(
+ new_div, unlock_dll, c, rate > clk->rate,
+ sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
+ sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
+ 0, 0, 0, 0);
return 0;
}
{ .parent = NULL }
};
-static struct clk ssi_ssr_fck = {
+static struct clk ssi_ssr_fck_3430es1 = {
.name = "ssi_ssr_fck",
.ops = &clkops_omap2_dflt,
.init = &omap2_init_clksel_parent,
.recalc = &omap2_clksel_recalc,
};
-static struct clk ssi_sst_fck = {
+static struct clk ssi_ssr_fck_3430es2 = {
+ .name = "ssi_ssr_fck",
+ .ops = &clkops_omap3430es2_ssi_wait,
+ .init = &omap2_init_clksel_parent,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = OMAP3430_EN_SSI_SHIFT,
+ .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
+ .clksel_mask = OMAP3430_CLKSEL_SSI_MASK,
+ .clksel = ssi_ssr_clksel,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk ssi_sst_fck_3430es1 = {
.name = "ssi_sst_fck",
.ops = &clkops_null,
- .parent = &ssi_ssr_fck,
+ .parent = &ssi_ssr_fck_3430es1,
+ .fixed_div = 2,
+ .recalc = &omap2_fixed_divisor_recalc,
+};
+
+static struct clk ssi_sst_fck_3430es2 = {
+ .name = "ssi_sst_fck",
+ .ops = &clkops_null,
+ .parent = &ssi_ssr_fck_3430es2,
.fixed_div = 2,
.recalc = &omap2_fixed_divisor_recalc,
};
.recalc = &followparent_recalc,
};
-static struct clk hsotgusb_ick = {
+static struct clk hsotgusb_ick_3430es1 = {
.name = "hsotgusb_ick",
- .ops = &clkops_omap2_dflt_wait,
+ .ops = &clkops_omap2_dflt,
+ .parent = &core_l3_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
+ .clkdm_name = "core_l3_clkdm",
+ .recalc = &followparent_recalc,
+};
+
+static struct clk hsotgusb_ick_3430es2 = {
+ .name = "hsotgusb_ick",
+ .ops = &clkops_omap3430es2_hsotgusb_wait,
.parent = &core_l3_ick,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT,
.recalc = &followparent_recalc,
};
-static struct clk ssi_ick = {
+static struct clk ssi_ick_3430es1 = {
.name = "ssi_ick",
.ops = &clkops_omap2_dflt,
.parent = &ssi_l4_ick,
.recalc = &followparent_recalc,
};
+static struct clk ssi_ick_3430es2 = {
+ .name = "ssi_ick",
+ .ops = &clkops_omap3430es2_ssi_wait,
+ .parent = &ssi_l4_ick,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
+ .enable_bit = OMAP3430_EN_SSI_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
+};
+
/* REVISIT: Technically the TRM claims that this is CORE_CLK based,
* but l4_ick makes more sense to me */
};
/* DSS */
-static struct clk dss1_alwon_fck = {
+static struct clk dss1_alwon_fck_3430es1 = {
.name = "dss1_alwon_fck",
.ops = &clkops_omap2_dflt,
.parent = &dpll4_m4x2_ck,
.recalc = &followparent_recalc,
};
+static struct clk dss1_alwon_fck_3430es2 = {
+ .name = "dss1_alwon_fck",
+ .ops = &clkops_omap3430es2_dss_usbhost_wait,
+ .parent = &dpll4_m4x2_ck,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
+ .enable_bit = OMAP3430_EN_DSS1_SHIFT,
+ .clkdm_name = "dss_clkdm",
+ .recalc = &followparent_recalc,
+};
+
static struct clk dss_tv_fck = {
.name = "dss_tv_fck",
.ops = &clkops_omap2_dflt,
.recalc = &followparent_recalc,
};
-static struct clk dss_ick = {
+static struct clk dss_ick_3430es1 = {
/* Handles both L3 and L4 clocks */
.name = "dss_ick",
.ops = &clkops_omap2_dflt,
.recalc = &followparent_recalc,
};
+static struct clk dss_ick_3430es2 = {
+ /* Handles both L3 and L4 clocks */
+ .name = "dss_ick",
+ .ops = &clkops_omap3430es2_dss_usbhost_wait,
+ .parent = &l4_ick,
+ .init = &omap2_init_clk_clkdm,
+ .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
+ .enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
+ .clkdm_name = "dss_clkdm",
+ .recalc = &followparent_recalc,
+};
+
/* CAM */
static struct clk cam_mclk = {
static struct clk usbhost_120m_fck = {
.name = "usbhost_120m_fck",
- .ops = &clkops_omap2_dflt_wait,
+ .ops = &clkops_omap2_dflt,
.parent = &dpll5_m2_ck,
.init = &omap2_init_clk_clkdm,
.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
static struct clk usbhost_48m_fck = {
.name = "usbhost_48m_fck",
- .ops = &clkops_omap2_dflt_wait,
+ .ops = &clkops_omap3430es2_dss_usbhost_wait,
.parent = &omap_48m_fck,
.init = &omap2_init_clk_clkdm,
.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
static struct clk usbhost_ick = {
/* Handles both L3 and L4 clocks */
.name = "usbhost_ick",
- .ops = &clkops_omap2_dflt_wait,
+ .ops = &clkops_omap3430es2_dss_usbhost_wait,
.parent = &l4_ick,
.init = &omap2_init_clk_clkdm,
.enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
* These registers appear once per CM module.
*/
-#define OMAP3430_CM_REVISION OMAP_CM_REGADDR(OCP_MOD, 0x0000)
-#define OMAP3430_CM_SYSCONFIG OMAP_CM_REGADDR(OCP_MOD, 0x0010)
-#define OMAP3430_CM_POLCTRL OMAP_CM_REGADDR(OCP_MOD, 0x009c)
+#define OMAP3430_CM_REVISION OMAP34XX_CM_REGADDR(OCP_MOD, 0x0000)
+#define OMAP3430_CM_SYSCONFIG OMAP34XX_CM_REGADDR(OCP_MOD, 0x0010)
+#define OMAP3430_CM_POLCTRL OMAP34XX_CM_REGADDR(OCP_MOD, 0x009c)
#define OMAP3_CM_CLKOUT_CTRL_OFFSET 0x0070
#define OMAP3430_CM_CLKOUT_CTRL OMAP_CM_REGADDR(OMAP3430_CCR_MOD, 0x0070)
return v;
}
-void __init omap2_init_common_hw(struct omap_sdrc_params *sp)
+void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
+ struct omap_sdrc_params *sdrc_cs1)
{
omap2_mux_init();
#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */
pwrdm_init(powerdomains_omap);
clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
omap2_clk_init();
- omap2_sdrc_init(sp);
+ omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
_omap2_init_reprogram_sdrc();
#endif
gpmc_init();
if (i != 0)
break;
ret = PTR_ERR(reg);
+ hsmmc[i].vcc = NULL;
goto err;
}
hsmmc[i].vcc = reg;
static void twl_mmc_cleanup(struct device *dev)
{
struct omap_mmc_platform_data *mmc = dev->platform_data;
+ int i;
gpio_free(mmc->slots[0].switch_pin);
+ for(i = 0; i < ARRAY_SIZE(hsmmc); i++) {
+ regulator_put(hsmmc[i].vcc);
+ regulator_put(hsmmc[i].vcc_aux);
+ }
}
#ifdef CONFIG_PM
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
MUX_CFG_34XX("J25_34XX_GPIO170", 0x1c6,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
+
+/* OMAP3 SDRC CKE signals to SDR/DDR ram chips */
+MUX_CFG_34XX("H16_34XX_SDRC_CKE0", 0x262,
+ OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT)
+MUX_CFG_34XX("H17_34XX_SDRC_CKE1", 0x264,
+ OMAP34XX_MUX_MODE0 | OMAP34XX_PIN_OUTPUT)
};
#define OMAP34XX_PINS_SZ ARRAY_SIZE(omap34xx_pins)
#ifndef __ARCH_ARM_MACH_OMAP2_PM_H
#define __ARCH_ARM_MACH_OMAP2_PM_H
-extern int omap2_pm_init(void);
-extern int omap3_pm_init(void);
-
#ifdef CONFIG_PM_DEBUG
extern void omap2_pm_dump(int mode, int resume, unsigned int us);
extern int omap2_pm_debug;
WKUP_MOD, PM_WKEN);
}
-int __init omap2_pm_init(void)
+static int __init omap2_pm_init(void)
{
u32 l;
struct power_state {
struct powerdomain *pwrdm;
u32 next_state;
+#ifdef CONFIG_SUSPEND
u32 saved_state;
+#endif
struct list_head node;
};
local_irq_enable();
}
+#ifdef CONFIG_SUSPEND
+static suspend_state_t suspend_state;
+
static int omap3_pm_prepare(void)
{
disable_hlt();
restore:
/* Restore next_pwrsts */
list_for_each_entry(pwrst, &pwrst_list, node) {
- set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
if (state > pwrst->next_state) {
printk(KERN_INFO "Powerdomain (%s) didn't enter "
pwrst->pwrdm->name, pwrst->next_state);
ret = -1;
}
+ set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
}
if (ret)
printk(KERN_ERR "Could not enter target state in pm_suspend\n");
return ret;
}
-static int omap3_pm_enter(suspend_state_t state)
+static int omap3_pm_enter(suspend_state_t unused)
{
int ret = 0;
- switch (state) {
+ switch (suspend_state) {
case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
ret = omap3_pm_suspend();
enable_hlt();
}
+/* Hooks to enable / disable UART interrupts during suspend */
+static int omap3_pm_begin(suspend_state_t state)
+{
+ suspend_state = state;
+ omap_uart_enable_irqs(0);
+ return 0;
+}
+
+static void omap3_pm_end(void)
+{
+ suspend_state = PM_SUSPEND_ON;
+ omap_uart_enable_irqs(1);
+ return;
+}
+
static struct platform_suspend_ops omap_pm_ops = {
+ .begin = omap3_pm_begin,
+ .end = omap3_pm_end,
.prepare = omap3_pm_prepare,
.enter = omap3_pm_enter,
.finish = omap3_pm_finish,
.valid = suspend_valid_only_mem,
};
+#endif /* CONFIG_SUSPEND */
/**
/* Clear any pending PRCM interrupts */
prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+ /* Don't attach IVA interrupts */
+ prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
+ prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
+ prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
+ prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
+
+ /* Clear any pending 'reset' flags */
+ prm_write_mod_reg(0xffffffff, MPU_MOD, RM_RSTST);
+ prm_write_mod_reg(0xffffffff, CORE_MOD, RM_RSTST);
+ prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, RM_RSTST);
+ prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, RM_RSTST);
+ prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, RM_RSTST);
+ prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, RM_RSTST);
+ prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, RM_RSTST);
+
+ /* Clear any pending PRCM interrupts */
+ prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
+
omap3_iva_idle();
omap3_d2d_idle();
}
return 0;
}
-int __init omap3_pm_init(void)
+static int __init omap3_pm_init(void)
{
struct power_state *pwrst, *tmp;
int ret;
_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
omap34xx_cpu_suspend_sz);
+#ifdef CONFIG_SUSPEND
suspend_set_ops(&omap_pm_ops);
+#endif /* CONFIG_SUSPEND */
pm_idle = omap3_pm_idle;
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/delay.h>
#include <mach/common.h>
#include <mach/prcm.h>
static void __iomem *prm_base;
static void __iomem *cm_base;
+#define MAX_MODULE_ENABLE_WAIT 100000
+
u32 omap_prcm_get_reset_sources(void)
{
/* XXX This presumably needs modification for 34XX */
}
EXPORT_SYMBOL(cm_rmw_mod_reg_bits);
+/**
+ * omap2_cm_wait_idlest - wait for IDLEST bit to indicate module readiness
+ * @reg: physical address of module IDLEST register
+ * @mask: value to mask against to determine if the module is active
+ * @name: name of the clock (for printk)
+ *
+ * Returns 1 if the module indicated readiness in time, or 0 if it
+ * failed to enable in roughly MAX_MODULE_ENABLE_WAIT microseconds.
+ */
+int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, const char *name)
+{
+ int i = 0;
+ int ena = 0;
+
+ /*
+ * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
+ * 34xx reverses this, just to keep us on our toes
+ */
+ if (cpu_is_omap24xx())
+ ena = mask;
+ else if (cpu_is_omap34xx())
+ ena = 0;
+ else
+ BUG();
+
+ /* Wait for lock */
+ while (((__raw_readl(reg) & mask) != ena) &&
+ (i++ < MAX_MODULE_ENABLE_WAIT))
+ udelay(1);
+
+ if (i < MAX_MODULE_ENABLE_WAIT)
+ pr_debug("cm: Module associated with clock %s ready after %d "
+ "loops\n", name, i);
+ else
+ pr_err("cm: Module associated with clock %s didn't enable in "
+ "%d tries\n", name, MAX_MODULE_ENABLE_WAIT);
+
+ return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
+};
+
void __init omap2_set_globals_prcm(struct omap_globals *omap2_globals)
{
prm_base = omap2_globals->prm;
#include <mach/sdrc.h>
#include "sdrc.h"
-static struct omap_sdrc_params *sdrc_init_params;
+static struct omap_sdrc_params *sdrc_init_params_cs0, *sdrc_init_params_cs1;
void __iomem *omap2_sdrc_base;
void __iomem *omap2_sms_base;
/**
* omap2_sdrc_get_params - return SDRC register values for a given clock rate
* @r: SDRC clock rate (in Hz)
+ * @sdrc_cs0: chip select 0 ram timings **
+ * @sdrc_cs1: chip select 1 ram timings **
*
* Return pre-calculated values for the SDRC_ACTIM_CTRLA,
- * SDRC_ACTIM_CTRLB, SDRC_RFR_CTRL, and SDRC_MR registers, for a given
- * SDRC clock rate 'r'. These parameters control various timing
- * delays in the SDRAM controller that are expressed in terms of the
- * number of SDRC clock cycles to wait; hence the clock rate
- * dependency. Note that sdrc_init_params must be sorted rate
- * descending. Also assumes that both chip-selects use the same
- * timing parameters. Returns a struct omap_sdrc_params * upon
- * success, or NULL upon failure.
+ * SDRC_ACTIM_CTRLB, SDRC_RFR_CTRL and SDRC_MR registers in sdrc_cs[01]
+ * structs,for a given SDRC clock rate 'r'.
+ * These parameters control various timing delays in the SDRAM controller
+ * that are expressed in terms of the number of SDRC clock cycles to
+ * wait; hence the clock rate dependency.
+ *
+ * Supports 2 different timing parameters for both chip selects.
+ *
+ * Note 1: the sdrc_init_params_cs[01] must be sorted rate descending.
+ * Note 2: If sdrc_init_params_cs_1 is not NULL it must be of same size
+ * as sdrc_init_params_cs_0.
+ *
+ * Fills in the struct omap_sdrc_params * for each chip select.
+ * Returns 0 upon success or -1 upon failure.
*/
-struct omap_sdrc_params *omap2_sdrc_get_params(unsigned long r)
+int omap2_sdrc_get_params(unsigned long r,
+ struct omap_sdrc_params **sdrc_cs0,
+ struct omap_sdrc_params **sdrc_cs1)
{
- struct omap_sdrc_params *sp;
+ struct omap_sdrc_params *sp0, *sp1;
- if (!sdrc_init_params)
- return NULL;
+ if (!sdrc_init_params_cs0)
+ return -1;
- sp = sdrc_init_params;
+ sp0 = sdrc_init_params_cs0;
+ sp1 = sdrc_init_params_cs1;
- while (sp->rate && sp->rate != r)
- sp++;
+ while (sp0->rate && sp0->rate != r) {
+ sp0++;
+ if (sdrc_init_params_cs1)
+ sp1++;
+ }
- if (!sp->rate)
- return NULL;
+ if (!sp0->rate)
+ return -1;
- return sp;
+ *sdrc_cs0 = sp0;
+ *sdrc_cs1 = sp1;
+ return 0;
}
/**
* omap2_sdrc_init - initialize SMS, SDRC devices on boot
- * @sp: pointer to a null-terminated list of struct omap_sdrc_params
+ * @sdrc_cs[01]: pointers to a null-terminated list of struct omap_sdrc_params
+ * Support for 2 chip selects timings
*
* Turn on smart idle modes for SDRAM scheduler and controller.
* Program a known-good configuration for the SDRC to deal with buggy
* bootloaders.
*/
-void __init omap2_sdrc_init(struct omap_sdrc_params *sp)
+void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
+ struct omap_sdrc_params *sdrc_cs1)
{
u32 l;
l |= (0x2 << 3);
sdrc_write_reg(l, SDRC_SYSCONFIG);
- sdrc_init_params = sp;
+ sdrc_init_params_cs0 = sdrc_cs0;
+ sdrc_init_params_cs1 = sdrc_cs1;
/* XXX Enable SRFRONIDLEREQ here also? */
+ /*
+ * PWDENA should not be set due to 34xx erratum 1.150 - PWDENA
+ * can cause random memory corruption
+ */
l = (1 << SDRC_POWER_EXTCLKDIS_SHIFT) |
- (1 << SDRC_POWER_PWDENA_SHIFT) |
(1 << SDRC_POWER_PAGEPOLICY_SHIFT);
sdrc_write_reg(l, SDRC_POWER);
}
struct plat_serial8250_port *p;
struct list_head node;
+ struct platform_device pdev;
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
int context_valid;
#endif
};
-static struct omap_uart_state omap_uart[OMAP_MAX_NR_PORTS];
static LIST_HEAD(uart_list);
-static struct plat_serial8250_port serial_platform_data[] = {
+static struct plat_serial8250_port serial_platform_data0[] = {
{
.membase = IO_ADDRESS(OMAP_UART1_BASE),
.mapbase = OMAP_UART1_BASE,
.regshift = 2,
.uartclk = OMAP24XX_BASE_BAUD * 16,
}, {
+ .flags = 0
+ }
+};
+
+static struct plat_serial8250_port serial_platform_data1[] = {
+ {
.membase = IO_ADDRESS(OMAP_UART2_BASE),
.mapbase = OMAP_UART2_BASE,
.irq = 73,
.regshift = 2,
.uartclk = OMAP24XX_BASE_BAUD * 16,
}, {
+ .flags = 0
+ }
+};
+
+static struct plat_serial8250_port serial_platform_data2[] = {
+ {
.membase = IO_ADDRESS(OMAP_UART3_BASE),
.mapbase = OMAP_UART3_BASE,
.irq = 74,
clk_disable(uart->fck);
}
+static void omap_uart_enable_wakeup(struct omap_uart_state *uart)
+{
+ /* Set wake-enable bit */
+ if (uart->wk_en && uart->wk_mask) {
+ u32 v = __raw_readl(uart->wk_en);
+ v |= uart->wk_mask;
+ __raw_writel(v, uart->wk_en);
+ }
+
+ /* Ensure IOPAD wake-enables are set */
+ if (cpu_is_omap34xx() && uart->padconf) {
+ u16 v = omap_ctrl_readw(uart->padconf);
+ v |= OMAP3_PADCONF_WAKEUPENABLE0;
+ omap_ctrl_writew(v, uart->padconf);
+ }
+}
+
+static void omap_uart_disable_wakeup(struct omap_uart_state *uart)
+{
+ /* Clear wake-enable bit */
+ if (uart->wk_en && uart->wk_mask) {
+ u32 v = __raw_readl(uart->wk_en);
+ v &= ~uart->wk_mask;
+ __raw_writel(v, uart->wk_en);
+ }
+
+ /* Ensure IOPAD wake-enables are cleared */
+ if (cpu_is_omap34xx() && uart->padconf) {
+ u16 v = omap_ctrl_readw(uart->padconf);
+ v &= ~OMAP3_PADCONF_WAKEUPENABLE0;
+ omap_ctrl_writew(v, uart->padconf);
+ }
+}
+
static void omap_uart_smart_idle_enable(struct omap_uart_state *uart,
int enable)
{
static void omap_uart_allow_sleep(struct omap_uart_state *uart)
{
+ if (device_may_wakeup(&uart->pdev.dev))
+ omap_uart_enable_wakeup(uart);
+ else
+ omap_uart_disable_wakeup(uart);
+
if (!uart->clocked)
return;
/* Check for normal UART wakeup */
if (__raw_readl(uart->wk_st) & uart->wk_mask)
omap_uart_block_sleep(uart);
-
return;
}
}
return IRQ_NONE;
}
-static u32 sleep_timeout = DEFAULT_TIMEOUT;
-
static void omap_uart_idle_init(struct omap_uart_state *uart)
{
- u32 v;
struct plat_serial8250_port *p = uart->p;
int ret;
uart->can_sleep = 0;
- uart->timeout = sleep_timeout;
+ uart->timeout = DEFAULT_TIMEOUT;
setup_timer(&uart->timer, omap_uart_idle_timer,
(unsigned long) uart);
mod_timer(&uart->timer, jiffies + uart->timeout);
uart->padconf = 0;
}
- /* Set wake-enable bit */
- if (uart->wk_en && uart->wk_mask) {
- v = __raw_readl(uart->wk_en);
- v |= uart->wk_mask;
- __raw_writel(v, uart->wk_en);
- }
-
- /* Ensure IOPAD wake-enables are set */
- if (cpu_is_omap34xx() && uart->padconf) {
- u16 v;
-
- v = omap_ctrl_readw(uart->padconf);
- v |= OMAP3_PADCONF_WAKEUPENABLE0;
- omap_ctrl_writew(v, uart->padconf);
- }
-
p->flags |= UPF_SHARE_IRQ;
ret = request_irq(p->irq, omap_uart_interrupt, IRQF_SHARED,
"serial idle", (void *)uart);
WARN_ON(ret);
}
-static ssize_t sleep_timeout_show(struct kobject *kobj,
- struct kobj_attribute *attr,
+void omap_uart_enable_irqs(int enable)
+{
+ int ret;
+ struct omap_uart_state *uart;
+
+ list_for_each_entry(uart, &uart_list, node) {
+ if (enable)
+ ret = request_irq(uart->p->irq, omap_uart_interrupt,
+ IRQF_SHARED, "serial idle", (void *)uart);
+ else
+ free_irq(uart->p->irq, (void *)uart);
+ }
+}
+
+static ssize_t sleep_timeout_show(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
- return sprintf(buf, "%u\n", sleep_timeout / HZ);
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct omap_uart_state *uart = container_of(pdev,
+ struct omap_uart_state, pdev);
+
+ return sprintf(buf, "%u\n", uart->timeout / HZ);
}
-static ssize_t sleep_timeout_store(struct kobject *kobj,
- struct kobj_attribute *attr,
+static ssize_t sleep_timeout_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t n)
{
- struct omap_uart_state *uart;
+ struct platform_device *pdev = container_of(dev,
+ struct platform_device, dev);
+ struct omap_uart_state *uart = container_of(pdev,
+ struct omap_uart_state, pdev);
unsigned int value;
if (sscanf(buf, "%u", &value) != 1) {
printk(KERN_ERR "sleep_timeout_store: Invalid value\n");
return -EINVAL;
}
- sleep_timeout = value * HZ;
- list_for_each_entry(uart, &uart_list, node) {
- uart->timeout = sleep_timeout;
- if (uart->timeout)
- mod_timer(&uart->timer, jiffies + uart->timeout);
- else
- /* A zero value means disable timeout feature */
- omap_uart_block_sleep(uart);
- }
+
+ uart->timeout = value * HZ;
+ if (uart->timeout)
+ mod_timer(&uart->timer, jiffies + uart->timeout);
+ else
+ /* A zero value means disable timeout feature */
+ omap_uart_block_sleep(uart);
+
return n;
}
-static struct kobj_attribute sleep_timeout_attr =
- __ATTR(sleep_timeout, 0644, sleep_timeout_show, sleep_timeout_store);
-
+DEVICE_ATTR(sleep_timeout, 0644, sleep_timeout_show, sleep_timeout_store);
+#define DEV_CREATE_FILE(dev, attr) WARN_ON(device_create_file(dev, attr))
#else
static inline void omap_uart_idle_init(struct omap_uart_state *uart) {}
+#define DEV_CREATE_FILE(dev, attr)
#endif /* CONFIG_PM */
-static struct platform_device serial_device = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = serial_platform_data,
+static struct omap_uart_state omap_uart[OMAP_MAX_NR_PORTS] = {
+ {
+ .pdev = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = serial_platform_data0,
+ },
+ },
+ }, {
+ .pdev = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM1,
+ .dev = {
+ .platform_data = serial_platform_data1,
+ },
+ },
+ }, {
+ .pdev = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM2,
+ .dev = {
+ .platform_data = serial_platform_data2,
+ },
+ },
},
};
void __init omap_serial_init(void)
{
- int i, err;
+ int i;
const struct omap_uart_config *info;
char name[16];
if (info == NULL)
return;
- if (cpu_is_omap44xx()) {
- for (i = 0; i < OMAP_MAX_NR_PORTS; i++)
- serial_platform_data[i].irq += 32;
- }
for (i = 0; i < OMAP_MAX_NR_PORTS; i++) {
- struct plat_serial8250_port *p = serial_platform_data + i;
struct omap_uart_state *uart = &omap_uart[i];
+ struct platform_device *pdev = &uart->pdev;
+ struct device *dev = &pdev->dev;
+ struct plat_serial8250_port *p = dev->platform_data;
if (!(info->enabled_uarts & (1 << i))) {
p->membase = NULL;
uart->num = i;
p->private_data = uart;
uart->p = p;
- list_add(&uart->node, &uart_list);
+ list_add_tail(&uart->node, &uart_list);
+
+ if (cpu_is_omap44xx())
+ p->irq += 32;
omap_uart_enable_clocks(uart);
omap_uart_reset(uart);
omap_uart_idle_init(uart);
- }
-
- err = platform_device_register(&serial_device);
-
-#ifdef CONFIG_PM
- if (!err)
- err = sysfs_create_file(&serial_device.dev.kobj,
- &sleep_timeout_attr.attr);
-#endif
+ if (WARN_ON(platform_device_register(pdev)))
+ continue;
+ if ((cpu_is_omap34xx() && uart->padconf) ||
+ (uart->wk_en && uart->wk_mask)) {
+ device_init_wakeup(dev, true);
+ DEV_CREATE_FILE(dev, &dev_attr_sleep_timeout);
+ }
+ }
}
-
.text
-/* r4 parameters */
+/* r1 parameters */
#define SDRC_NO_UNLOCK_DLL 0x0
#define SDRC_UNLOCK_DLL 0x1
/* SDRC_POWER bit settings */
#define SRFRONIDLEREQ_MASK 0x40
-#define PWDENA_MASK 0x4
/* CM_IDLEST1_CORE bit settings */
#define ST_SDRC_MASK 0x2
/*
* omap3_sram_configure_core_dpll - change DPLL3 M2 divider
- * r0 = new SDRC_RFR_CTRL register contents
- * r1 = new SDRC_ACTIM_CTRLA register contents
- * r2 = new SDRC_ACTIM_CTRLB register contents
- * r3 = new M2 divider setting (only 1 and 2 supported right now)
- * r4 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for
+ *
+ * Params passed in registers:
+ * r0 = new M2 divider setting (only 1 and 2 supported right now)
+ * r1 = unlock SDRC DLL? (1 = yes, 0 = no). Only unlock DLL for
* SDRC rates < 83MHz
- * r5 = number of MPU cycles to wait for SDRC to stabilize after
+ * r2 = number of MPU cycles to wait for SDRC to stabilize after
* reprogramming the SDRC when switching to a slower MPU speed
- * r6 = new SDRC_MR_0 register value
- * r7 = increasing SDRC rate? (1 = yes, 0 = no)
+ * r3 = increasing SDRC rate? (1 = yes, 0 = no)
+ *
+ * Params passed via the stack. The needed params will be copied in SRAM
+ * before use by the code in SRAM (SDRAM is not accessible during SDRC
+ * reconfiguration):
+ * new SDRC_RFR_CTRL_0 register contents
+ * new SDRC_ACTIM_CTRL_A_0 register contents
+ * new SDRC_ACTIM_CTRL_B_0 register contents
+ * new SDRC_MR_0 register value
+ * new SDRC_RFR_CTRL_1 register contents
+ * new SDRC_ACTIM_CTRL_A_1 register contents
+ * new SDRC_ACTIM_CTRL_B_1 register contents
+ * new SDRC_MR_1 register value
*
+ * If the param SDRC_RFR_CTRL_1 is 0, the parameters
+ * are not programmed into the SDRC CS1 registers
*/
ENTRY(omap3_sram_configure_core_dpll)
stmfd sp!, {r1-r12, lr} @ store regs to stack
- ldr r4, [sp, #52] @ pull extra args off the stack
- ldr r5, [sp, #56] @ load extra args from the stack
- ldr r6, [sp, #60] @ load extra args from the stack
- ldr r7, [sp, #64] @ load extra args from the stack
+
+ @ pull the extra args off the stack
+ @ and store them in SRAM
+ ldr r4, [sp, #52]
+ str r4, omap_sdrc_rfr_ctrl_0_val
+ ldr r4, [sp, #56]
+ str r4, omap_sdrc_actim_ctrl_a_0_val
+ ldr r4, [sp, #60]
+ str r4, omap_sdrc_actim_ctrl_b_0_val
+ ldr r4, [sp, #64]
+ str r4, omap_sdrc_mr_0_val
+ ldr r4, [sp, #68]
+ str r4, omap_sdrc_rfr_ctrl_1_val
+ cmp r4, #0 @ if SDRC_RFR_CTRL_1 is 0,
+ beq skip_cs1_params @ do not use cs1 params
+ ldr r4, [sp, #72]
+ str r4, omap_sdrc_actim_ctrl_a_1_val
+ ldr r4, [sp, #76]
+ str r4, omap_sdrc_actim_ctrl_b_1_val
+ ldr r4, [sp, #80]
+ str r4, omap_sdrc_mr_1_val
+skip_cs1_params:
dsb @ flush buffered writes to interconnect
- cmp r7, #1 @ if increasing SDRC clk rate,
+
+ cmp r3, #1 @ if increasing SDRC clk rate,
bleq configure_sdrc @ program the SDRC regs early (for RFR)
- cmp r4, #SDRC_UNLOCK_DLL @ set the intended DLL state
+ cmp r1, #SDRC_UNLOCK_DLL @ set the intended DLL state
bleq unlock_dll
blne lock_dll
bl sdram_in_selfrefresh @ put SDRAM in self refresh, idle SDRC
bl configure_core_dpll @ change the DPLL3 M2 divider
+ mov r12, r2
+ bl wait_clk_stable @ wait for SDRC to stabilize
bl enable_sdrc @ take SDRC out of idle
- cmp r4, #SDRC_UNLOCK_DLL @ wait for DLL status to change
+ cmp r1, #SDRC_UNLOCK_DLL @ wait for DLL status to change
bleq wait_dll_unlock
blne wait_dll_lock
- cmp r7, #1 @ if increasing SDRC clk rate,
+ cmp r3, #1 @ if increasing SDRC clk rate,
beq return_to_sdram @ return to SDRAM code, otherwise,
bl configure_sdrc @ reprogram SDRC regs now
- mov r12, r5
- bl wait_clk_stable @ wait for SDRC to stabilize
return_to_sdram:
isb @ prevent speculative exec past here
mov r0, #0 @ return value
unlock_dll:
ldr r11, omap3_sdrc_dlla_ctrl
ldr r12, [r11]
- and r12, r12, #FIXEDDELAY_MASK
+ bic r12, r12, #FIXEDDELAY_MASK
orr r12, r12, #FIXEDDELAY_DEFAULT
orr r12, r12, #DLLIDLE_MASK
str r12, [r11] @ (no OCP barrier needed)
ldr r12, [r11] @ read the contents of SDRC_POWER
mov r9, r12 @ keep a copy of SDRC_POWER bits
orr r12, r12, #SRFRONIDLEREQ_MASK @ enable self refresh on idle
- bic r12, r12, #PWDENA_MASK @ clear PWDENA
str r12, [r11] @ write back to SDRC_POWER register
ldr r12, [r11] @ posted-write barrier for SDRC
idle_sdrc:
ldr r12, [r11]
ldr r10, core_m2_mask_val @ modify m2 for core dpll
and r12, r12, r10
- orr r12, r12, r3, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT
+ orr r12, r12, r0, lsl #CORE_DPLL_CLKOUT_DIV_SHIFT
str r12, [r11]
ldr r12, [r11] @ posted-write barrier for CM
bx lr
bne wait_dll_unlock
bx lr
configure_sdrc:
- ldr r11, omap3_sdrc_rfr_ctrl
- str r0, [r11]
- ldr r11, omap3_sdrc_actim_ctrla
- str r1, [r11]
- ldr r11, omap3_sdrc_actim_ctrlb
- str r2, [r11]
+ ldr r12, omap_sdrc_rfr_ctrl_0_val @ fetch value from SRAM
+ ldr r11, omap3_sdrc_rfr_ctrl_0 @ fetch addr from SRAM
+ str r12, [r11] @ store
+ ldr r12, omap_sdrc_actim_ctrl_a_0_val
+ ldr r11, omap3_sdrc_actim_ctrl_a_0
+ str r12, [r11]
+ ldr r12, omap_sdrc_actim_ctrl_b_0_val
+ ldr r11, omap3_sdrc_actim_ctrl_b_0
+ str r12, [r11]
+ ldr r12, omap_sdrc_mr_0_val
ldr r11, omap3_sdrc_mr_0
- str r6, [r11]
- ldr r6, [r11] @ posted-write barrier for SDRC
+ str r12, [r11]
+ ldr r12, omap_sdrc_rfr_ctrl_1_val
+ cmp r12, #0 @ if SDRC_RFR_CTRL_1 is 0,
+ beq skip_cs1_prog @ do not program cs1 params
+ ldr r11, omap3_sdrc_rfr_ctrl_1
+ str r12, [r11]
+ ldr r12, omap_sdrc_actim_ctrl_a_1_val
+ ldr r11, omap3_sdrc_actim_ctrl_a_1
+ str r12, [r11]
+ ldr r12, omap_sdrc_actim_ctrl_b_1_val
+ ldr r11, omap3_sdrc_actim_ctrl_b_1
+ str r12, [r11]
+ ldr r12, omap_sdrc_mr_1_val
+ ldr r11, omap3_sdrc_mr_1
+ str r12, [r11]
+skip_cs1_prog:
+ ldr r12, [r11] @ posted-write barrier for SDRC
bx lr
omap3_sdrc_power:
.word OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST)
omap3_cm_iclken1_core:
.word OMAP34XX_CM_REGADDR(CORE_MOD, CM_ICLKEN1)
-omap3_sdrc_rfr_ctrl:
+
+omap3_sdrc_rfr_ctrl_0:
.word OMAP34XX_SDRC_REGADDR(SDRC_RFR_CTRL_0)
-omap3_sdrc_actim_ctrla:
+omap3_sdrc_rfr_ctrl_1:
+ .word OMAP34XX_SDRC_REGADDR(SDRC_RFR_CTRL_1)
+omap3_sdrc_actim_ctrl_a_0:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_0)
-omap3_sdrc_actim_ctrlb:
+omap3_sdrc_actim_ctrl_a_1:
+ .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_A_1)
+omap3_sdrc_actim_ctrl_b_0:
.word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_0)
+omap3_sdrc_actim_ctrl_b_1:
+ .word OMAP34XX_SDRC_REGADDR(SDRC_ACTIM_CTRL_B_1)
omap3_sdrc_mr_0:
.word OMAP34XX_SDRC_REGADDR(SDRC_MR_0)
+omap3_sdrc_mr_1:
+ .word OMAP34XX_SDRC_REGADDR(SDRC_MR_1)
+omap_sdrc_rfr_ctrl_0_val:
+ .word 0xDEADBEEF
+omap_sdrc_rfr_ctrl_1_val:
+ .word 0xDEADBEEF
+omap_sdrc_actim_ctrl_a_0_val:
+ .word 0xDEADBEEF
+omap_sdrc_actim_ctrl_a_1_val:
+ .word 0xDEADBEEF
+omap_sdrc_actim_ctrl_b_0_val:
+ .word 0xDEADBEEF
+omap_sdrc_actim_ctrl_b_1_val:
+ .word 0xDEADBEEF
+omap_sdrc_mr_0_val:
+ .word 0xDEADBEEF
+omap_sdrc_mr_1_val:
+ .word 0xDEADBEEF
+
omap3_sdrc_dlla_status:
.word OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
omap3_sdrc_dlla_ctrl:
ENTRY(omap3_sram_configure_core_dpll_sz)
.word . - omap3_sram_configure_core_dpll
+
}
};
-static void u300_init_check_chip(void)
+static void __init u300_init_check_chip(void)
{
u16 val;
printk("%d pages swap cached\n", cached);
}
+static void __init find_node_limits(int node, struct meminfo *mi,
+ unsigned long *min, unsigned long *max_low, unsigned long *max_high)
+{
+ int i;
+
+ *min = -1UL;
+ *max_low = *max_high = 0;
+
+ for_each_nodebank(i, mi, node) {
+ struct membank *bank = &mi->bank[i];
+ unsigned long start, end;
+
+ start = bank_pfn_start(bank);
+ end = bank_pfn_end(bank);
+
+ if (*min > start)
+ *min = start;
+ if (*max_high < end)
+ *max_high = end;
+ if (bank->highmem)
+ continue;
+ if (*max_low < end)
+ *max_low = end;
+ }
+}
+
/*
* FIXME: We really want to avoid allocating the bootmap bitmap
* over the top of the initrd. Hopefully, this is located towards
#endif
}
-static unsigned long __init bootmem_init_node(int node, struct meminfo *mi)
+static void __init bootmem_init_node(int node, struct meminfo *mi,
+ unsigned long start_pfn, unsigned long end_pfn)
{
- unsigned long start_pfn, end_pfn, boot_pfn;
+ unsigned long boot_pfn;
unsigned int boot_pages;
pg_data_t *pgdat;
int i;
- start_pfn = -1UL;
- end_pfn = 0;
-
/*
- * Calculate the pfn range, and map the memory banks for this node.
+ * Map the memory banks for this node.
*/
for_each_nodebank(i, mi, node) {
struct membank *bank = &mi->bank[i];
- unsigned long start, end;
- start = bank_pfn_start(bank);
- end = bank_pfn_end(bank);
-
- if (start_pfn > start)
- start_pfn = start;
- if (end_pfn < end)
- end_pfn = end;
-
- map_memory_bank(bank);
+ if (!bank->highmem)
+ map_memory_bank(bank);
}
- /*
- * If there is no memory in this node, ignore it.
- */
- if (end_pfn == 0)
- return end_pfn;
-
/*
* Allocate the bootmem bitmap page.
*/
for_each_nodebank(i, mi, node) {
struct membank *bank = &mi->bank[i];
- free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+ if (!bank->highmem)
+ free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
}
*/
reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-
- return end_pfn;
}
static void __init bootmem_reserve_initrd(int node)
static void __init bootmem_free_node(int node, struct meminfo *mi)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- unsigned long start_pfn, end_pfn;
- pg_data_t *pgdat = NODE_DATA(node);
+ unsigned long min, max_low, max_high;
int i;
- start_pfn = pgdat->bdata->node_min_pfn;
- end_pfn = pgdat->bdata->node_low_pfn;
+ find_node_limits(node, mi, &min, &max_low, &max_high);
/*
* initialise the zones within this node.
*/
memset(zone_size, 0, sizeof(zone_size));
- memset(zhole_size, 0, sizeof(zhole_size));
/*
* The size of this node has already been determined. If we need
* to do anything fancy with the allocation of this memory to the
* zones, now is the time to do it.
*/
- zone_size[0] = end_pfn - start_pfn;
+ zone_size[0] = max_low - min;
+#ifdef CONFIG_HIGHMEM
+ zone_size[ZONE_HIGHMEM] = max_high - max_low;
+#endif
/*
* For each bank in this node, calculate the size of the holes.
* holes = node_size - sum(bank_sizes_in_node)
*/
- zhole_size[0] = zone_size[0];
- for_each_nodebank(i, mi, node)
- zhole_size[0] -= bank_pfn_size(&mi->bank[i]);
+ memcpy(zhole_size, zone_size, sizeof(zhole_size));
+ for_each_nodebank(i, mi, node) {
+ int idx = 0;
+#ifdef CONFIG_HIGHMEM
+ if (mi->bank[i].highmem)
+ idx = ZONE_HIGHMEM;
+#endif
+ zhole_size[idx] -= bank_pfn_size(&mi->bank[i]);
+ }
/*
* Adjust the sizes according to any special requirements for
*/
arch_adjust_zones(node, zone_size, zhole_size);
- free_area_init_node(node, zone_size, start_pfn, zhole_size);
+ free_area_init_node(node, zone_size, min, zhole_size);
}
void __init bootmem_init(void)
{
struct meminfo *mi = &meminfo;
- unsigned long memend_pfn = 0;
+ unsigned long min, max_low, max_high;
int node, initrd_node;
/*
*/
initrd_node = check_initrd(mi);
+ max_low = max_high = 0;
+
/*
* Run through each node initialising the bootmem allocator.
*/
for_each_node(node) {
- unsigned long end_pfn = bootmem_init_node(node, mi);
+ unsigned long node_low, node_high;
+
+ find_node_limits(node, mi, &min, &node_low, &node_high);
+
+ if (node_low > max_low)
+ max_low = node_low;
+ if (node_high > max_high)
+ max_high = node_high;
+
+ /*
+ * If there is no memory in this node, ignore it.
+ * (We can't have nodes which have no lowmem)
+ */
+ if (node_low == 0)
+ continue;
+
+ bootmem_init_node(node, mi, min, node_low);
/*
* Reserve any special node zero regions.
*/
if (node == initrd_node)
bootmem_reserve_initrd(node);
-
- /*
- * Remember the highest memory PFN.
- */
- if (end_pfn > memend_pfn)
- memend_pfn = end_pfn;
}
/*
for_each_node(node)
bootmem_free_node(node, mi);
- high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;
+ high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
/*
* This doesn't seem to be used by the Linux memory manager any
* Note: max_low_pfn and max_pfn reflect the number of _pages_ in
* the system, not the maximum PFN.
*/
- max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
+ max_low_pfn = max_low - PHYS_PFN_OFFSET;
+ max_pfn = max_high - PHYS_PFN_OFFSET;
}
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
static void __init sanity_check_meminfo(void)
{
- int i, j;
+ int i, j, highmem = 0;
for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
#ifdef CONFIG_HIGHMEM
+ if (__va(bank->start) > VMALLOC_MIN ||
+ __va(bank->start) < (void *)PAGE_OFFSET)
+ highmem = 1;
+
+ bank->highmem = highmem;
+
/*
* Split those memory banks which are partially overlapping
* the vmalloc area greatly simplifying things later.
i++;
bank[1].size -= VMALLOC_MIN - __va(bank->start);
bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+ bank[1].highmem = highmem = 1;
j++;
}
bank->size = VMALLOC_MIN - __va(bank->start);
	/* Ensure desired rate is within allowed range. Some governors
* (ondemand) will just pass target_freq=0 to get the minimum. */
- if (target_freq < policy->cpuinfo.min_freq)
- target_freq = policy->cpuinfo.min_freq;
- if (target_freq > policy->cpuinfo.max_freq)
- target_freq = policy->cpuinfo.max_freq;
+ if (target_freq < policy->min)
+ target_freq = policy->min;
+ if (target_freq > policy->max)
+ target_freq = policy->max;
freqs.old = omap_getspeed(0);
freqs.new = clk_round_rate(mpu_clk, target_freq * 1000) / 1000;
cur_lch = next_lch;
} while (next_lch != -1);
- } else if (cpu_class_is_omap2()) {
+ } else if (cpu_is_omap242x() ||
+ (cpu_is_omap243x() && omap_type() <= OMAP2430_REV_ES1_0)) {
+
/* Errata: Need to write lch even if not using chaining */
dma_write(lch, CLNK_CTRL(lch));
}
__raw_writel(l, reg);
}
-static int __omap_get_gpio_datain(int gpio)
+static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
{
- struct gpio_bank *bank;
void __iomem *reg;
if (check_gpio(gpio) < 0)
return -EINVAL;
- bank = get_gpio_bank(gpio);
reg = bank->base;
switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP1
& (1 << get_gpio_index(gpio))) != 0;
}
+static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg;
+
+ if (check_gpio(gpio) < 0)
+ return -EINVAL;
+ reg = bank->base;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_OUTPUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DATA_OUTPUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP16XX
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DATAOUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP730
+ case METHOD_GPIO_730:
+ reg += OMAP730_GPIO_DATA_OUTPUT;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP850
+ case METHOD_GPIO_850:
+ reg += OMAP850_GPIO_DATA_OUTPUT;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP24XX) || defined(CONFIG_ARCH_OMAP34XX) || \
+ defined(CONFIG_ARCH_OMAP4)
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_DATAOUT;
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0;
+}
+
#define MOD_REG_BIT(reg, bit_mask, set) \
do { \
int l = __raw_readl(base + reg); \
struct gpio_bank *bank = get_irq_chip_data(irq);
_set_gpio_irqenable(bank, gpio, 0);
+ _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
}
static void gpio_unmask_irq(unsigned int irq)
unsigned int gpio = irq - IH_GPIO_BASE;
struct gpio_bank *bank = get_irq_chip_data(irq);
unsigned int irq_mask = 1 << get_gpio_index(gpio);
+ struct irq_desc *desc = irq_to_desc(irq);
+ u32 trigger = desc->status & IRQ_TYPE_SENSE_MASK;
+
+ if (trigger)
+ _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
/* For level-triggered GPIOs, the clearing must be done after
* the HW source is cleared, thus after the handler has run */
return 0;
}
+static int gpio_is_input(struct gpio_bank *bank, int mask)
+{
+ void __iomem *reg = bank->base;
+
+ switch (bank->method) {
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_IO_CNTL;
+ break;
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_DIR_CONTROL;
+ break;
+ case METHOD_GPIO_1610:
+ reg += OMAP1610_GPIO_DIRECTION;
+ break;
+ case METHOD_GPIO_730:
+ reg += OMAP730_GPIO_DIR_CONTROL;
+ break;
+ case METHOD_GPIO_850:
+ reg += OMAP850_GPIO_DIR_CONTROL;
+ break;
+ case METHOD_GPIO_24XX:
+ reg += OMAP24XX_GPIO_OE;
+ break;
+ }
+ return __raw_readl(reg) & mask;
+}
+
static int gpio_get(struct gpio_chip *chip, unsigned offset)
{
- return __omap_get_gpio_datain(chip->base + offset);
+ struct gpio_bank *bank;
+ void __iomem *reg;
+ int gpio;
+ u32 mask;
+
+ gpio = chip->base + offset;
+ bank = get_gpio_bank(gpio);
+ reg = bank->base;
+ mask = 1 << get_gpio_index(gpio);
+
+ if (gpio_is_input(bank, mask))
+ return _get_gpio_datain(bank, gpio);
+ else
+ return _get_gpio_dataout(bank, gpio);
}
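
The reason for the split above: DATAIN need not reflect the value a pin is actually driving when the line is configured as an output, so gpio_get() now consults the direction register and reads back the DATAOUT latch for outputs. A user-space sketch of that decision (register contents below are invented; on OMAP24xx-style banks an OE bit of 1 means input):

	#include <stdio.h>

	/* Sketch, not kernel code: pick the register to sample from the
	 * direction bit, then test the pin's bit in it. */
	static int gpio_get_model(unsigned oe, unsigned datain,
				  unsigned dataout, unsigned mask)
	{
		return (((oe & mask) ? datain : dataout) & mask) != 0;
	}

	int main(void)
	{
		/* pin 3 configured as output, driving 1; DATAIN reads 0 */
		printf("%d\n", gpio_get_model(0x0, 0x0, 0x8, 0x8)); /* 1 */
		return 0;
	}
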
static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-static int gpio_is_input(struct gpio_bank *bank, int mask)
-{
- void __iomem *reg = bank->base;
-
- switch (bank->method) {
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_IO_CNTL;
- break;
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DIR_CONTROL;
- break;
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DIRECTION;
- break;
- case METHOD_GPIO_730:
- reg += OMAP730_GPIO_DIR_CONTROL;
- break;
- case METHOD_GPIO_850:
- reg += OMAP850_GPIO_DIR_CONTROL;
- break;
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_OE;
- break;
- }
- return __raw_readl(reg) & mask;
-}
-
-
static int dbg_gpio_show(struct seq_file *s, void *unused)
{
unsigned i, j, gpio;
struct clkops {
int (*enable)(struct clk *);
void (*disable)(struct clk *);
+ void (*find_idlest)(struct clk *, void __iomem **, u8 *);
+ void (*find_companion)(struct clk *, void __iomem **, u8 *);
};
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \
#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \
cpu_is_omap44xx())
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) || \
- defined(CONFIG_ARCH_OMAP4)
-
/* Various silicon revisions for omap2 */
#define OMAP242X_CLASS 0x24200024
#define OMAP2420_REV_ES1_0 0x24200024
int omap_chip_is(struct omap_chip_id oci);
void omap2_check_revision(void);
-
-#endif /* defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) */
extern void omap1_init_common_hw(void);
extern void omap2_map_common_io(void);
-extern void omap2_init_common_hw(struct omap_sdrc_params *sp);
+extern void omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
+ struct omap_sdrc_params *sdrc_cs1);
#define __arch_ioremap(p,s,t) omap_ioremap(p,s,t)
#define __arch_iounmap(v) omap_iounmap(v)
AE5_34XX_GPIO143,
H19_34XX_GPIO164_OUT,
J25_34XX_GPIO170,
+
+ /* OMAP3 SDRC CKE signals to SDR/DDR ram chips */
+ H16_34XX_SDRC_CKE0,
+ H17_34XX_SDRC_CKE1,
};
struct omap_mux_cfg {
u32 omap_prcm_get_reset_sources(void);
void omap_prcm_arch_reset(char mode);
+int omap2_cm_wait_idlest(void __iomem *reg, u32 mask, const char *name);
#endif
#define SDRC_ACTIM_CTRL_A_0 0x09c
#define SDRC_ACTIM_CTRL_B_0 0x0a0
#define SDRC_RFR_CTRL_0 0x0a4
+#define SDRC_MR_1 0x0B4
+#define SDRC_ACTIM_CTRL_A_1 0x0C4
+#define SDRC_ACTIM_CTRL_B_1 0x0C8
+#define SDRC_RFR_CTRL_1 0x0D4
/*
* These values represent the number of memory clock cycles between
u32 mr;
};
-void __init omap2_sdrc_init(struct omap_sdrc_params *sp);
-struct omap_sdrc_params *omap2_sdrc_get_params(unsigned long r);
+void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
+ struct omap_sdrc_params *sdrc_cs1);
+int omap2_sdrc_get_params(unsigned long r,
+ struct omap_sdrc_params **sdrc_cs0,
+ struct omap_sdrc_params **sdrc_cs1);
#ifdef CONFIG_ARCH_OMAP2
extern void omap_uart_prepare_suspend(void);
extern void omap_uart_prepare_idle(int num);
extern void omap_uart_resume_idle(int num);
+extern void omap_uart_enable_irqs(int enable);
#endif
#endif
u32 mem_type);
extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
-extern u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl,
- u32 sdrc_actim_ctrla,
- u32 sdrc_actim_ctrlb, u32 m2,
- u32 unlock_dll, u32 f, u32 sdrc_mr,
- u32 inc);
+extern u32 omap3_configure_core_dpll(
+ u32 m2, u32 unlock_dll, u32 f, u32 inc,
+ u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
+ u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
+ u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
+ u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1);
/* Do not use these */
extern void omap1_sram_reprogram_clock(u32 ckctl, u32 dpllctl);
u32 mem_type);
extern unsigned long omap243x_sram_reprogram_sdrc_sz;
-
-extern u32 omap3_sram_configure_core_dpll(u32 sdrc_rfr_ctrl,
- u32 sdrc_actim_ctrla,
- u32 sdrc_actim_ctrlb, u32 m2,
- u32 unlock_dll, u32 f, u32 sdrc_mr,
- u32 inc);
+extern u32 omap3_sram_configure_core_dpll(
+ u32 m2, u32 unlock_dll, u32 f, u32 inc,
+ u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
+ u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
+ u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
+ u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1);
extern unsigned long omap3_sram_configure_core_dpll_sz;
#endif
#define OMAP2_SRAM_VA 0xe3000000
#define OMAP2_SRAM_PUB_VA (OMAP2_SRAM_VA + 0x800)
#define OMAP3_SRAM_PA 0x40200000
-#define OMAP3_SRAM_VA 0xd7000000
+#define OMAP3_SRAM_VA 0xe3000000
#define OMAP3_SRAM_PUB_PA 0x40208000
-#define OMAP3_SRAM_PUB_VA 0xd7008000
+#define OMAP3_SRAM_PUB_VA (OMAP3_SRAM_VA + 0x8000)
#define OMAP4_SRAM_PA 0x40200000 /*0x402f0000*/
#define OMAP4_SRAM_VA 0xd7000000 /*0xd70f0000*/
#ifdef CONFIG_ARCH_OMAP3
-static u32 (*_omap3_sram_configure_core_dpll)(u32 sdrc_rfr_ctrl,
- u32 sdrc_actim_ctrla,
- u32 sdrc_actim_ctrlb,
- u32 m2, u32 unlock_dll,
- u32 f, u32 sdrc_mr, u32 inc);
-u32 omap3_configure_core_dpll(u32 sdrc_rfr_ctrl, u32 sdrc_actim_ctrla,
- u32 sdrc_actim_ctrlb, u32 m2, u32 unlock_dll,
- u32 f, u32 sdrc_mr, u32 inc)
+static u32 (*_omap3_sram_configure_core_dpll)(
+ u32 m2, u32 unlock_dll, u32 f, u32 inc,
+ u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
+ u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
+ u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
+ u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1);
+
+u32 omap3_configure_core_dpll(u32 m2, u32 unlock_dll, u32 f, u32 inc,
+ u32 sdrc_rfr_ctrl_0, u32 sdrc_actim_ctrl_a_0,
+ u32 sdrc_actim_ctrl_b_0, u32 sdrc_mr_0,
+ u32 sdrc_rfr_ctrl_1, u32 sdrc_actim_ctrl_a_1,
+ u32 sdrc_actim_ctrl_b_1, u32 sdrc_mr_1)
{
BUG_ON(!_omap3_sram_configure_core_dpll);
- return _omap3_sram_configure_core_dpll(sdrc_rfr_ctrl,
- sdrc_actim_ctrla,
- sdrc_actim_ctrlb, m2,
- unlock_dll, f, sdrc_mr, inc);
+ return _omap3_sram_configure_core_dpll(
+ m2, unlock_dll, f, inc,
+ sdrc_rfr_ctrl_0, sdrc_actim_ctrl_a_0,
+ sdrc_actim_ctrl_b_0, sdrc_mr_0,
+ sdrc_rfr_ctrl_1, sdrc_actim_ctrl_a_1,
+ sdrc_actim_ctrl_b_1, sdrc_mr_1);
}
/* REVISIT: Should this be same as omap34xx_sram_init() after off-idle? */
/* calculate the MISCCR setting for the clock */
- if (parent == &clk_xtal)
+ if (parent == &clk_mpll)
source = S3C2410_MISCCR_CLK0_MPLL;
else if (parent == &clk_upll)
source = S3C2410_MISCCR_CLK0_UPLL;
ftp://ftp.hpl.hp.com/pub/linux-ia64/gas-030124.tar.gz)
endif
-ifeq ($(call cc-version),0304)
- cflags-$(CONFIG_ITANIUM) += -mtune=merced
- cflags-$(CONFIG_MCKINLEY) += -mtune=mckinley
-endif
-
KBUILD_CFLAGS += $(cflags-y)
head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
{
__u32 *p = (__u32 *) addr + (nr >> 5);
__u32 m = 1 << (nr & 31);
- int oldbitset = *p & m;
+ int oldbitset = (*p & m) != 0;
*p &= ~m;
return oldbitset;
#include <linux/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
-#include <asm/processor.h>
/*
* Next come the mappings that determine how mmap() protection bits
#include <asm/page.h>
EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
#ifdef CONFIG_VIRTUAL_MEM_MAP
#include <linux/bootmem.h>
EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__umoddi3);
-#include <asm/page.h>
-EXPORT_SYMBOL(copy_page);
-
#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
}
addr = ioremap(phys_addr, 0);
+ if (addr == NULL) {
+ spin_unlock_irqrestore(&iosapic_lock, flags);
+ return -ENOMEM;
+ }
ver = iosapic_version(addr);
if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
iounmap(addr);
int iommu_dma_supported(struct device *dev, u64 mask)
{
- struct dma_map_ops *ops = platform_dma_get_ops(dev);
-
- if (ops->dma_supported)
- return ops->dma_supported(dev, mask);
-
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
&cache_ktype_percpu_entry, &sys_dev->kobj,
"%s", "cache");
+ if (unlikely(retval < 0)) {
+ cpu_cache_sysfs_exit(cpu);
+ return retval;
+ }
for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
this_object = LEAF_KOBJECT_PTR(cpu,i);
}
kobject_put(&all_cpu_cache_info[cpu].kobj);
cpu_cache_sysfs_exit(cpu);
- break;
+ return retval;
}
kobject_uevent(&(this_object->kobj), KOBJ_ADD);
}
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#endif /* CONFIG_HUGETLB_PAGE */
#ifndef __ASSEMBLY__
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
-#include <linux/mm.h>
#include <asm/io.h>
struct pci_dev;
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
- /* Could be improved to check for memory though it better be
- * done via some global so platforms can set the limit in case
+ /* Could be improved so platforms can set the limit in case
* they have limited DMA windows
*/
- return mask >= DMA_BIT_MASK(32);
+ return mask >= (lmb_end_of_DRAM() - 1);
#else
return 1;
#endif
struct cpu_hw_counters *cpuhw;
unsigned long flags;
+ if (!ppmu)
+ return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_counters);
int n_lim;
int idx;
+ if (!ppmu)
+ return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_counters);
if (!cpuhw->disabled) {
long i, n, n0;
struct perf_counter *sub;
+ if (!ppmu)
+ return 0;
cpuhw = &__get_cpu_var(cpu_hw_counters);
n0 = cpuhw->n_counters;
n = collect_events(group_leader, ppmu->n_counter - n0,
{
struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+ if (!ppmu)
+ return;
memset(cpuhw, 0, sizeof(*cpuhw));
cpuhw->mmcr[0] = MMCR0_FC;
}
return platform_add_devices(ap325rxa_devices,
ARRAY_SIZE(ap325rxa_devices));
}
-device_initcall(ap325rxa_devices_setup);
+arch_initcall(ap325rxa_devices_setup);
/* Return the board specific boot mode pin configuration */
static int ap325rxa_mode_pins(void)
return platform_add_devices(migor_devices, ARRAY_SIZE(migor_devices));
}
-__initcall(migor_devices_setup);
+arch_initcall(migor_devices_setup);
/* Return the board specific boot mode pin configuration */
static int migor_mode_pins(void)
},
};
-/* KEYSC */
+/* KEYSC in SoC (Needs SW33-2 set to ON) */
static struct sh_keysc_info keysc_info = {
.mode = SH_KEYSC_MODE_1,
.scan_timing = 10,
static struct resource keysc_resources[] = {
[0] = {
- .start = 0x1a204000,
- .end = 0x1a20400f,
+ .name = "KEYSC",
+ .start = 0x044b0000,
+ .end = 0x044b000f,
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = IRQ0_KEY,
+ .start = 79,
.flags = IORESOURCE_IRQ,
},
};
return platform_add_devices(sh7619_devices,
ARRAY_SIZE(sh7619_devices));
}
-__initcall(sh7619_devices_setup);
+arch_initcall(sh7619_devices_setup);
void __init plat_irq_setup(void)
{
return platform_add_devices(mxg_devices,
ARRAY_SIZE(mxg_devices));
}
-__initcall(mxg_devices_setup);
+arch_initcall(mxg_devices_setup);
void __init plat_irq_setup(void)
{
return platform_add_devices(sh7201_devices,
ARRAY_SIZE(sh7201_devices));
}
-__initcall(sh7201_devices_setup);
+arch_initcall(sh7201_devices_setup);
void __init plat_irq_setup(void)
{
return platform_add_devices(sh7203_devices,
ARRAY_SIZE(sh7203_devices));
}
-__initcall(sh7203_devices_setup);
+arch_initcall(sh7203_devices_setup);
void __init plat_irq_setup(void)
{
return platform_add_devices(sh7206_devices,
ARRAY_SIZE(sh7206_devices));
}
-__initcall(sh7206_devices_setup);
+arch_initcall(sh7206_devices_setup);
void __init plat_irq_setup(void)
{
return platform_add_devices(sh7705_devices,
ARRAY_SIZE(sh7705_devices));
}
-__initcall(sh7705_devices_setup);
+arch_initcall(sh7705_devices_setup);
static struct platform_device *sh7705_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh770x_devices,
ARRAY_SIZE(sh770x_devices));
}
-__initcall(sh770x_devices_setup);
+arch_initcall(sh770x_devices_setup);
static struct platform_device *sh770x_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh7710_devices,
ARRAY_SIZE(sh7710_devices));
}
-__initcall(sh7710_devices_setup);
+arch_initcall(sh7710_devices_setup);
static struct platform_device *sh7710_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh7720_devices,
ARRAY_SIZE(sh7720_devices));
}
-__initcall(sh7720_devices_setup);
+arch_initcall(sh7720_devices_setup);
static struct platform_device *sh7720_early_devices[] __initdata = {
&cmt0_device,
return platform_add_devices(sh4202_devices,
ARRAY_SIZE(sh4202_devices));
}
-__initcall(sh4202_devices_setup);
+arch_initcall(sh4202_devices_setup);
static struct platform_device *sh4202_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh7750_devices,
ARRAY_SIZE(sh7750_devices));
}
-__initcall(sh7750_devices_setup);
+arch_initcall(sh7750_devices_setup);
static struct platform_device *sh7750_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh7760_devices,
ARRAY_SIZE(sh7760_devices));
}
-__initcall(sh7760_devices_setup);
+arch_initcall(sh7760_devices_setup);
static struct platform_device *sh7760_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh7343_devices,
ARRAY_SIZE(sh7343_devices));
}
-__initcall(sh7343_devices_setup);
+arch_initcall(sh7343_devices_setup);
static struct platform_device *sh7343_early_devices[] __initdata = {
&cmt_device,
return platform_add_devices(sh7366_devices,
ARRAY_SIZE(sh7366_devices));
}
-__initcall(sh7366_devices_setup);
+arch_initcall(sh7366_devices_setup);
static struct platform_device *sh7366_early_devices[] __initdata = {
&cmt_device,
return platform_add_devices(sh7722_devices,
ARRAY_SIZE(sh7722_devices));
}
-__initcall(sh7722_devices_setup);
+arch_initcall(sh7722_devices_setup);
static struct platform_device *sh7722_early_devices[] __initdata = {
&cmt_device,
return platform_add_devices(sh7723_devices,
ARRAY_SIZE(sh7723_devices));
}
-__initcall(sh7723_devices_setup);
+arch_initcall(sh7723_devices_setup);
static struct platform_device *sh7723_early_devices[] __initdata = {
&cmt_device,
return platform_add_devices(sh7724_devices,
ARRAY_SIZE(sh7724_devices));
}
-device_initcall(sh7724_devices_setup);
+arch_initcall(sh7724_devices_setup);
static struct platform_device *sh7724_early_devices[] __initdata = {
&cmt_device,
return platform_add_devices(sh7763_devices,
ARRAY_SIZE(sh7763_devices));
}
-__initcall(sh7763_devices_setup);
+arch_initcall(sh7763_devices_setup);
static struct platform_device *sh7763_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh7770_devices,
ARRAY_SIZE(sh7770_devices));
}
-__initcall(sh7770_devices_setup);
+arch_initcall(sh7770_devices_setup);
static struct platform_device *sh7770_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh7780_devices,
ARRAY_SIZE(sh7780_devices));
}
-__initcall(sh7780_devices_setup);
+arch_initcall(sh7780_devices_setup);
static struct platform_device *sh7780_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh7785_devices,
ARRAY_SIZE(sh7785_devices));
}
-__initcall(sh7785_devices_setup);
+arch_initcall(sh7785_devices_setup);
static struct platform_device *sh7785_early_devices[] __initdata = {
&tmu0_device,
return platform_add_devices(sh7786_devices,
ARRAY_SIZE(sh7786_devices));
}
-device_initcall(sh7786_devices_setup);
+arch_initcall(sh7786_devices_setup);
void __init plat_early_device_setup(void)
{
return platform_add_devices(shx3_devices,
ARRAY_SIZE(shx3_devices));
}
-__initcall(shx3_devices_setup);
+arch_initcall(shx3_devices_setup);
void __init plat_early_device_setup(void)
{
return platform_add_devices(sh5_devices,
ARRAY_SIZE(sh5_devices));
}
-__initcall(sh5_devices_setup);
+arch_initcall(sh5_devices_setup);
void __init plat_early_device_setup(void)
{
tst #SUSP_SH_SF, r0
bt skip_set_sf
-	/* SDRAM: disable power down and put in self-refresh mode */
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+	/* DBSC: put memory in self-refresh mode */
+ mov.l dben_reg, r4
+ mov.l dben_data0, r1
+ mov.l r1, @r4
+
+ mov.l dbrfpdn0_reg, r4
+ mov.l dbrfpdn0_data0, r1
+ mov.l r1, @r4
+
+ mov.l dbcmdcnt_reg, r4
+ mov.l dbcmdcnt_data0, r1
+ mov.l r1, @r4
+
+ mov.l dbcmdcnt_reg, r4
+ mov.l dbcmdcnt_data1, r1
+ mov.l r1, @r4
+
+ mov.l dbrfpdn0_reg, r4
+ mov.l dbrfpdn0_data1, r1
+ mov.l r1, @r4
+#else
+ /* SBSC: disable power down and put in self-refresh mode */
mov.l 1f, r4
mov.l 2f, r1
mov.l @r4, r2
mov.l 3f, r3
and r3, r2
mov.l r2, @r4
+#endif
skip_set_sf:
tst #SUSP_SH_SLEEP, r0
tst #SUSP_SH_SF, r0
bt skip_restore_sf
- /* SDRAM: set auto-refresh mode */
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+ /* DBSC: put memory in auto-refresh mode */
+
+ mov.l dbrfpdn0_reg, r4
+ mov.l dbrfpdn0_data0, r1
+ mov.l r1, @r4
+
+ /* sleep 140 ns */
+ nop
+ nop
+ nop
+ nop
+
+ mov.l dbcmdcnt_reg, r4
+ mov.l dbcmdcnt_data0, r1
+ mov.l r1, @r4
+
+ mov.l dbcmdcnt_reg, r4
+ mov.l dbcmdcnt_data1, r1
+ mov.l r1, @r4
+
+ mov.l dben_reg, r4
+ mov.l dben_data1, r1
+ mov.l r1, @r4
+
+ mov.l dbrfpdn0_reg, r4
+ mov.l dbrfpdn0_data2, r1
+ mov.l r1, @r4
+#else
+ /* SBSC: set auto-refresh mode */
mov.l 1f, r4
mov.l @r4, r2
mov.l 4f, r3
add r4, r3
or r2, r3
mov.l r3, @r1
+#endif
skip_restore_sf:
rts
nop
.balign 4
+#ifdef CONFIG_CPU_SUBTYPE_SH7724
+dben_reg: .long 0xfd000010 /* DBEN */
+dben_data0: .long 0
+dben_data1: .long 1
+dbrfpdn0_reg: .long 0xfd000040 /* DBRFPDN0 */
+dbrfpdn0_data0: .long 0
+dbrfpdn0_data1: .long 1
+dbrfpdn0_data2: .long 0x00010000
+dbcmdcnt_reg: .long 0xfd000014 /* DBCMDCNT */
+dbcmdcnt_data0: .long 2
+dbcmdcnt_data1: .long 4
+#else
1: .long 0xfe400008 /* SDCR0 */
2: .long 0x00000400
3: .long 0xffff7fff
4: .long 0xfffffbff
+#endif
5: .long 0xa4150020 /* STBCR */
6: .long 0xfe40001c /* RTCOR */
7: .long 0xfe400018 /* RTCNT */
select HAVE_UNSTABLE_SCHED_CLOCK
select HAVE_IDE
select HAVE_OPROFILE
+ select HAVE_PERF_COUNTERS if (!M386 && !M486)
select HAVE_IOREMAP_PROT
select HAVE_KPROBES
select ARCH_WANT_OPTIONAL_GPIOLIB
config X86_LOCAL_APIC
def_bool y
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
- select HAVE_PERF_COUNTERS if (!M386 && !M486)
config X86_IO_APIC
def_bool y
level = cpuid_eax(1);
if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+ /*
+ * Some BIOSes incorrectly force this feature, but only K8
+ * revision D (model = 0x14) and later actually support it.
+ */
+ if (c->x86_model < 0x14)
+ clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
}
if (c->x86 == 0x10 || c->x86 == 0x11)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}
-static const struct cpu_dev *this_cpu __cpuinitdata;
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+ display_cacheinfo(c);
+#else
+ /* Not much we can do here... */
+ /* Check if at least it has cpuid */
+ if (c->cpuid_level == -1) {
+ /* No cpuid. It must be an ancient CPU */
+ if (c->x86 == 4)
+ strcpy(c->x86_model_id, "486");
+ else if (c->x86 == 3)
+ strcpy(c->x86_model_id, "386");
+ }
+#endif
+}
+
+static const struct cpu_dev __cpuinitconst default_cpu = {
+ .c_init = default_init,
+ .c_vendor = "Unknown",
+ .c_x86_vendor = X86_VENDOR_UNKNOWN,
+};
+
+static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_64
- display_cacheinfo(c);
-#else
- /* Not much we can do here... */
- /* Check if at least it has cpuid */
- if (c->cpuid_level == -1) {
- /* No cpuid. It must be an ancient CPU */
- if (c->x86 == 4)
- strcpy(c->x86_model_id, "486");
- else if (c->x86 == 3)
- strcpy(c->x86_model_id, "386");
- }
-#endif
-}
-
-static const struct cpu_dev __cpuinitconst default_cpu = {
- .c_init = default_init,
- .c_vendor = "Unknown",
- .c_x86_vendor = X86_VENDOR_UNKNOWN,
-};
-
static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
unsigned int *v;
static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
+static DEFINE_PER_CPU(bool, thermal_throttle_active);
static atomic_t therm_throt_en = ATOMIC_INIT(0);
{
unsigned int cpu = smp_processor_id();
__u64 tmp_jiffs = get_jiffies_64();
+ bool was_throttled = __get_cpu_var(thermal_throttle_active);
+ bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
- if (curr)
+ if (is_throttled)
__get_cpu_var(thermal_throttle_count)++;
- if (time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+ if (!(was_throttled ^ is_throttled) &&
+ time_before64(tmp_jiffs, __get_cpu_var(next_check)))
return 0;
__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
/* if we just entered the thermal event */
- if (curr) {
+ if (is_throttled) {
printk(KERN_CRIT "CPU%d: Temperature above threshold, "
- "cpu clock throttled (total events = %lu)\n", cpu,
- __get_cpu_var(thermal_throttle_count));
+ "cpu clock throttled (total events = %lu)\n",
+ cpu, __get_cpu_var(thermal_throttle_count));
add_taint(TAINT_MACHINE_CHECK);
- } else {
- printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
+ } else if (was_throttled) {
+ printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
}
return 1;
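
The rewritten check above logs immediately on a throttle state change and otherwise rate-limits to one message per CHECK_INTERVAL; the XOR of the old and new state is the edge detector. A stand-alone model of that decision (the interval value is arbitrary):

	#include <stdbool.h>
	#include <stdio.h>

	static bool active;			/* thermal_throttle_active */
	static unsigned long next_check;	/* next time a log is allowed */

	static int therm_event(bool curr, unsigned long now)
	{
		bool was = active, is = active = curr;

		if (!(was ^ is) && now < next_check)
			return 0;		/* no edge, still rate-limited */
		next_check = now + 100;		/* CHECK_INTERVAL stand-in */
		if (is)
			printf("throttled\n");
		else if (was)
			printf("back to normal\n");
		return 1;
	}

	int main(void)
	{
		therm_event(true, 0);	/* logs: entering throttle */
		therm_event(true, 1);	/* silent: no edge, rate-limited */
		therm_event(false, 2);	/* logs: edge back to normal */
		return 0;
	}
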
int num_counters_fixed;
int counter_bits;
u64 counter_mask;
+ int apic;
u64 max_period;
u64 intel_ctrl;
};
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0000,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x0000,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
[PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
static bool reserve_pmc_hardware(void)
{
+#ifdef CONFIG_X86_LOCAL_APIC
int i;
if (nmi_watchdog == NMI_LOCAL_APIC)
if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
goto eventsel_fail;
}
+#endif
return true;
+#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
for (i--; i >= 0; i--)
release_evntsel_nmi(x86_pmu.eventsel + i);
enable_lapic_nmi_watchdog();
return false;
+#endif
}
static void release_pmc_hardware(void)
{
+#ifdef CONFIG_X86_LOCAL_APIC
int i;
for (i = 0; i < x86_pmu.num_counters; i++) {
if (nmi_watchdog == NMI_LOCAL_APIC)
enable_lapic_nmi_watchdog();
+#endif
}
static void hw_perf_counter_destroy(struct perf_counter *counter)
hwc->sample_period = x86_pmu.max_period;
hwc->last_period = hwc->sample_period;
atomic64_set(&hwc->period_left, hwc->sample_period);
+ } else {
+ /*
+ * If we have a PMU initialized but no APIC
+ * interrupts, we cannot sample hardware
+ * counters (user-space has to fall back and
+	 * sample via an hrtimer-based software counter):
+ */
+ if (!x86_pmu.apic)
+ return -EOPNOTSUPP;
}
counter->destroy = hw_perf_counter_destroy;
void set_perf_counter_pending(void)
{
+#ifdef CONFIG_X86_LOCAL_APIC
apic->send_IPI_self(LOCAL_PENDING_VECTOR);
+#endif
}
void perf_counters_lapic_init(void)
{
- if (!x86_pmu_initialized())
+#ifdef CONFIG_X86_LOCAL_APIC
+ if (!x86_pmu.apic || !x86_pmu_initialized())
return;
/*
* Always use NMI for PMU
*/
apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
}
static int __kprobes
regs = args->regs;
+#ifdef CONFIG_X86_LOCAL_APIC
apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
/*
* Can't rely on the handled return value to say it was our NMI, two
* counters could trigger 'simultaneously' raising two back-to-back NMIs.
.event_map = p6_pmu_event_map,
.raw_event = p6_pmu_raw_event,
.max_events = ARRAY_SIZE(p6_perfmon_event_map),
+ .apic = 1,
.max_period = (1ULL << 31) - 1,
.version = 0,
.num_counters = 2,
.event_map = intel_pmu_event_map,
.raw_event = intel_pmu_raw_event,
.max_events = ARRAY_SIZE(intel_perfmon_event_map),
+ .apic = 1,
/*
* Intel PMCs cannot be accessed sanely above 32 bit width,
* so we install an artificial 1<<31 period regardless of
.num_counters = 4,
.counter_bits = 48,
.counter_mask = (1ULL << 48) - 1,
+ .apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
};
return -ENODEV;
}
+ x86_pmu = p6_pmu;
+
if (!cpu_has_apic) {
- pr_info("no Local APIC, try rebooting with lapic");
- return -ENODEV;
+ pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
+ pr_info("no hardware sampling interrupt available.\n");
+ x86_pmu.apic = 0;
}
- x86_pmu = p6_pmu;
-
return 0;
}
}
static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
- { /* Handle problems with rebooting on Apple MacBook5,2 */
+ { /* Handle problems with rebooting on Apple MacBook5 */
.callback = set_pci_reboot,
- .ident = "Apple MacBook",
+ .ident = "Apple MacBook5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"),
},
},
- { /* Handle problems with rebooting on Apple MacBookPro5,1 */
+ { /* Handle problems with rebooting on Apple MacBookPro5 */
.callback = set_pci_reboot,
- .ident = "Apple MacBookPro5,1",
+ .ident = "Apple MacBookPro5",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"),
},
},
{ }
* use the TSC value at the transitions to calculate a pretty
 * good value for the TSC frequency.
*/
+static inline int pit_verify_msb(unsigned char val)
+{
+ /* Ignore LSB */
+ inb(0x42);
+ return inb(0x42) == val;
+}
+
static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
int count;
u64 tsc = 0;
for (count = 0; count < 50000; count++) {
- /* Ignore LSB */
- inb(0x42);
- if (inb(0x42) != val)
+ if (!pit_verify_msb(val))
break;
tsc = get_cycles();
}
* to do that is to just read back the 16-bit counter
* once from the PIT.
*/
- inb(0x42);
- inb(0x42);
+ pit_verify_msb(0);
if (pit_expect_msb(0xff, &tsc, &d1)) {
for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {
* Iterate until the error is less than 500 ppm
*/
delta -= tsc;
- if (d1+d2 < delta >> 11)
- goto success;
+ if (d1+d2 >= delta >> 11)
+ continue;
+
+ /*
+ * Check the PIT one more time to verify that
+ * all TSC reads were stable wrt the PIT.
+ *
+ * This also guarantees serialization of the
+ * last cycle read ('d2') in pit_expect_msb.
+ */
+ if (!pit_verify_msb(0xfe - i))
+ break;
+ goto success;
}
}
printk("Fast TSC calibration failed\n");
AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
+ AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
+ link offline */
/* ap->flags bits */
int (*check_ready)(struct ata_link *link))
{
struct ata_port *ap = link->ap;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
const char *reason = NULL;
unsigned long now, msecs;
struct ata_taskfile tf;
/* wait for link to become ready */
rc = ata_wait_after_reset(link, deadline, check_ready);
- /* link occupied, -ENODEV too is an error */
- if (rc) {
+ if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
+ /*
+ * Workaround for cases where link online status can't
+ * be trusted. Treat device readiness timeout as link
+ * offline.
+ */
+ ata_link_printk(link, KERN_INFO,
+ "device not ready, treating as offline\n");
+ *class = ATA_DEV_NONE;
+ } else if (rc) {
+ /* link occupied, -ENODEV too is an error */
reason = "device not ready";
goto fail;
- }
- *class = ahci_dev_classify(ap);
+ } else
+ *class = ahci_dev_classify(ap);
DPRINTK("EXIT, class=%u\n", *class);
return 0;
irq_sts = readl(port_mmio + PORT_IRQ_STAT);
if (irq_sts & PORT_IRQ_BAD_PMP) {
ata_link_printk(link, KERN_WARNING,
- "failed due to HW bug, retry pmp=0\n");
+ "applying SB600 PMP SRST workaround "
+ "and retrying\n");
rc = ahci_do_softreset(link, class, 0, deadline,
ahci_check_ready);
}
return !ver || strcmp(ver, dmi->driver_data) < 0;
}
+static bool ahci_broken_online(struct pci_dev *pdev)
+{
+#define ENCODE_BUSDEVFN(bus, slot, func) \
+ (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
+ static const struct dmi_system_id sysids[] = {
+ /*
+	 * There are several Gigabyte boards which use
+	 * SIMG5723s configured as hardware RAID. Certain
+	 * 5723 firmware revisions shipped there keep the link
+	 * online but fail to answer properly to SRST or
+	 * IDENTIFY when no device is attached downstream,
+	 * causing libata to retry quite a few times, leading
+	 * to excessive detection delay.
+	 *
+	 * As these firmwares respond to the second reset attempt
+	 * with an invalid device signature, treating an unknown
+	 * sig as offline works around the problem acceptably.
+ */
+ {
+ .ident = "EP45-DQ6",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
+ },
+ .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
+ },
+ {
+ .ident = "EP45-DS5",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR,
+ "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
+ },
+ .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
+ },
+ { } /* terminate list */
+ };
+#undef ENCODE_BUSDEVFN
+ const struct dmi_system_id *dmi = dmi_first_match(sysids);
+ unsigned int val;
+
+ if (!dmi)
+ return false;
+
+ val = (unsigned long)dmi->driver_data;
+
+ return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
+}
+
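
Since a dmi_system_id can only carry one pointer of driver data, the affected controller's PCI location is packed into that pointer; matching just reverses ENCODE_BUSDEVFN. A stand-alone sketch of the round trip (PCI_DEVFN expanded by hand):

	#include <stdio.h>

	#define ENCODE_BUSDEVFN(bus, slot, func) \
		(void *)(unsigned long)(((bus) << 8) | \
			(((slot) & 0x1f) << 3) | ((func) & 0x07))

	int main(void)
	{
		unsigned long val = (unsigned long)ENCODE_BUSDEVFN(0x0a, 0x00, 0);

		printf("bus %02lx devfn %02lx\n", val >> 8, val & 0xff);
		return 0;
	}
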
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
"BIOS update required for suspend/resume\n");
}
+ if (ahci_broken_online(pdev)) {
+ hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
+ dev_info(&pdev->dev,
+ "online status unreliable, applying workaround\n");
+ }
+
/* CAP.NP sometimes indicate the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
{ "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
+ /* this one allows HPA unlocking but fails IOs on the area */
+ { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA },
+
/* Devices which report 1 sector over size HPA */
{ "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
{ "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
ata_port_desc(ap, "no IRQ, using PIO polling");
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(dev, "failed to allocate memory for private data\n");
if (!info->ide_addr) {
dev_err(dev, "failed to map IO base\n");
ret = -ENOMEM;
- goto err_ide_ioremap;
+ goto err_put;
}
info->alt_addr = devm_ioremap(dev,
if (!info->alt_addr) {
dev_err(dev, "failed to map CTL base\n");
ret = -ENOMEM;
- goto err_alt_ioremap;
+ goto err_put;
}
ap->ioaddr.cmd_addr = info->ide_addr;
irq ? ata_sff_interrupt : NULL,
irq_flags, &pata_at91_sht);
-err_alt_ioremap:
- devm_iounmap(dev, info->ide_addr);
-
-err_ide_ioremap:
+err_put:
clk_put(info->mck);
- kfree(info);
-
return ret;
}
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct at91_ide_info *info;
- struct device *dev = &pdev->dev;
if (!host)
return 0;
if (!info)
return 0;
- devm_iounmap(dev, info->ide_addr);
- devm_iounmap(dev, info->alt_addr);
clk_put(info->mck);
- kfree(info);
return 0;
}
/*
* pata_atiixp.c - ATI PATA for new ATA layer
* (C) 2005 Red Hat Inc
+ * (C) 2009 Bartlomiej Zolnierkiewicz
*
* Based on
*
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = 2 * ap->port_no + adev->devno;
-
- /* Check this is correct - the order is odd in both drivers */
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
- u16 pio_mode_data, pio_timing_data;
+ u32 pio_timing_data;
+ u16 pio_mode_data;
pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
pio_mode_data &= ~(0x7 << (4 * dn));
pio_mode_data |= pio << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
- pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
+ pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
pio_timing_data &= ~(0xFF << timing_shift);
pio_timing_data |= (pio_timings[pio] << timing_shift);
- pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
+ pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}
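
The switch from word to dword config accessors matters because timing_shift reaches 24 bits for the second port's master device, which a 16-bit access cannot address; the mode registers stay 16-bit since their per-device fields are only 4 bits wide. Enumerating the shifts:

	#include <stdio.h>

	int main(void)
	{
		/* timing_shift = 16 * port_no + 8 * (devno ^ 1) */
		for (int port = 0; port < 2; port++)
			for (int devno = 0; devno < 2; devno++)
				printf("port %d dev %d -> shift %d\n",
				       port, devno,
				       16 * port + 8 * (devno ^ 1));
		return 0;
	}
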
/**
udma_mode_data |= dma << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
} else {
- u16 mwdma_timing_data;
- /* Check this is correct - the order is odd in both drivers */
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
+ u32 mwdma_timing_data;
dma -= XFER_MW_DMA_0;
- pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data);
+ pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
+ &mwdma_timing_data);
mwdma_timing_data &= ~(0xFF << timing_shift);
mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
- pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data);
+ pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
+ mwdma_timing_data);
}
/*
* We must now look at the PIO mode situation. We may need to
static int adma_enabled;
static int swncq_enabled = 1;
+static int msi_enabled;
static void nv_adma_register_mode(struct ata_port *ap)
{
} else if (type == SWNCQ)
nv_swncq_host_init(host);
+ if (msi_enabled) {
+ dev_printk(KERN_NOTICE, &pdev->dev, "Using MSI\n");
+ pci_enable_msi(pdev);
+ }
+
pci_set_master(pdev);
return ata_host_activate(host, pdev->irq, ipriv->irq_handler,
IRQF_SHARED, ipriv->sht);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
+module_param_named(msi, msi_enabled, bool, 0444);
+MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");
drv->driver.remove = platform_drv_remove;
if (drv->shutdown)
drv->driver.shutdown = platform_drv_shutdown;
- if (drv->suspend || drv->resume)
- pr_warning("Platform driver '%s' needs updating - please use "
- "dev_pm_ops\n", drv->driver.name);
return driver_register(&drv->driver);
}
static int pty_write_room(struct tty_struct *tty)
{
+ if (tty->stopped)
+ return 0;
return pty_space(tty->link);
}
struct platform_device *pdev;
unsigned long flags;
+ unsigned long flags_suspend;
unsigned long match_value;
unsigned long next_match_value;
unsigned long max_match_value;
return -EBUSY; /* cannot unregister clockevent and clocksource */
}
+static int sh_cmt_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+
+ /* save flag state and stop CMT channel */
+ p->flags_suspend = p->flags;
+ sh_cmt_stop(p, p->flags);
+ return 0;
+}
+
+static int sh_cmt_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_cmt_priv *p = platform_get_drvdata(pdev);
+
+ /* start CMT channel from saved state */
+ sh_cmt_start(p, p->flags_suspend);
+ return 0;
+}
+
+static struct dev_pm_ops sh_cmt_dev_pm_ops = {
+ .suspend = sh_cmt_suspend,
+ .resume = sh_cmt_resume,
+};
+
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
.remove = __devexit_p(sh_cmt_remove),
.driver = {
.name = "sh_cmt",
+ .pm = &sh_cmt_dev_pm_ops,
}
};
*(pkg->data_size) = 0;
} else if (tfr->data_size > *(pkg->data_size)) {
DMERR("Insufficient space to receive package [%u] "
- "(%u vs %lu)", tfr->request_type,
+ "(%u vs %zu)", tfr->request_type,
tfr->data_size, *(pkg->data_size));
*(pkg->data_size) = 0;
else
new->md_minor = MINOR(unit) >> MdpMinorShift;
+ mutex_init(&new->open_mutex);
mutex_init(&new->reconfig_mutex);
INIT_LIST_HEAD(&new->disks);
INIT_LIST_HEAD(&new->all_mddevs);
/* otherwise we have to go forward and ... */
mddev->events ++;
if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
- /* .. if the array isn't clean, insist on an odd 'events' */
- if ((mddev->events&1)==0) {
- mddev->events++;
+ /* .. if the array isn't clean, an 'even' event must also go
+ * to spares. */
+ if ((mddev->events&1)==0)
nospares = 0;
- }
} else {
- /* otherwise insist on an even 'events' (for clean states) */
- if ((mddev->events&1)) {
- mddev->events++;
+ /* otherwise an 'odd' event must go to spares */
+ if ((mddev->events&1))
nospares = 0;
- }
}
}
if (max < mddev->resync_min)
return -EINVAL;
if (max < mddev->resync_max &&
+ mddev->ro == 0 &&
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
struct gendisk *disk = mddev->gendisk;
mdk_rdev_t *rdev;
+ mutex_lock(&mddev->open_mutex);
if (atomic_read(&mddev->openers) > is_open) {
printk("md: %s still in use.\n",mdname(mddev));
- return -EBUSY;
- }
-
- if (mddev->pers) {
+ err = -EBUSY;
+ } else if (mddev->pers) {
if (mddev->sync_thread) {
set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
if (mode == 1)
set_disk_ro(disk, 1);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ err = 0;
}
-
+out:
+ mutex_unlock(&mddev->open_mutex);
+ if (err)
+ return err;
/*
* Free resources if final stop
*/
blk_integrity_unregister(disk);
md_new_event(mddev);
sysfs_notify_dirent(mddev->sysfs_state);
-out:
return err;
}
}
BUG_ON(mddev != bdev->bd_disk->private_data);
- if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1)))
+ if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
goto out;
err = 0;
atomic_inc(&mddev->openers);
- mddev_unlock(mddev);
+ mutex_unlock(&mddev->open_mutex);
check_disk_change(bdev);
out:
* so we don't loop trying */
int in_sync; /* know to not need resync */
+ /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
+ * that we are never stopping an array while it is open.
+ * 'reconfig_mutex' protects all other reconfiguration.
+ * These locks are separate due to conflicting interactions
+ * with bdev->bd_mutex.
+ * Lock ordering is:
+ * reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
+ * bd_mutex -> open_mutex: e.g. __blkdev_get -> md_open
+ */
+ struct mutex open_mutex;
struct mutex reconfig_mutex;
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
conf->reshape_progress < raid5_size(mddev, 0, 0)) {
sector_nr = raid5_size(mddev, 0, 0)
- conf->reshape_progress;
- } else if (mddev->delta_disks > 0 &&
+ } else if (mddev->delta_disks >= 0 &&
conf->reshape_progress > 0)
sector_nr = conf->reshape_progress;
sector_div(sector_nr, new_data_disks);
(old_disks-max_degraded));
/* here_old is the first stripe that we might need to read
* from */
- if (here_new >= here_old) {
+ if (mddev->delta_disks == 0) {
+ /* We cannot be sure it is safe to start an in-place
+		 * reshape. It is only safe if user-space is monitoring
+		 * and taking constant backups.
+		 * mdadm always starts a situation like this in
+		 * read-only mode so it can take control before
+		 * allowing any writes. So just check for that.
+ */
+ if ((here_new * mddev->new_chunk_sectors !=
+ here_old * mddev->chunk_sectors) ||
+ mddev->ro == 0) {
+ printk(KERN_ERR "raid5: in-place reshape must be started"
+ " in read-only mode - aborting\n");
+ return -EINVAL;
+ }
+ } else if (mddev->delta_disks < 0
+ ? (here_new * mddev->new_chunk_sectors <=
+ here_old * mddev->chunk_sectors)
+ : (here_new * mddev->new_chunk_sectors >=
+ here_old * mddev->chunk_sectors)) {
/* Reading from the same stripe as writing to - bad */
printk(KERN_ERR "raid5: reshape_position too early for "
"auto-recovery - aborting.\n");
mddev->degraded--;
for (d = conf->raid_disks ;
d < conf->raid_disks - mddev->delta_disks;
- d++)
- raid5_remove_disk(mddev, d);
+ d++) {
+ mdk_rdev_t *rdev = conf->disks[d].rdev;
+ if (rdev && raid5_remove_disk(mddev, d) == 0) {
+ char nm[20];
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ rdev->raid_disk = -1;
+ }
+ }
}
mddev->layout = conf->algorithm;
mddev->chunk_sectors = conf->chunk_sectors;
/* dump all registers */
static void qt1010_dump_regs(struct qt1010_priv *priv)
{
- char buf[52], buf2[4];
u8 reg, val;
for (reg = 0; ; reg++) {
if (reg % 16 == 0) {
if (reg)
- printk("%s\n", buf);
- sprintf(buf, "%02x: ", reg);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG "%02x:", reg);
}
if (qt1010_readreg(priv, reg, &val) == 0)
- sprintf(buf2, "%02x ", val);
+ printk(KERN_CONT " %02x", val);
else
- strcpy(buf2, "-- ");
- strcat(buf, buf2);
+ printk(KERN_CONT " --");
if (reg == 0x2f)
break;
}
- printk("%s\n", buf);
+ printk(KERN_CONT "\n");
}
static int qt1010_set_params(struct dvb_frontend *fe,
struct xc2028_data *priv = fe->tuner_priv;
int rc = 0;
- /* Avoid firmware reload on slow devices */
- if (no_poweroff)
+ /* Avoid firmware reload on slow devices or if PM disabled */
+ if (no_poweroff || priv->ctrl.disable_power_mgmt)
return 0;
tuner_dbg("Putting xc2028/3028 into poweroff mode.\n");
unsigned int input1:1;
unsigned int vhfbw7:1;
unsigned int uhfbw8:1;
+ unsigned int disable_power_mgmt:1;
unsigned int demod;
enum firmware_type type:2;
};
switch (req->cmd) {
case GET_CONFIG:
- case BOOT:
case READ_MEMORY:
case RECONNECT_USB:
case GET_IR_CODE:
case WRITE_VIRTUAL_MEMORY:
case COPY_FIRMWARE:
case DOWNLOAD_FIRMWARE:
+ case BOOT:
break;
default:
err("unknown command:%d", req->cmd);
struct cx22700_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct cx22700_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct cx22700_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
struct cx22702_state *state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct cx22702_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct cx22702_state), GFP_KERNEL);
if (state == NULL)
goto error;
int ret;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct cx24110_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct cx24110_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
struct dvb_dummy_fe_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
if (state == NULL) goto error;
/* create dvb_frontend */
struct dvb_dummy_fe_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
if (state == NULL) goto error;
/* create dvb_frontend */
struct dvb_dummy_fe_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct dvb_dummy_fe_state), GFP_KERNEL);
if (state == NULL) goto error;
/* create dvb_frontend */
{ .addr = config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } };
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct l64781_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct l64781_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
dprintk("%s\n", __func__);
/* Allocate memory for the internal state */
- state = kmalloc(sizeof(struct lgs8gl5_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct lgs8gl5_state), GFP_KERNEL);
if (state == NULL)
goto error;
struct mt312_state *state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct mt312_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct mt312_state), GFP_KERNEL);
if (state == NULL)
goto error;
struct nxt6000_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct nxt6000_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct nxt6000_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
struct or51132_state* state = NULL;
/* Allocate memory for the internal state */
- state = kmalloc(sizeof(struct or51132_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct or51132_state), GFP_KERNEL);
if (state == NULL)
return NULL;
struct or51211_state* state = NULL;
/* Allocate memory for the internal state */
- state = kmalloc(sizeof(struct or51211_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct or51211_state), GFP_KERNEL);
if (state == NULL)
return NULL;
u16 reg;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct s5h1409_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct s5h1409_state), GFP_KERNEL);
if (state == NULL)
goto error;
u16 reg;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct s5h1411_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct s5h1411_state), GFP_KERNEL);
if (state == NULL)
goto error;
dprintk("%s\n", __func__);
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct si21xx_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct si21xx_state), GFP_KERNEL);
if (state == NULL)
goto error;
struct sp8870_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct sp8870_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct sp8870_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
struct sp887x_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct sp887x_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct sp887x_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
int id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct stv0288_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct stv0288_state), GFP_KERNEL);
if (state == NULL)
goto error;
struct stv0297_state *state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct stv0297_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct stv0297_state), GFP_KERNEL);
if (state == NULL)
goto error;
int id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct stv0299_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct stv0299_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
u8 id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda10021_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda10021_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
dprintk(1, "%s()\n", __func__);
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda10048_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda10048_state), GFP_KERNEL);
if (state == NULL)
goto error;
int id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
if (!state) {
printk(KERN_ERR "Can't allocate memory for tda10045 state\n");
return NULL;
int id;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda1004x_state), GFP_KERNEL);
if (!state) {
printk(KERN_ERR "Can't allocate memory for tda10046 state\n");
return NULL;
dprintk ("%s\n", __func__);
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda10086_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda10086_state), GFP_KERNEL);
if (!state)
return NULL;
struct tda8083_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct tda8083_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct tda8083_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
struct ves1820_state* state = NULL;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct ves1820_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct ves1820_state), GFP_KERNEL);
if (state == NULL)
goto error;
u8 identity;
/* allocate memory for the internal state */
- state = kmalloc(sizeof(struct ves1x93_state), GFP_KERNEL);
+ state = kzalloc(sizeof(struct ves1x93_state), GFP_KERNEL);
if (state == NULL) goto error;
/* setup the state */
static void zl10353_dump_regs(struct dvb_frontend *fe)
{
struct zl10353_state *state = fe->demodulator_priv;
- char buf[52], buf2[4];
int ret;
u8 reg;
for (reg = 0; ; reg++) {
if (reg % 16 == 0) {
if (reg)
- printk(KERN_DEBUG "%s\n", buf);
- sprintf(buf, "%02x: ", reg);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG "%02x:", reg);
}
ret = zl10353_read_register(state, reg);
if (ret >= 0)
- sprintf(buf2, "%02x ", (u8)ret);
+ printk(KERN_CONT " %02x", (u8)ret);
else
- strcpy(buf2, "-- ");
- strcat(buf, buf2);
+ printk(KERN_CONT " --");
if (reg == 0xff)
break;
}
- printk(KERN_DEBUG "%s\n", buf);
+ printk(KERN_CONT "\n");
}
static void zl10353_calc_nominal_rate(struct dvb_frontend *fe,
config DVB_SIANO_SMS1XXX
tristate "Siano SMS1XXX USB dongle support"
- depends on DVB_CORE && USB
+ depends on DVB_CORE && USB && INPUT
---help---
Choose Y here if you have a USB dongle with a SMS1XXX chipset.