shared across all System Controller members.
TC/TCLIB Timer required properties:
-- compatible: Should be "atmel,<chip>-pit".
+- compatible: Should be "atmel,<chip>-tcb".
<chip> can be "at91rm9200" or "at91sam9x5"
- reg: Should contain registers location and length
- interrupts: Should contain all interrupts for the TC block
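
Example (a minimal sketch; the node label, unit address, register size and
interrupt specifiers below are illustrative values for an at91-style TC block,
not taken from a specific datasheet):

	tcb0: timer@fffa0000 {
		compatible = "atmel,at91rm9200-tcb";
		reg = <0xfffa0000 0x100>;
		interrupts = <17 4>, <18 4>, <19 4>;
	};
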
--- /dev/null
+* EETI eGalax Multiple Touch Controller
+
+Required properties:
+- compatible: must be "eeti,egalax_ts"
+- reg: i2c slave address
+- interrupt-parent: the phandle for the interrupt controller
+- interrupts: touch controller interrupt
+- wakeup-gpios: the gpio pin to be used for waking up the controller
+ as well as used as the irq pin
+
+Example:
+
+ egalax_ts@04 {
+ compatible = "eeti,egalax_ts";
+ reg = <0x04>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <9 2>;
+ wakeup-gpios = <&gpio1 9 0>;
+ };
BIOS and Kernel Developer's Guide (BKDG) For AMD Family 15h Processors
(not yet published)
-Author: Andreas Herrmann <andreas.herrmann3@amd.com>
+Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
Description
-----------
F: include/linux/altera_jtaguart.h
AMD FAM15H PROCESSOR POWER MONITORING DRIVER
-M: Andreas Herrmann <andreas.herrmann3@amd.com>
+M: Andreas Herrmann <herrmann.der.user@googlemail.com>
L: lm-sensors@lm-sensors.org
S: Maintained
F: Documentation/hwmon/fam15h_power
S: Maintained
F: arch/arm/
+ARM SUB-ARCHITECTURES
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F: arch/arm/mach-*/
+F: arch/arm/plat-*/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
+
ARM PRIMECELL AACI PL041 DRIVER
M: Russell King <linux@arm.linux.org.uk>
S: Maintained
F: drivers/pinctrl/spear/
PKTCDVD DRIVER
-M: Peter Osterlund <petero2@telia.com>
+M: Jiri Kosina <jkosina@suse.cz>
S: Maintained
F: drivers/block/pktcdvd.c
F: include/linux/pktcdvd.h
VERSION = 3
PATCHLEVEL = 7
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Terrified Chipmunk
# *DOCUMENTATION*
#size-cells = <0>;
btn3 {
- label = "Buttin 3";
+ label = "Button 3";
gpios = <&pioA 30 1>;
linux,code = <0x103>;
gpio-key,wakeup;
};
btn4 {
- label = "Buttin 4";
+ label = "Button 4";
gpios = <&pioA 31 1>;
linux,code = <0x104>;
gpio-key,wakeup;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80004000 0x1000>;
interrupts = <0 21 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80122000 0x1000>;
interrupts = <0 22 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80128000 0x1000>;
interrupts = <0 55 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x80110000 0x1000>;
interrupts = <0 12 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
compatible = "stericsson,db8500-i2c", "st,nomadik-i2c", "arm,primecell";
reg = <0x8012a000 0x1000>;
interrupts = <0 51 0x4>;
+ arm,primecell-periphid = <0x180024>;
+
#address-cells = <1>;
#size-cells = <0>;
v-i2c-supply = <&db8500_vape_reg>;
interrupts = <0 60 0x4>;
status = "disabled";
};
+
sdi@80118000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80118000 0x1000>;
interrupts = <0 50 0x4>;
status = "disabled";
};
+
sdi@80005000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80005000 0x1000>;
interrupts = <0 41 0x4>;
status = "disabled";
};
+
sdi@80119000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80119000 0x1000>;
interrupts = <0 59 0x4>;
status = "disabled";
};
+
sdi@80114000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80114000 0x1000>;
interrupts = <0 99 0x4>;
status = "disabled";
};
+
sdi@80008000 {
compatible = "arm,pl18x", "arm,primecell";
- reg = <0x80114000 0x1000>;
+ reg = <0x80008000 0x1000>;
interrupts = <0 100 0x4>;
status = "disabled";
};
compatible = "samsung,trats", "samsung,exynos4210";
memory {
- reg = <0x40000000 0x20000000
- 0x60000000 0x20000000>;
+ reg = <0x40000000 0x10000000
+ 0x50000000 0x10000000
+ 0x60000000 0x10000000
+ 0x70000000 0x10000000>;
};
chosen {
interrupts = <13>, <56>;
interrupt-names = "gpmi-dma", "bch";
clocks = <&clks 34>;
+ clock-names = "gpmi_io";
fsl,gpmi-dma-channel = <4>;
status = "disabled";
};
interrupts = <88>, <41>;
interrupt-names = "gpmi-dma", "bch";
clocks = <&clks 50>;
+ clock-names = "gpmi_io";
fsl,gpmi-dma-channel = <4>;
status = "disabled";
};
interrupt-names = "common", "tx", "rx", "sidetone";
interrupt-parent = <&intc>;
ti,buffer-size = <1280>;
- ti,hwmods = "mcbsp2";
+ ti,hwmods = "mcbsp2", "mcbsp2_sidetone";
};
mcbsp3: mcbsp@49024000 {
interrupt-names = "common", "tx", "rx", "sidetone";
interrupt-parent = <&intc>;
ti,buffer-size = <128>;
- ti,hwmods = "mcbsp3";
+ ti,hwmods = "mcbsp3", "mcbsp3_sidetone";
};
mcbsp4: mcbsp@49026000 {
CONFIG_SPI=y
CONFIG_SPI_IMX=y
CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_MC9S08DZ60=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_IMX2_WDT=y
CONFIG_SOC_CAMERA_OV2640=y
CONFIG_VIDEO_MX3=y
CONFIG_FB=y
+CONFIG_LCD_PLATFORM=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_L4F00242T03=y
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
-CONFIG_NO_HZ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_MVEBU=y
-CONFIG_MACH_ARMADA_370_XP=y
+CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_XP=y
+# CONFIG_CACHE_L2X0 is not set
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
-CONFIG_USE_OF=y
+# CONFIG_COMPACTION is not set
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_ARCH_VERSATILE=y
CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
--- /dev/null
+/*
+ * Copyright (c) 2011 Picochip Ltd., Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Derived from arch/arm/mach-davinci/include/mach/debug-macro.S to use 32-bit
+ * accesses to the 8250.
+ */
+
+#include <linux/serial_reg.h>
+
+ .macro senduart,rd,rx
+ str \rd, [\rx, #UART_TX << UART_SHIFT]
+ .endm
+
+ .macro busyuart,rd,rx
+1002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT]
+ and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ bne 1002b
+ .endm
+
+ /* The UARTs don't have any flow control IOs wired up. */
+ .macro waituart,rd,rx
+ .endm
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * Derived from arch/arm/mach-davinci/include/mach/debug-macro.S to use 32-bit
- * accesses to the 8250.
*/
-#include <linux/serial_reg.h>
#define UART_SHIFT 2
#define PICOXCELL_UART1_BASE 0x80230000
ldr \rp, =PICOXCELL_UART1_BASE
.endm
- .macro senduart,rd,rx
- str \rd, [\rx, #UART_TX << UART_SHIFT]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT]
- and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
- teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
- bne 1002b
- .endm
-
- /* The UART's don't have any flow control IO's wired up. */
- .macro waituart,rd,rx
- .endm
+#include "8250_32.S"
* published by the Free Software Foundation.
*/
+#define UART_SHIFT 2
+#define DEBUG_LL_UART_OFFSET 0x00002000
+
.macro addruart, rp, rv, tmp
mov \rp, #DEBUG_LL_UART_OFFSET
orr \rp, \rp, #0x00c00000
orr \rp, \rp, #0xff000000 @ physical base
.endm
+#include "8250_32.S"
+
bool
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
menu "Atmel AT91 System-on-Chip"
comment "Atmel AT91 Processor"
-config SOC_AT91SAM9
- bool
- select AT91_SAM9_SMC
- select AT91_SAM9_TIME
- select CPU_ARM926T
- select MULTI_IRQ_HANDLER
- select SPARSE_IRQ
-
config SOC_AT91RM9200
bool "AT91RM9200"
select CPU_ARM920T
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91rm9200", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91rm9200.0", &twi_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
static struct platform_device at91rm9200_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
static struct platform_device at91rm9200_twi_device = {
.name = "i2c-at91rm9200",
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9260", &twi_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g20", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9260.0", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g20.0", &twi_clk),
/* more usart lookup table for DT entries */
CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
CLKDEV_CON_DEV_ID("usart", "fffb0000.serial", &usart0_clk),
static struct platform_device at91sam9260_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
};
static struct platform_device at91sam9260_twi_device = {
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk),
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &hck0),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9261", &twi_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g10", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9261.0", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9g10.0", &twi_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
CLKDEV_CON_ID("pioB", &pioB_clk),
CLKDEV_CON_ID("pioC", &pioC_clk),
static struct platform_device at91sam9261_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
};
static struct platform_device at91sam9261_twi_device = {
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk),
CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk),
- CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9260", &twi_clk),
+ CLKDEV_CON_DEV_ID(NULL, "i2c-at91sam9260.0", &twi_clk),
/* fake hclk clock */
CLKDEV_CON_DEV_ID("hclk", "at91_ohci", &ohci_clk),
CLKDEV_CON_ID("pioA", &pioA_clk),
static struct platform_device at91sam9263_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
static struct platform_device at91sam9263_twi_device = {
.name = "i2c-at91sam9260",
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
static struct platform_device at91sam9rl_twi_device = {
.name = "i2c-gpio",
- .id = -1,
+ .id = 0,
.dev.platform_data = &pdata,
};
static struct platform_device at91sam9rl_twi_device = {
.name = "i2c-at91sam9g20",
- .id = -1,
+ .id = 0,
.resource = twi_resources,
.num_resources = ARRAY_SIZE(twi_resources),
};
if (!priority)
priority = at91x40_default_irq_priority;
- at91_aic_init(priority);
+ at91_aic_init(priority, at91_extern_irq);
}
.max_speed_hz = 125000 * 16,
.bus_num = 0,
.platform_data = &ads_info,
- .irq = AT91SAM9263_ID_IRQ1,
+ .irq = NR_IRQS_LEGACY + AT91SAM9263_ID_IRQ1,
},
#endif
};
.max_speed_hz = 125000 * 26, /* (max sample rate @ 3V) * (cmd + data + overhead) */
.bus_num = 0,
.platform_data = &ads_info,
- .irq = AT91SAM9261_ID_IRQ0,
+ .irq = NR_IRQS_LEGACY + AT91SAM9261_ID_IRQ0,
.controller_data = (void *) AT91_PIN_PA28, /* CS pin */
},
#endif
.max_speed_hz = 125000 * 26, /* (max sample rate @ 3V) * (cmd + data + overhead) */
.bus_num = 0,
.platform_data = &ads_info,
- .irq = AT91SAM9263_ID_IRQ1,
+ .irq = NR_IRQS_LEGACY + AT91SAM9263_ID_IRQ1,
},
#endif
};
extern void __init at91_init_irq_default(void);
extern void __init at91_init_interrupts(unsigned int priority[]);
extern void __init at91x40_init_interrupts(unsigned int priority[]);
-extern void __init at91_aic_init(unsigned int priority[]);
+extern void __init at91_aic_init(unsigned int priority[],
+ unsigned int ext_irq_mask);
extern int __init at91_aic_of_init(struct device_node *node,
struct device_node *parent);
extern int __init at91_aic5_of_init(struct device_node *node,
/*
* Initialize the AIC interrupt controller.
*/
-void __init at91_aic_init(unsigned int *priority)
+void __init at91_aic_init(unsigned int *priority, unsigned int ext_irq_mask)
{
unsigned int i;
int irq_base;
- if (at91_aic_pm_init())
+ at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
+ * sizeof(*at91_extern_irq), GFP_KERNEL);
+
+ if (at91_aic_pm_init() || at91_extern_irq == NULL)
panic("Unable to allocate bit maps\n");
+ *at91_extern_irq = ext_irq_mask;
+
at91_aic_base = ioremap(AT91_AIC, 512);
if (!at91_aic_base)
panic("Unable to ioremap AIC registers\n");
void __init at91_init_interrupts(unsigned int *priority)
{
/* Initialize the AIC interrupt controller */
- at91_aic_init(priority);
+ at91_aic_init(priority, at91_extern_irq);
/* Enable GPIO interrupts */
at91_gpio_irq_setup();
}
/* at91sam9g10 */
- if ((cidr & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
+ if ((socid & ~AT91_CIDR_EXT) == ARCH_ID_AT91SAM9G10) {
at91_soc_initdata.type = AT91_SOC_SAM9G10;
at91_boot_soc = at91sam9261_soc;
}
#include <plat/fimc-core.h>
#include <plat/iic-core.h>
#include <plat/tv-core.h>
+#include <plat/spi-core.h>
#include <plat/regs-serial.h>
#include "common.h"
s5p_fb_setname(0, "exynos4-fb");
s5p_hdmi_setname("exynos4-hdmi");
+
+ s3c64xx_spi_setname("exynos4210-spi");
}
static void __init exynos5_map_io(void)
s3c_i2c0_setname("s3c2440-i2c");
s3c_i2c1_setname("s3c2440-i2c");
s3c_i2c2_setname("s3c2440-i2c");
+
+ s3c64xx_spi_setname("exynos4210-spi");
}
static void __init exynos4_init_clocks(int xtal)
DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
/* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
+ .smp = smp_ops(exynos_smp_ops),
.init_irq = exynos4_init_irq,
.map_io = exynos4_dt_map_io,
.handle_irq = gic_handle_irq,
busy->div.hw.init = &init;
clk = clk_register(NULL, &busy->div.hw);
- if (!clk)
+ if (IS_ERR(clk))
kfree(busy);
return clk;
clk[esdhc2_ipg_per] = imx_clk_gate("esdhc2_ipg_per", "per4", ccm(CCM_CGCR0), 4);
clk[gpt_ipg_per] = imx_clk_gate("gpt_ipg_per", "per5", ccm(CCM_CGCR0), 5);
clk[i2c_ipg_per] = imx_clk_gate("i2c_ipg_per", "per6", ccm(CCM_CGCR0), 6);
- clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per8", ccm(CCM_CGCR0), 7);
- clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "ipg_per", ccm(CCM_CGCR0), 8);
+ clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per7", ccm(CCM_CGCR0), 7);
+ clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "per8", ccm(CCM_CGCR0), 8);
clk[ssi1_ipg_per] = imx_clk_gate("ssi1_ipg_per", "per13", ccm(CCM_CGCR0), 13);
clk[ssi2_ipg_per] = imx_clk_gate("ssi2_ipg_per", "per14", ccm(CCM_CGCR0), 14);
clk[uart_ipg_per] = imx_clk_gate("uart_ipg_per", "per15", ccm(CCM_CGCR0), 15);
clk[per3_div] = imx_clk_divider("per3_div", "mpll_main2", CCM_PCDR1, 16, 6);
clk[per4_div] = imx_clk_divider("per4_div", "mpll_main2", CCM_PCDR1, 24, 6);
clk[vpu_sel] = imx_clk_mux("vpu_sel", CCM_CSCR, 21, 1, vpu_sel_clks, ARRAY_SIZE(vpu_sel_clks));
- clk[vpu_div] = imx_clk_divider("vpu_div", "vpu_sel", CCM_PCDR0, 10, 3);
+ clk[vpu_div] = imx_clk_divider("vpu_div", "vpu_sel", CCM_PCDR0, 10, 6);
clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 28, 3);
clk[cpu_sel] = imx_clk_mux("cpu_sel", CCM_CSCR, 15, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
clk[clko_sel] = imx_clk_mux("clko_sel", CCM_CCSR, 0, 5, clko_sel_clks, ARRAY_SIZE(clko_sel_clks));
clk[ssi1_sel] = imx_clk_mux("ssi1_sel", CCM_CSCR, 22, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
clk[ssi2_sel] = imx_clk_mux("ssi2_sel", CCM_CSCR, 23, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
clk[ssi1_div] = imx_clk_divider("ssi1_div", "ssi1_sel", CCM_PCDR0, 16, 6);
- clk[ssi2_div] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 3);
+ clk[ssi2_div] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 6);
clk[clko_en] = imx_clk_gate("clko_en", "clko_div", CCM_PCCR0, 0);
clk[ssi2_ipg_gate] = imx_clk_gate("ssi2_ipg_gate", "ipg", CCM_PCCR0, 0);
clk[ssi1_ipg_gate] = imx_clk_gate("ssi1_ipg_gate", "ipg", CCM_PCCR0, 1);
}
l2x0_base = ioremap(MX3x_L2CC_BASE_ADDR, 4096);
- if (IS_ERR(l2x0_base)) {
- printk(KERN_ERR "remapping L2 cache area failed with %ld\n",
- PTR_ERR(l2x0_base));
+ if (!l2x0_base) {
+ printk(KERN_ERR "remapping L2 cache area failed\n");
return;
}
select I2C_OMAP
select MENELAUS if ARCH_OMAP2
select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5
- select PINCTRL
select PM_RUNTIME
select REGULATOR
select SERIAL_OMAP
#include <linux/input.h>
#include <linux/gpio_keys.h>
#include <linux/opp.h>
+#include <linux/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
};
#endif
-static void __init beagle_opp_init(void)
+static int __init beagle_opp_init(void)
{
int r = 0;
- /* Initialize the omap3 opp table */
- if (omap3_opp_init()) {
+ if (!machine_is_omap3_beagle())
+ return 0;
+
+ /* Initialize the omap3 opp table if not already created. */
+ r = omap3_opp_init();
+ if (IS_ERR_VALUE(r) && (r != -EEXIST)) {
pr_err("%s: opp default init failed\n", __func__);
- return;
+ return r;
}
/* Custom OPP enabled for all xM versions */
if (cpu_is_omap3630()) {
struct device *mpu_dev, *iva_dev;
- mpu_dev = omap_device_get_by_hwmod_name("mpu");
+ mpu_dev = get_cpu_device(0);
iva_dev = omap_device_get_by_hwmod_name("iva");
if (IS_ERR(mpu_dev) || IS_ERR(iva_dev)) {
pr_err("%s: Aiee.. no mpu/dsp devices? %p %p\n",
__func__, mpu_dev, iva_dev);
- return;
+ return -ENODEV;
}
/* Enable MPU 1GHz and lower opps */
r = opp_enable(mpu_dev, 800000000);
opp_disable(iva_dev, 660000000);
}
}
- return;
+ return 0;
}
+device_initcall(beagle_opp_init);
static void __init omap3_beagle_init(void)
{
/* Ensure SDRC pins are mux'd for self-refresh */
omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
-
- beagle_opp_init();
}
MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
CLK(NULL, "gfx_fck_div_ck", &gfx_fck_div_ck, CK_AM33XX),
CLK(NULL, "sysclkout_pre_ck", &sysclkout_pre_ck, CK_AM33XX),
CLK(NULL, "clkout2_ck", &clkout2_ck, CK_AM33XX),
+ CLK(NULL, "timer_32k_ck", &clkdiv32k_ick, CK_AM33XX),
+ CLK(NULL, "timer_sys_ck", &sys_clkin_ck, CK_AM33XX),
};
int __init am33xx_clk_init(void)
"sys_off_mode", NULL, NULL, NULL,
"gpio_9", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_CTS, 150,
- "uart1_cts", NULL, NULL, NULL,
+ "uart1_cts", "ssi1_rdy_tx", NULL, NULL,
"gpio_150", "hsusb3_tll_clk", NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_RTS, 149,
- "uart1_rts", NULL, NULL, NULL,
+ "uart1_rts", "ssi1_flag_tx", NULL, NULL,
"gpio_149", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_RX, 151,
- "uart1_rx", NULL, "mcbsp1_clkr", "mcspi4_clk",
+ "uart1_rx", "ss1_wake_tx", "mcbsp1_clkr", "mcspi4_clk",
"gpio_151", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART1_TX, 148,
- "uart1_tx", NULL, NULL, NULL,
+ "uart1_tx", "ssi1_dat_tx", NULL, NULL,
"gpio_148", NULL, NULL, "safe_mode"),
_OMAP3_MUXENTRY(UART2_CTS, 144,
"uart2_cts", "mcbsp3_dx", "gpt9_pwm_evt", NULL,
#define PM_RTA_ERRATUM_i608 (1 << 0)
#define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1)
+#define PM_PER_MEMORIES_ERRATUM_i582 (1 << 2)
#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
extern u16 pm34xx_errata;
/* Enable the l2 cache toggling in sleep logic */
enable_omap3630_toggle_l2_on_restore();
if (omap_rev() < OMAP3630_REV_ES1_2)
- pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
+ pm34xx_errata |= (PM_SDRC_WAKEUP_ERRATUM_i583 |
+ PM_PER_MEMORIES_ERRATUM_i582);
+ } else if (cpu_is_omap34xx()) {
+ pm34xx_errata |= PM_PER_MEMORIES_ERRATUM_i582;
}
}
int __init omap3_pm_init(void)
{
struct power_state *pwrst, *tmp;
- struct clockdomain *neon_clkdm, *mpu_clkdm;
+ struct clockdomain *neon_clkdm, *mpu_clkdm, *per_clkdm, *wkup_clkdm;
int ret;
if (!omap3_has_io_chain_ctrl())
neon_clkdm = clkdm_lookup("neon_clkdm");
mpu_clkdm = clkdm_lookup("mpu_clkdm");
+ per_clkdm = clkdm_lookup("per_clkdm");
+ wkup_clkdm = clkdm_lookup("wkup_clkdm");
#ifdef CONFIG_SUSPEND
omap_pm_suspend = omap3_pm_suspend;
if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
omap3630_ctrl_disable_rta();
+ /*
+ * The UART3/4 FIFO and the sidetone memory in McBSP2/3 are
+ * not correctly reset when the PER powerdomain comes back
+ * from OFF or OSWR when the CORE powerdomain is kept active.
+ * See OMAP36xx Erratum i582 "PER Domain reset issue after
+ * Domain-OFF/OSWR Wakeup". This wakeup dependency is not a
+ * complete workaround. The kernel must also prevent the PER
+ * powerdomain from going to OSWR/OFF while the CORE
+ * powerdomain is not going to OSWR/OFF. And if PER last
+ * power state was off while CORE last power state was ON, the
+ * UART3/4 and McBSP2/3 SIDETONE devices need to run a
+ * self-test using their loopback tests; if that fails, those
+ * devices are unusable until the PER/CORE can complete a transition
+ * from ON to OSWR/OFF and then back to ON.
+ *
+ * XXX Technically this workaround is only needed if off-mode
+ * or OSWR is enabled.
+ */
+ if (IS_PM34XX_ERRATUM(PM_PER_MEMORIES_ERRATUM_i582))
+ clkdm_add_wkdep(per_clkdm, wkup_clkdm);
+
clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
omap3_secure_ram_storage =
oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
+ if (console_uart_id == bdata->id) {
+ omap_device_enable(pdev);
+ pm_runtime_set_active(&pdev->dev);
+ }
+
oh->dev_attr = uart;
if (((cpu_is_omap34xx() || cpu_is_omap44xx()) && bdata->pads)
#include <plat/nand-core.h>
#include <plat/adc-core.h>
#include <plat/rtc-core.h>
+#include <plat/spi-core.h>
static struct map_desc s3c2416_iodesc[] __initdata = {
IODESC_ENT(WATCHDOG),
/* initialize device information early */
s3c2416_default_sdhci0();
s3c2416_default_sdhci1();
+ s3c64xx_spi_setname("s3c2443-spi");
iotable_init(s3c2416_iodesc, ARRAY_SIZE(s3c2416_iodesc));
}
#include <plat/nand-core.h>
#include <plat/adc-core.h>
#include <plat/rtc-core.h>
+#include <plat/spi-core.h>
static struct map_desc s3c2443_iodesc[] __initdata = {
IODESC_ENT(WATCHDOG),
s3c24xx_gpiocfg_default.set_pull = s3c2443_gpio_setpull;
s3c24xx_gpiocfg_default.get_pull = s3c2443_gpio_getpull;
+ /* initialize device information early */
+ s3c64xx_spi_setname("s3c2443-spi");
+
iotable_init(s3c2443_iodesc, ARRAY_SIZE(s3c2443_iodesc));
}
#include <plat/sdhci.h>
#include <plat/adc-core.h>
#include <plat/fb-core.h>
+#include <plat/spi-core.h>
#include <plat/gpio-cfg.h>
#include <plat/regs-irqtype.h>
#include <plat/regs-serial.h>
/* initialize any device information early */
s3c_adc_setname("s3c64xx-adc");
s3c_fb_setname("s5p64x0-fb");
+ s3c64xx_spi_setname("s5p64x0-spi");
s5p64x0_default_sdhci0();
s5p64x0_default_sdhci1();
/* initialize any device information early */
s3c_adc_setname("s3c64xx-adc");
s3c_fb_setname("s5p64x0-fb");
+ s3c64xx_spi_setname("s5p64x0-spi");
s5p64x0_default_sdhci0();
s5p64x0_default_sdhci1();
#include <plat/fb-core.h>
#include <plat/iic-core.h>
#include <plat/onenand-core.h>
+#include <plat/spi-core.h>
#include <plat/regs-serial.h>
#include <plat/watchdog-reset.h>
s3c_onenand_setname("s5pc100-onenand");
s3c_fb_setname("s5pc100-fb");
s3c_cfcon_setname("s5pc100-pata");
+
+ s3c64xx_spi_setname("s5pc100-spi");
}
void __init s5pc100_init_clocks(int xtal)
#include <plat/iic-core.h>
#include <plat/keypad-core.h>
#include <plat/tv-core.h>
+#include <plat/spi-core.h>
#include <plat/regs-serial.h>
#include "common.h"
/* setup TV devices */
s5p_hdmi_setname("s5pv210-hdmi");
+
+ s3c64xx_spi_setname("s5pv210-spi");
}
void __init s5pv210_init_clocks(int xtal)
{
#ifdef CONFIG_CACHE_L2X0
/* Early BRESP enable, Shared attribute override enable, 64K*16way */
- l2x0_init((void __iomem __force *)(0xf0100000), 0x40470000, 0x82000fff);
+ l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff);
#endif
r8a7779_pm_init();
#include <linux/stat.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/irq.h>
#include <linux/platform_data/clk-ux500.h>
#include <asm/hardware/gic.h>
struct resource res[] = {
{
.start = data->iobase,
- .end = data->iobase + SZ_4K - 1,
+ .end = data->iobase + data->iosize - 1,
.flags = IORESOURCE_MEM,
}, {
.start = data->irq,
select CLKDEV_LOOKUP
select GENERIC_IRQ_CHIP
select OMAP_DM_TIMER
+ select PINCTRL
select PROC_DEVICETREE if PROC_FS
select SPARSE_IRQ
select USE_OF
--- /dev/null
+/*
+ * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __PLAT_S3C_SPI_CORE_H
+#define __PLAT_S3C_SPI_CORE_H
+
+/* These functions are only for use with the core support code, such as
+ * the cpu specific initialisation code
+ */
+
+/* re-define device name depending on support. */
+static inline void s3c64xx_spi_setname(char *name)
+{
+#ifdef CONFIG_S3C64XX_DEV_SPI0
+ s3c64xx_device_spi0.name = name;
+#endif
+#ifdef CONFIG_S3C64XX_DEV_SPI1
+ s3c64xx_device_spi1.name = name;
+#endif
+#ifdef CONFIG_S3C64XX_DEV_SPI2
+ s3c64xx_device_spi2.name = name;
+#endif
+}
+
+#endif /* __PLAT_S3C_SPI_CORE_H */
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/opcodes-virt.h>
#include <xen/interface/xen.h>
-/* HVC 0xEA1 */
-#ifdef CONFIG_THUMB2_KERNEL
-#define xen_hvc .word 0xf7e08ea1
-#else
-#define xen_hvc .word 0xe140ea71
-#endif
+#define XEN_IMM 0xEA1
#define HYPERCALL_SIMPLE(hypercall) \
ENTRY(HYPERVISOR_##hypercall) \
mov r12, #__HYPERVISOR_##hypercall; \
- xen_hvc; \
+ __HVC(XEN_IMM); \
mov pc, lr; \
ENDPROC(HYPERVISOR_##hypercall)
stmdb sp!, {r4} \
ldr r4, [sp, #4] \
mov r12, #__HYPERVISOR_##hypercall; \
- xen_hvc \
+ __HVC(XEN_IMM); \
ldm sp!, {r4} \
mov pc, lr \
ENDPROC(HYPERVISOR_##hypercall)
mov r2, r3
ldr r3, [sp, #8]
ldr r4, [sp, #4]
- xen_hvc
+ __HVC(XEN_IMM)
ldm sp!, {r4}
mov pc, lr
ENDPROC(privcmd_call);
select GENERIC_CPU_DEVICES
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
config ZONE_DMA
bool
INITRD_PHYS = 0x02180000
INITRD_VIRT = 0x02180000
+OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment
+
#
# If you don't define ZRELADDR above,
# then it defaults to ZTEXTADDR
targets: $(obj)/Image
$(obj)/Image: vmlinux FORCE
- $(OBJCOPY) -O binary -R .note -R .comment -S vmlinux $@
+ $(OBJCOPY) $(OBJCOPYFLAGS) -S vmlinux $@
#$(obj)/Image: $(CONFIGURE) $(SYSTEM)
-# $(OBJCOPY) -O binary -R .note -R .comment -g -S $(SYSTEM) $@
+# $(OBJCOPY) $(OBJCOPYFLAGS) -g -S $(SYSTEM) $@
bzImage: zImage
zImage: $(CONFIGURE) compressed/$(LINUX)
- $(OBJCOPY) -O binary -R .note -R .comment -S compressed/$(LINUX) $@
+ $(OBJCOPY) $(OBJCOPYFLAGS) -S compressed/$(LINUX) $@
bootpImage: bootp/bootp
- $(OBJCOPY) -O binary -R .note -R .comment -S bootp/bootp $@
+ $(OBJCOPY) $(OBJCOPYFLAGS) -S bootp/bootp $@
compressed/$(LINUX): $(LINUX) dep
@$(MAKE) -C compressed $(LINUX)
#define __ARCH_WANT_SYS_RT_SIGACTION
#define __ARCH_WANT_SYS_RT_SIGSUSPEND
#define __ARCH_WANT_SYS_EXECVE
-#define __ARCH_WANT_KERNEL_EXECVE
/*
* "Conditional" syscalls
call schedule_tail
calll.p @(gr21,gr0)
or gr20,gr20,gr8
- bra sys_exit
-
- .globl ret_from_kernel_execve
-ret_from_kernel_execve:
- ori gr28,0,sp
bra __syscall_exit
###################################################################################################
subicc gr5,#0,gr0,icc0
beq icc0,#0,__entry_return_direct
-__entry_preempt_need_resched:
- ldi @(gr15,#TI_FLAGS),gr4
- andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
- beq icc0,#1,__entry_return_direct
-
- setlos #PREEMPT_ACTIVE,gr5
- sti gr5,@(gr15,#TI_FLAGS)
-
- andi gr23,#~PSR_PIL,gr23
- movgs gr23,psr
-
- call schedule
- sti gr0,@(gr15,#TI_PRE_COUNT)
-
- movsg psr,gr23
- ori gr23,#PSR_PIL_14,gr23
- movgs gr23,psr
- bra __entry_preempt_need_resched
-#else
- bra __entry_return_direct
+ subcc gr0,gr0,gr0,icc2 /* set Z and clear C */
+ call preempt_schedule_irq
#endif
+ bra __entry_return_direct
###############################################################################
childregs = (struct pt_regs *)
(task_stack_page(p) + THREAD_SIZE - FRV_FRAME0_SIZE);
+ /* set up the userspace frame (the only place that the USP is stored) */
+ *childregs = *__kernel_frame0_ptr;
+
p->set_child_tid = p->clear_child_tid = NULL;
p->thread.frame = childregs;
p->thread.frame0 = childregs;
if (unlikely(!regs)) {
- memset(childregs, 0, sizeof(struct pt_regs));
childregs->gr9 = usp; /* function */
childregs->gr8 = arg;
- childregs->psr = PSR_S;
p->thread.pc = (unsigned long) ret_from_kernel_thread;
save_user_regs(p->thread.user);
return 0;
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#ifndef _ASM_X86_XEN_HYPERVISOR_H
#define _ASM_X86_XEN_HYPERVISOR_H
-/* arch/i386/kernel/setup.c */
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
{
struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
- memcpy(vcpu->run->mmio.data, frag->data, frag->len);
+ memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
return X86EMUL_CONTINUE;
}
bytes -= handled;
val += handled;
- while (bytes) {
- unsigned now = min(bytes, 8U);
-
- frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
- frag->gpa = gpa;
- frag->data = val;
- frag->len = now;
-
- gpa += now;
- val += now;
- bytes -= now;
- }
+ WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
+ frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
+ frag->gpa = gpa;
+ frag->data = val;
+ frag->len = bytes;
return X86EMUL_CONTINUE;
}
vcpu->mmio_needed = 1;
vcpu->mmio_cur_fragment = 0;
- vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
+ vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
vcpu->run->exit_reason = KVM_EXIT_MMIO;
vcpu->run->mmio.phys_addr = gpa;
*
* read:
* for each fragment
- * write gpa, len
- * exit
- * copy data
+ * for each mmio piece in the fragment
+ * write gpa, len
+ * exit
+ * copy data
* execute insn
*
* write:
* for each fragment
- * write gpa, len
- * copy data
- * exit
+ * for each mmio piece in the fragment
+ * write gpa, len
+ * copy data
+ * exit
*/
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct kvm_mmio_fragment *frag;
+ unsigned len;
BUG_ON(!vcpu->mmio_needed);
/* Complete previous fragment */
- frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
+ frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
+ len = min(8u, frag->len);
if (!vcpu->mmio_is_write)
- memcpy(frag->data, run->mmio.data, frag->len);
+ memcpy(frag->data, run->mmio.data, len);
+
+ if (frag->len <= 8) {
+ /* Switch to the next fragment. */
+ frag++;
+ vcpu->mmio_cur_fragment++;
+ } else {
+ /* Go forward to the next mmio piece. */
+ frag->data += len;
+ frag->gpa += len;
+ frag->len -= len;
+ }
+
if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
if (vcpu->mmio_is_write)
vcpu->mmio_read_completed = 1;
return complete_emulated_io(vcpu);
}
- /* Initiate next fragment */
- ++frag;
+
run->exit_reason = KVM_EXIT_MMIO;
run->mmio.phys_addr = frag->gpa;
if (vcpu->mmio_is_write)
- memcpy(run->mmio.data, frag->data, frag->len);
- run->mmio.len = frag->len;
+ memcpy(run->mmio.data, frag->data, min(8u, frag->len));
+ run->mmio.len = min(8u, frag->len);
run->mmio.is_write = vcpu->mmio_is_write;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
return 0;
return this_cpu_read(xen_vcpu_info.arch.cr2);
}
+void xen_flush_tlb_all(void)
+{
+ struct mmuext_op *op;
+ struct multicall_space mcs;
+
+ trace_xen_mmu_flush_tlb_all(0);
+
+ preempt_disable();
+
+ mcs = xen_mc_entry(sizeof(*op));
+
+ op = mcs.args;
+ op->cmd = MMUEXT_TLB_FLUSH_ALL;
+ MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+ preempt_enable();
+}
static void xen_flush_tlb(void)
{
struct mmuext_op *op;
err = 0;
out:
- flush_tlb_all();
+ xen_flush_tlb_all();
return err;
}
select GENERIC_CPU_DEVICES
select MODULES_USE_ELF_RELA
select GENERIC_PCI_IOMAP
+ select GENERIC_KERNEL_THREAD
+ select GENERIC_KERNEL_EXECVE
select ARCH_WANT_OPTIONAL_GPIOLIB
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
static inline void iounmap(volatile void __iomem *addr)
{
}
+
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
#endif /* CONFIG_MMU */
/*
/* Clearing a0 terminates the backtrace. */
#define start_thread(regs, new_pc, new_sp) \
+ memset(regs, 0, sizeof(*regs)); \
regs->pc = new_pc; \
regs->ps = USER_PS_VALUE; \
regs->areg[1] = new_sp; \
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)
-/* Create a kernel thread without removing it from tasklists */
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
/* Copy and release all segment info associated with a VM */
#define copy_segments(p, mm) do { } while(0)
#define release_segments(mm) do { } while(0)
struct pt_regs;
struct sigaction;
-asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
+asmlinkage long sys_execve(char*, char**, char**, struct pt_regs*);
asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*);
asmlinkage long xtensa_ptrace(long, long, long, long);
asmlinkage long xtensa_sigreturn(struct pt_regs*);
-/*
- * include/asm-xtensa/unistd.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
+#ifndef _XTENSA_UNISTD_H
+#define _XTENSA_UNISTD_H
+#define __ARCH_WANT_SYS_EXECVE
#include <uapi/asm/unistd.h>
-
/*
* "Conditional" syscalls
*
#define __IGNORE_mmap /* use mmap2 */
#define __IGNORE_vfork /* use clone */
#define __IGNORE_fadvise64 /* use fadvise64_64 */
+
+#endif /* _XTENSA_UNISTD_H */
-/*
- * include/asm-xtensa/unistd.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2012 Tensilica Inc.
- */
-
-#ifndef _UAPI_XTENSA_UNISTD_H
+#if !defined(_UAPI_XTENSA_UNISTD_H) || defined(__SYSCALL)
#define _UAPI_XTENSA_UNISTD_H
#ifndef __SYSCALL
#define __NR_clone 116
__SYSCALL(116, xtensa_clone, 5)
#define __NR_execve 117
-__SYSCALL(117, xtensa_execve, 3)
+__SYSCALL(117, sys_execve, 3)
#define __NR_exit 118
__SYSCALL(118, sys_exit, 1)
#define __NR_exit_group 119
#define SYS_XTENSA_COUNT 5 /* count */
+#undef __SYSCALL
+
#endif /* _UAPI_XTENSA_UNISTD_H */
retw
-/*
- * Create a kernel thread
- *
- * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
- * a2 a2 a3 a4
- */
-
-ENTRY(kernel_thread)
- entry a1, 16
-
- mov a5, a2 # preserve fn over syscall
- mov a7, a3 # preserve args over syscall
-
- movi a3, _CLONE_VM | _CLONE_UNTRACED
- movi a2, __NR_clone
- or a6, a4, a3 # arg0: flags
- mov a3, a1 # arg1: sp
- syscall
-
- beq a3, a1, 1f # branch if parent
- mov a6, a7 # args
- callx4 a5 # fn(args)
-
- movi a2, __NR_exit
- syscall # return value of fn(args) still in a6
-
-1: retw
-
-/*
- * Do a system call from kernel instead of calling sys_execve, so we end up
- * with proper pt_regs.
- *
- * int kernel_execve(const char *fname, char *const argv[], charg *const envp[])
- * a2 a2 a3 a4
- */
-
-ENTRY(kernel_execve)
- entry a1, 16
- mov a6, a2 # arg0 is in a6
- movi a2, __NR_execve
- syscall
-
- retw
-
/*
* Task switch.
*
j common_exception_return
+/*
+ * Kernel thread creation helper
+ * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
+ * left from _switch_to: a6 = prev
+ */
+ENTRY(ret_from_kernel_thread)
+
+ call4 schedule_tail
+ mov a6, a3
+ callx4 a2
+ j common_exception_return
+
+ENDPROC(ret_from_kernel_thread)
#include <asm/regs.h>
extern void ret_from_fork(void);
+extern void ret_from_kernel_thread(void);
struct task_struct *current_set[NR_CPUS] = {&init_task, };
/*
* Copy thread.
*
+ * There are two modes in which this function is called:
+ * 1) Userspace thread creation,
+ * regs != NULL, usp_thread_fn is userspace stack pointer.
+ * It is expected to copy parent regs (in case CLONE_VM is not set
+ * in the clone_flags) and set up passed usp in the childregs.
+ * 2) Kernel thread creation,
+ * regs == NULL, usp_thread_fn is the function to run in the new thread
+ * and thread_fn_arg is its parameter.
+ * childregs are not used for the kernel threads.
+ *
* The stack layout for the new thread looks like this:
*
- * +------------------------+ <- sp in childregs (= tos)
+ * +------------------------+
* | childregs |
* +------------------------+ <- thread.sp = sp in dummy-frame
* | dummy-frame | (saved in dummy-frame spill-area)
* +------------------------+
*
- * We create a dummy frame to return to ret_from_fork:
- * a0 points to ret_from_fork (simulating a call4)
+ * We create a dummy frame to return to either ret_from_fork or
+ * ret_from_kernel_thread:
+ * a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
* sp points to itself (thread.sp)
- * a2, a3 are unused.
+ * a2, a3 are unused for userspace threads,
+ * a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
*
* Note: This is a pristine frame, so we don't need any spill region on top of
* childregs.
* involved. Much simpler to just not copy those live frames across.
*/
-int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long unused,
- struct task_struct * p, struct pt_regs * regs)
+int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
+ unsigned long thread_fn_arg,
+ struct task_struct *p, struct pt_regs *unused)
{
- struct pt_regs *childregs;
- unsigned long tos;
- int user_mode = user_mode(regs);
+ struct pt_regs *childregs = task_pt_regs(p);
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
struct thread_info *ti;
#endif
- /* Set up new TSS. */
- tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
- if (user_mode)
- childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
- else
- childregs = (struct pt_regs*)tos - 1;
-
- /* This does not copy all the regs. In a bout of brilliance or madness,
- ARs beyond a0-a15 exist past the end of the struct. */
- *childregs = *regs;
-
/* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
*((int*)childregs - 3) = (unsigned long)childregs;
*((int*)childregs - 4) = 0;
- childregs->areg[2] = 0;
- p->set_child_tid = p->clear_child_tid = NULL;
- p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
p->thread.sp = (unsigned long)childregs;
- if (user_mode(regs)) {
+ if (!(p->flags & PF_KTHREAD)) {
+ struct pt_regs *regs = current_pt_regs();
+ unsigned long usp = usp_thread_fn ?
+ usp_thread_fn : regs->areg[1];
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_fork, 0x1);
+
+ /* This does not copy all the regs.
+ * In a bout of brilliance or madness,
+ * ARs beyond a0-a15 exist past the end of the struct.
+ */
+ *childregs = *regs;
childregs->areg[1] = usp;
+ childregs->areg[2] = 0;
+
+ /* When sharing memory with the parent thread, the child
+ usually starts on a pristine stack, so we have to reset
+ windowbase, windowstart and wmask.
+ (Note that such a new thread is required to always create
+ an initial call4 frame)
+ The exception is vfork, where the new thread continues to
+ run on the parent's stack until it calls execve. This could
+ be a call8 or call12, which requires a legal stack frame
+ of the previous caller for the overflow handlers to work.
+ (Note that it's always legal to overflow live registers).
+ In this case, ensure to spill at least the stack pointer
+ of that frame. */
+
if (clone_flags & CLONE_VM) {
- childregs->wmask = 1; /* can't share live windows */
+ /* check that caller window is live and same stack */
+ int len = childregs->wmask & ~0xf;
+ if (regs->areg[1] == usp && len != 0) {
+ int callinc = (regs->areg[0] >> 30) & 3;
+ int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
+ put_user(regs->areg[caller_ars+1],
+ (unsigned __user*)(usp - 12));
+ }
+ childregs->wmask = 1;
+ childregs->windowstart = 1;
+ childregs->windowbase = 0;
} else {
int len = childregs->wmask & ~0xf;
memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
// FIXME: we need to set THREADPTR in thread_info...
if (clone_flags & CLONE_SETTLS)
childregs->areg[2] = childregs->areg[6];
-
} else {
- /* In kernel space, we start a new thread with a new stack. */
- childregs->wmask = 1;
- childregs->areg[1] = tos;
+ p->thread.ra = MAKE_RA_FOR_CALL(
+ (unsigned long)ret_from_kernel_thread, 1);
+
+ /* pass parameters to ret_from_kernel_thread:
+ * a2 = thread_fn, a3 = thread_fn arg
+ */
+ *((int *)childregs - 1) = thread_fn_arg;
+ *((int *)childregs - 2) = usp_thread_fn;
+
+ /* Childregs are only used when we're going to userspace
+ * in which case start_thread will set them up.
+ */
}
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
void __user *child_tid, long a5,
struct pt_regs *regs)
{
- if (!newsp)
- newsp = regs->areg[1];
return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}
-
-/*
- * xtensa_execve() executes a new program.
- */
-
-asmlinkage
-long xtensa_execve(const char __user *name,
- const char __user *const __user *argv,
- const char __user *const __user *envp,
- long a3, long a4, long a5,
- struct pt_regs *regs)
-{
- long error;
- struct filename *filename;
-
- filename = getname(name);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
- error = do_execve(filename->name, argv, envp, regs);
- putname(filename);
-out:
- return error;
-}
-
syscall_t sys_call_table[__NR_syscall_count] /* FIXME __cacheline_aligned */= {
[0 ... __NR_syscall_count - 1] = (syscall_t)&sys_ni_syscall,
-#undef __SYSCALL
#define __SYSCALL(nr,symbol,nargs) [ nr ] = (syscall_t)symbol,
-#undef __KERNEL_SYSCALLS__
-#include <asm/unistd.h>
+#include <uapi/asm/unistd.h>
};
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
return (long)ret;
}
-asmlinkage long xtensa_fadvise64_64(int fd, int advice, unsigned long long offset, unsigned long long len)
+asmlinkage long xtensa_fadvise64_64(int fd, int advice,
+ unsigned long long offset, unsigned long long len)
{
return sys_fadvise64_64(fd, offset, len, advice);
}
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(empty_zero_page);
/*
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
- depends on BLK_CGROUP=y && EXPERIMENTAL
+ depends on BLK_CGROUP=y
default n
---help---
Block layer bio throttling support. It can be used to limit
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
}
+
+ /*
+ * root blkg is destroyed. Just clear the pointer since
+ * root_rl does not take reference on root blkg.
+ */
+ q->root_blkg = NULL;
+ q->root_rl.blkg = NULL;
}
static void blkg_rcu_free(struct rcu_head *rcu_head)
*/
if (rl == &q->root_rl) {
ent = &q->blkg_list;
+ /* There are no more block groups, hence no request lists */
+ if (list_empty(ent))
+ return NULL;
} else {
blkg = container_of(rl, struct blkcg_gq, rl);
ent = &blkg->q_node;
struct request *rqa = container_of(a, struct request, queuelist);
struct request *rqb = container_of(b, struct request, queuelist);
- return !(rqa->q <= rqb->q);
+ return !(rqa->q < rqb->q ||
+ (rqa->q == rqb->q && blk_rq_pos(rqa) < blk_rq_pos(rqb)));
}
/*
config BLK_CPQ_CISS_DA
tristate "Compaq Smart Array 5xxx support"
depends on PCI
+ select CHECK_SIGNATURE
help
This is the driver for Compaq Smart Array 5xxx controllers.
Everyone using these boards should say Y here.
module will be called DAC960.
config BLK_DEV_UMEM
- tristate "Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)"
- depends on PCI && EXPERIMENTAL
+ tristate "Micro Memory MM5415 Battery Backed RAM support"
+ depends on PCI
---help---
Saying Y here will include support for the MM5415 family of
battery backed (Non-volatile) RAM cards.
a disc is opened for writing.
config CDROM_PKTCDVD_WCACHE
- bool "Enable write caching (EXPERIMENTAL)"
- depends on CDROM_PKTCDVD && EXPERIMENTAL
+ bool "Enable write caching"
+ depends on CDROM_PKTCDVD
help
If enabled, write caching will be set for the CD-R/W device. For now
this option is dangerous unless the CD-RW media is known good, as we
config VIRTIO_BLK
- tristate "Virtio block driver (EXPERIMENTAL)"
- depends on EXPERIMENTAL && VIRTIO
+ tristate "Virtio block driver"
+ depends on VIRTIO
---help---
This is the virtual block driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
config BLK_DEV_RBD
tristate "Rados block device (RBD)"
- depends on INET && EXPERIMENTAL && BLOCK
+ depends on INET && BLOCK
select CEPH_LIB
select LIBCRC32C
select CRYPTO_AES
return;
}
/* write all data in the battery backed cache to disk */
- memset(flush_buf, 0, 4);
return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
4, 0, CTLR_LUNID, TYPE_CMD);
kfree(flush_buf);
static struct platform_device floppy_device[N_DRIVE];
+static bool floppy_available(int drive)
+{
+ if (!(allowed_drive_mask & (1 << drive)))
+ return false;
+ if (fdc_state[FDC(drive)].version == FDC_NONE)
+ return false;
+ return true;
+}
+
static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
int drive = (*part & 3) | ((*part & 0x80) >> 5);
- if (drive >= N_DRIVE ||
- !(allowed_drive_mask & (1 << drive)) ||
- fdc_state[FDC(drive)].version == FDC_NONE)
+ if (drive >= N_DRIVE || !floppy_available(drive))
return NULL;
if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
return NULL;
static int __init do_floppy_init(void)
{
- int i, unit, drive;
- int err, dr;
+ int i, unit, drive, err;
set_debugt();
interruptjiffies = resultjiffies = jiffies;
raw_cmd = NULL;
- for (dr = 0; dr < N_DRIVE; dr++) {
- disks[dr] = alloc_disk(1);
- if (!disks[dr]) {
- err = -ENOMEM;
- goto out_put_disk;
- }
+ floppy_wq = alloc_ordered_workqueue("floppy", 0);
+ if (!floppy_wq)
+ return -ENOMEM;
- floppy_wq = alloc_ordered_workqueue("floppy", 0);
- if (!floppy_wq) {
+ for (drive = 0; drive < N_DRIVE; drive++) {
+ disks[drive] = alloc_disk(1);
+ if (!disks[drive]) {
err = -ENOMEM;
goto out_put_disk;
}
- disks[dr]->queue = blk_init_queue(do_fd_request, &floppy_lock);
- if (!disks[dr]->queue) {
+ disks[drive]->queue = blk_init_queue(do_fd_request, &floppy_lock);
+ if (!disks[drive]->queue) {
err = -ENOMEM;
- goto out_destroy_workq;
+ goto out_put_disk;
}
- blk_queue_max_hw_sectors(disks[dr]->queue, 64);
- disks[dr]->major = FLOPPY_MAJOR;
- disks[dr]->first_minor = TOMINOR(dr);
- disks[dr]->fops = &floppy_fops;
- sprintf(disks[dr]->disk_name, "fd%d", dr);
+ blk_queue_max_hw_sectors(disks[drive]->queue, 64);
+ disks[drive]->major = FLOPPY_MAJOR;
+ disks[drive]->first_minor = TOMINOR(drive);
+ disks[drive]->fops = &floppy_fops;
+ sprintf(disks[drive]->disk_name, "fd%d", drive);
- init_timer(&motor_off_timer[dr]);
- motor_off_timer[dr].data = dr;
- motor_off_timer[dr].function = motor_off_callback;
+ init_timer(&motor_off_timer[drive]);
+ motor_off_timer[drive].data = drive;
+ motor_off_timer[drive].function = motor_off_callback;
}
err = register_blkdev(FLOPPY_MAJOR, "fd");
}
for (drive = 0; drive < N_DRIVE; drive++) {
- if (!(allowed_drive_mask & (1 << drive)))
- continue;
- if (fdc_state[FDC(drive)].version == FDC_NONE)
+ if (!floppy_available(drive))
continue;
floppy_device[drive].name = floppy_device_name;
err = platform_device_register(&floppy_device[drive]);
if (err)
- goto out_release_dma;
+ goto out_remove_drives;
err = device_create_file(&floppy_device[drive].dev,
&dev_attr_cmos);
out_unreg_platform_dev:
platform_device_unregister(&floppy_device[drive]);
+out_remove_drives:
+ while (drive--) {
+ if (floppy_available(drive)) {
+ del_gendisk(disks[drive]);
+ device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
+ platform_device_unregister(&floppy_device[drive]);
+ }
+ }
out_release_dma:
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
out_unreg_region:
blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
platform_driver_unregister(&floppy_driver);
-out_destroy_workq:
- destroy_workqueue(floppy_wq);
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
- while (dr--) {
- del_timer_sync(&motor_off_timer[dr]);
- if (disks[dr]->queue) {
- blk_cleanup_queue(disks[dr]->queue);
- /*
- * put_disk() is not paired with add_disk() and
- * will put queue reference one extra time. fix it.
- */
- disks[dr]->queue = NULL;
+ for (drive = 0; drive < N_DRIVE; drive++) {
+ if (!disks[drive])
+ break;
+ if (disks[drive]->queue) {
+ del_timer_sync(&motor_off_timer[drive]);
+ blk_cleanup_queue(disks[drive]->queue);
+ disks[drive]->queue = NULL;
}
- put_disk(disks[dr]);
+ put_disk(disks[drive]);
}
+ destroy_workqueue(floppy_wq);
return err;
}
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
- if ((allowed_drive_mask & (1 << drive)) &&
- fdc_state[FDC(drive)].version != FDC_NONE) {
+ if (floppy_available(drive)) {
del_gendisk(disks[drive]);
device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
platform_device_unregister(&floppy_device[drive]);
if (lo->lo_state != Lo_bound)
return -ENXIO;
- if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */
- return -EBUSY;
+ /*
+ * If we've explicitly asked to tear down the loop device,
+ * and it has an elevated reference count, set it for auto-teardown when
+ * the last reference goes away. This stops $!~#$@ udev from
+ * preventing teardown because it decided that it needs to run blkid on
+ * loopback devices whenever they appear. xfstests is notorious for
+ * failing tests because blkid via udev races with a "losetup
+ * <dev>; do something like mkfs; losetup -d <dev>" sequence, causing
+ * the losetup -d command to fail with EBUSY.
+ */
+ if (lo->lo_refcnt > 1) {
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return 0;
+ }
if (filp == NULL)
return -EINVAL;
}
return rv;
}
-
-static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+static void mtip_set_timeout(struct driver_data *dd,
+ struct host_to_dev_fis *fis,
+ unsigned int *timeout, u8 erasemode)
{
switch (fis->command) {
case ATA_CMD_DOWNLOAD_MICRO:
break;
case ATA_CMD_SEC_ERASE_UNIT:
case 0xFC:
- *timeout = 240000; /* 4 minutes */
+ if (erasemode)
+ *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
+ else
+ *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
break;
case ATA_CMD_STANDBYNOW1:
*timeout = 120000; /* 2 minutes */
unsigned int transfer_size;
unsigned long task_file_data;
int intotal = outtotal + req_task->out_size;
+ int erasemode = 0;
taskout = req_task->out_size;
taskin = req_task->in_size;
fis.lba_hi,
fis.device);
- mtip_set_timeout(&fis, &timeout);
+ /* check for erase mode support during secure erase. */
+ if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
+ && (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+ erasemode = 1;
+ }
+
+ mtip_set_timeout(dd, &fis, &timeout, erasemode);
/* Determine the correct transfer size.*/
if (force_single_sector)
/* offset of Device Control register in PCIe extended capabilites space */
#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
+/* check for erase mode support during secure erase */
+#define MTIP_SEC_ERASE_MODE 0x3
+
/* # of times to retry timed out/failed IOs */
#define MTIP_MAX_RETRIES 2
struct block_device *bdev;
/* Cached size parameter. */
sector_t size;
- bool flush_support;
- bool discard_secure;
+ unsigned int flush_support:1;
+ unsigned int discard_secure:1;
};
struct backend_info;
{
struct xen_blkif *blkif;
- blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
+ blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
if (!blkif)
return ERR_PTR(-ENOMEM);
- memset(blkif, 0, sizeof(*blkif));
blkif->domid = domid;
spin_lock_init(&blkif->blk_ring_lock);
atomic_set(&blkif->refcnt, 1);
}
}
-void xen_blkif_free(struct xen_blkif *blkif)
+static void xen_blkif_free(struct xen_blkif *blkif)
{
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);
-int xenvbd_sysfs_addif(struct xenbus_device *dev)
+static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
int error;
return error;
}
-void xenvbd_sysfs_delif(struct xenbus_device *dev)
+static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
device_remove_file(&dev->dev, &dev_attr_mode);
* http://www.gnu.org/licenses/gpl.html
*
* Maintainer:
- * Andreas Herrmann <andreas.herrmann3@amd.com>
+ * Andreas Herrmann <herrmann.der.user@googlemail.com>
*
* Based on the powernow-k7.c module written by Dave Jones.
* (C) 2003 Dave Jones on behalf of SuSE Labs
}
chip->gpio_chip.ngpio = GEN_74X164_NUMBER_GPIOS * chip->registers;
- chip->buffer = devm_kzalloc(&spi->dev, chip->gpio_chip.ngpio, GFP_KERNEL);
+ chip->buffer = devm_kzalloc(&spi->dev, chip->registers, GFP_KERNEL);
if (!chip->buffer) {
ret = -ENOMEM;
goto exit_destroy;
if (ret)
return ret;
+ mvebu_gpio_set(chip, pin, value);
+
spin_lock_irqsave(&mvchip->lock, flags);
u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
u &= ~(1 << pin);
ct->handler = handle_edge_irq;
ct->chip.name = mvchip->chip.label;
- irq_setup_generic_chip(gc, IRQ_MSK(ngpios), IRQ_GC_INIT_MASK_CACHE,
+ irq_setup_generic_chip(gc, IRQ_MSK(ngpios), 0,
IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);
/* Setup irq domain on top of the generic chip. */
}
}
+/**
+ * _clear_gpio_debounce - clear debounce settings for a gpio
+ * @bank: the gpio bank we're acting upon
+ * @gpio: the gpio number on this @bank
+ *
+ * If a gpio is using debounce, then clear the debounce enable bit and if
+ * this is the only gpio in this bank using debounce, then clear the debounce
+ * time too. The debounce clock will also be disabled when calling this function
+ * if this is the only gpio in the bank using debounce.
+ */
+static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
+{
+ u32 gpio_bit = GPIO_BIT(bank, gpio);
+
+ if (!bank->dbck_flag)
+ return;
+
+ if (!(bank->dbck_enable_mask & gpio_bit))
+ return;
+
+ bank->dbck_enable_mask &= ~gpio_bit;
+ bank->context.debounce_en &= ~gpio_bit;
+ __raw_writel(bank->context.debounce_en,
+ bank->base + bank->regs->debounce_en);
+
+ if (!bank->dbck_enable_mask) {
+ bank->context.debounce = 0;
+ __raw_writel(bank->context.debounce, bank->base +
+ bank->regs->debounce);
+ clk_disable(bank->dbck);
+ bank->dbck_enabled = false;
+ }
+}
+
static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
unsigned trigger)
{
_set_gpio_irqenable(bank, gpio, 0);
_clear_gpio_irqstatus(bank, gpio);
_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
+ _clear_gpio_debounce(bank, gpio);
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
- tgpio->last_ier &= ~(1 << offset);
+ tgpio->last_ier &= ~(1UL << offset);
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
spin_unlock_irqrestore(&tgpio->lock, flags);
}
unsigned long flags;
spin_lock_irqsave(&tgpio->lock, flags);
- tgpio->last_ier |= 1 << offset;
+ tgpio->last_ier |= 1UL << offset;
iowrite32(tgpio->last_ier, tgpio->membase + TGPIO_IER);
spin_unlock_irqrestore(&tgpio->lock, flags);
}
*/
status = gpio_request(gpio, "sysfs");
- if (status < 0)
+ if (status < 0) {
+ if (status == -EPROBE_DEFER)
+ status = -ENODEV;
goto done;
-
+ }
status = gpio_export(gpio, true);
if (status < 0)
gpio_free(gpio);
spin_lock_irqsave(&gpio_lock, flags);
- if (!gpio_is_valid(gpio))
+ if (!gpio_is_valid(gpio)) {
+ status = -EINVAL;
goto done;
+ }
desc = &gpio_desc[gpio];
chip = desc->chip;
if (chip == NULL)
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node)
return -ENOMEM;
- node->offset = roundup(offset, mm->block_size);
- node->length = rounddown(offset + length, mm->block_size) - node->offset;
+
+ if (length) {
+ node->offset = roundup(offset, mm->block_size);
+ node->length = rounddown(offset + length, mm->block_size);
+ node->length -= node->offset;
+ }
list_add_tail(&node->nl_entry, &mm->nodes);
list_add_tail(&node->fl_entry, &mm->free);
mm->heap_nodes++;
- mm->heap_size += length;
return 0;
}
u32 block_size;
int heap_nodes;
- u32 heap_size;
};
int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
((priv->base.ram.size & 0x000000ff) << 32);
tags = nv_rd32(priv, 0x100320);
- if (tags) {
- ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
- if (ret)
- return ret;
+ ret = nouveau_mm_init(&priv->base.tags, 0, tags, 1);
+ if (ret)
+ return ret;
- nv_debug(priv, "%d compression tags\n", tags);
- }
+ nv_debug(priv, "%d compression tags\n", tags);
size = (priv->base.ram.size >> 12) - rsvd_head - rsvd_tail;
switch (device->chipset) {
case DCB_I2C_NVIO_BIT:
port->drive = info.drive & 0x0f;
if (device->card_type < NV_D0) {
- if (info.drive >= ARRAY_SIZE(nv50_i2c_port))
+ if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
break;
port->drive = nv50_i2c_port[port->drive];
port->sense = port->drive;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_disp *pdisp = nouveau_disp(drm->device);
struct nouveau_display *disp;
+ u32 pclass = dev->pdev->class >> 8;
int ret, gen;
disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (nv_device(drm->device)->card_type < NV_50)
- ret = nv04_display_create(dev);
- else
- if (nv_device(drm->device)->card_type < NV_D0)
- ret = nv50_display_create(dev);
- else
- ret = nvd0_display_create(dev);
- if (ret)
- goto disp_create_err;
-
- if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (nouveau_modeset == 1 ||
+ (nouveau_modeset < 0 && pclass == PCI_CLASS_DISPLAY_VGA)) {
+ if (nv_device(drm->device)->card_type < NV_50)
+ ret = nv04_display_create(dev);
+ else
+ if (nv_device(drm->device)->card_type < NV_D0)
+ ret = nv50_display_create(dev);
+ else
+ ret = nvd0_display_create(dev);
if (ret)
- goto vblank_err;
+ goto disp_create_err;
+
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ goto vblank_err;
+ }
+
+ nouveau_backlight_init(dev);
}
- nouveau_backlight_init(dev);
return 0;
vblank_err:
nouveau_backlight_exit(dev);
drm_vblank_cleanup(dev);
- disp->dtor(dev);
+ if (disp->dtor)
+ disp->dtor(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
static int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
-MODULE_PARM_DESC(modeset, "enable driver");
-static int nouveau_modeset = -1;
+MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
+ "0 = disabled, 1 = enabled, 2 = headless)");
+int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
static struct drm_driver driver;
nouveau_pm_fini(dev);
- nouveau_display_fini(dev);
+ if (dev->mode_config.num_crtc)
+ nouveau_display_fini(dev);
nouveau_display_destroy(dev);
nouveau_irq_fini(dev);
pm_state.event == PM_EVENT_PRETHAW)
return 0;
- NV_INFO(drm, "suspending fbcon...\n");
- nouveau_fbcon_set_suspend(dev, 1);
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "suspending fbcon...\n");
+ nouveau_fbcon_set_suspend(dev, 1);
- NV_INFO(drm, "suspending display...\n");
- ret = nouveau_display_suspend(dev);
- if (ret)
- return ret;
+ NV_INFO(drm, "suspending display...\n");
+ ret = nouveau_display_suspend(dev);
+ if (ret)
+ return ret;
+ }
NV_INFO(drm, "evicting buffers...\n");
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
nouveau_client_init(&cli->base);
}
- NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "resuming display...\n");
+ nouveau_display_resume(dev);
+ }
return ret;
}
nouveau_irq_postinstall(dev);
nouveau_pm_resume(dev);
- NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ if (dev->mode_config.num_crtc) {
+ NV_INFO(drm, "resuming display...\n");
+ nouveau_display_resume(dev);
+ }
return 0;
}
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force())
nouveau_modeset = 0;
- else
#endif
- nouveau_modeset = 1;
}
if (!nouveau_modeset)
nv_info((cli), fmt, ##args); \
} while (0)
+extern int nouveau_modeset;
+
#endif
nv_subdev(pmc)->intr(nv_subdev(pmc));
- if (device->card_type >= NV_D0) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nvd0_display_intr(dev);
- } else
- if (device->card_type >= NV_50) {
- if (nv_rd32(device, 0x000100) & 0x04000000)
- nv50_display_intr(dev);
+ if (dev->mode_config.num_crtc) {
+ if (device->card_type >= NV_D0) {
+ if (nv_rd32(device, 0x000100) & 0x04000000)
+ nvd0_display_intr(dev);
+ } else
+ if (device->card_type >= NV_50) {
+ if (nv_rd32(device, 0x000100) & 0x04000000)
+ nv50_display_intr(dev);
+ }
}
return IRQ_HANDLED;
NVWriteVgaCrtc(dev, 0, NV_CIO_CR_MODE_INDEX, saved_cr_mode);
if (blue == 0x18) {
- NV_INFO(drm, "Load detected on head A\n");
+ NV_DEBUG(drm, "Load detected on head A\n");
return connector_status_connected;
}
if (nv17_dac_sample_load(encoder) &
NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
- NV_INFO(drm, "Load detected on output %c\n",
- '@' + ffs(dcb->or));
+ NV_DEBUG(drm, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
return connector_status_connected;
} else {
return connector_status_disconnected;
helper->dpms(encoder, DRM_MODE_DPMS_ON);
- NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
return;
nv_encoder->last_dpms = mode;
- NV_INFO(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on vga encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
helper->dpms(encoder, DRM_MODE_DPMS_ON);
- NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
+ NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
+ nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
return;
nv_encoder->last_dpms = mode;
- NV_INFO(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on lvds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
if (was_powersaving && is_powersaving_dpms(mode))
return;
return;
nv_encoder->last_dpms = mode;
- NV_INFO(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on tmds encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
nv04_dfp_update_backlight(encoder, mode);
nv04_dfp_update_fp_control(encoder, mode);
struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
uint8_t crtc1A;
- NV_INFO(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
- mode, nv_encoder->dcb->index);
+ NV_DEBUG(drm, "Setting dpms mode %d on TV encoder (output %d)\n",
+ mode, nv_encoder->dcb->index);
state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
helper->dpms(encoder, DRM_MODE_DPMS_ON);
- NV_INFO(drm, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
- '@' + ffs(nv_encoder->dcb->or));
+ NV_DEBUG(drm, "Output %s is running on CRTC %d using output %c\n",
+ drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
}
static void nv04_tv_destroy(struct drm_encoder *encoder)
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI 0x0249
#define USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO 0x024a
#define USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS 0x024b
#define MS_RDESC 0x08
#define MS_NOGET 0x10
#define MS_DUPLICATE_USAGES 0x20
+#define MS_RDESC_3K 0x40
-/*
- * Microsoft Wireless Desktop Receiver (Model 1028) has
- * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
- */
static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ /*
+ * Microsoft Wireless Desktop Receiver (Model 1028) has
+ * 'Usage Min/Max' where it ought to have 'Physical Min/Max'
+ */
if ((quirks & MS_RDESC) && *rsize == 571 && rdesc[557] == 0x19 &&
rdesc[559] == 0x29) {
hid_info(hdev, "fixing up Microsoft Wireless Receiver Model 1028 report descriptor\n");
rdesc[557] = 0x35;
rdesc[559] = 0x45;
}
+ /* the same as above (s/usage/physical/) */
+ if ((quirks & MS_RDESC_3K) && *rsize == 106 &&
+ !memcmp((char []){ 0x19, 0x00, 0x29, 0xff },
+ &rdesc[94], 4)) {
+ rdesc[94] = 0x35;
+ rdesc[96] = 0x45;
+ }
return rdesc;
}
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
.driver_data = MS_PRESENTER },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
- .driver_data = MS_ERGONOMY },
+ .driver_data = MS_ERGONOMY | MS_RDESC_3K },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
},
{ .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
- MT_QUIRK_SLOT_IS_CONTACTNUMBER,
- .maxcontacts = 10
+ MT_QUIRK_SLOT_IS_CONTACTNUMBER
},
{ .name = MT_CLS_FLATFROG,
* contact max are global to the report */
td->last_field_index = field->index;
return -1;
- }
case HID_DG_TOUCH:
/* Legacy devices use TIPSWITCH and not TOUCH.
* Let's just ignore this field. */
return -1;
+ }
/* let hid-input decide for the others */
return 0;
* fam15h_power.c - AMD Family 15h processor power monitoring
*
* Copyright (c) 2011 Advanced Micro Devices, Inc.
- * Author: Andreas Herrmann <andreas.herrmann3@amd.com>
+ * Author: Andreas Herrmann <herrmann.der.user@googlemail.com>
*
*
* This driver is free software; you can redistribute it and/or
#include <asm/processor.h>
MODULE_DESCRIPTION("AMD Family 15h CPU processor power monitor");
-MODULE_AUTHOR("Andreas Herrmann <andreas.herrmann3@amd.com>");
+MODULE_AUTHOR("Andreas Herrmann <herrmann.der.user@googlemail.com>");
MODULE_LICENSE("GPL");
/* D18F3 */
.driver = {
.name = "gpio-fan",
.pm = GPIO_FAN_PM,
+#ifdef CONFIG_OF_GPIO
.of_match_table = of_match_ptr(of_gpio_fan_match),
+#endif
},
};
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-$(CONFIG_I2C_MUX) += i2c-mux.o
obj-y += algos/ busses/ muxes/
+obj-$(CONFIG_I2C_STUB) += i2c-stub.o
ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG
CFLAGS_i2c-core.o := -Wno-deprecated-declarations
tristate "Intel 82801 (ICH/PCH)"
depends on PCI
select CHECK_SIGNATURE if X86 && DMI
- select GPIOLIB if I2C_MUX
help
If you say yes to this option, support will be included for the Intel
801 family of mainboard I2C interfaces. Specifically, the following
obj-$(CONFIG_I2C_ELEKTOR) += i2c-elektor.o
obj-$(CONFIG_I2C_PCA_ISA) += i2c-pca-isa.o
obj-$(CONFIG_I2C_SIBYTE) += i2c-sibyte.o
-obj-$(CONFIG_I2C_STUB) += i2c-stub.o
obj-$(CONFIG_SCx200_ACB) += scx200_acb.o
obj-$(CONFIG_SCx200_I2C) += scx200_i2c.o
#include <linux/wait.h>
#include <linux/err.h>
-#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE
+#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
+ defined CONFIG_DMI
#include <linux/gpio.h>
#include <linux/i2c-mux-gpio.h>
#include <linux/platform_device.h>
int len;
u8 *data;
-#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE
+#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
+ defined CONFIG_DMI
const struct i801_mux_config *mux_drvdata;
struct platform_device *mux_pdev;
#endif
static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {}
#endif /* CONFIG_X86 && CONFIG_DMI */
-#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE
+#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
+ defined CONFIG_DMI
static struct i801_mux_config i801_mux_config_asus_z8_d12 = {
.gpio_chip = "gpio_ich",
.values = { 0x02, 0x03 },
id = dmi_first_match(mux_dmi_table);
if (id) {
- /* Remove from branch classes from trunk */
+ /* Remove branch classes from trunk */
mux_config = id->driver_data;
for (i = 0; i < mux_config->n_values; i++)
class &= ~mux_config->classes[i];
+++ /dev/null
-/*
- i2c-stub.c - I2C/SMBus chip emulator
-
- Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
- Copyright (C) 2007 Jean Delvare <khali@linux-fr.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#define DEBUG 1
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/i2c.h>
-
-#define MAX_CHIPS 10
-#define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \
- I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \
- I2C_FUNC_SMBUS_I2C_BLOCK)
-
-static unsigned short chip_addr[MAX_CHIPS];
-module_param_array(chip_addr, ushort, NULL, S_IRUGO);
-MODULE_PARM_DESC(chip_addr,
- "Chip addresses (up to 10, between 0x03 and 0x77)");
-
-static unsigned long functionality = STUB_FUNC;
-module_param(functionality, ulong, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(functionality, "Override functionality bitfield");
-
-struct stub_chip {
- u8 pointer;
- u16 words[256]; /* Byte operations use the LSB as per SMBus
- specification */
-};
-
-static struct stub_chip *stub_chips;
-
-/* Return negative errno on error. */
-static s32 stub_xfer(struct i2c_adapter * adap, u16 addr, unsigned short flags,
- char read_write, u8 command, int size, union i2c_smbus_data * data)
-{
- s32 ret;
- int i, len;
- struct stub_chip *chip = NULL;
-
- /* Search for the right chip */
- for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
- if (addr == chip_addr[i]) {
- chip = stub_chips + i;
- break;
- }
- }
- if (!chip)
- return -ENODEV;
-
- switch (size) {
-
- case I2C_SMBUS_QUICK:
- dev_dbg(&adap->dev, "smbus quick - addr 0x%02x\n", addr);
- ret = 0;
- break;
-
- case I2C_SMBUS_BYTE:
- if (read_write == I2C_SMBUS_WRITE) {
- chip->pointer = command;
- dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, "
- "wrote 0x%02x.\n",
- addr, command);
- } else {
- data->byte = chip->words[chip->pointer++] & 0xff;
- dev_dbg(&adap->dev, "smbus byte - addr 0x%02x, "
- "read 0x%02x.\n",
- addr, data->byte);
- }
-
- ret = 0;
- break;
-
- case I2C_SMBUS_BYTE_DATA:
- if (read_write == I2C_SMBUS_WRITE) {
- chip->words[command] &= 0xff00;
- chip->words[command] |= data->byte;
- dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
- "wrote 0x%02x at 0x%02x.\n",
- addr, data->byte, command);
- } else {
- data->byte = chip->words[command] & 0xff;
- dev_dbg(&adap->dev, "smbus byte data - addr 0x%02x, "
- "read 0x%02x at 0x%02x.\n",
- addr, data->byte, command);
- }
- chip->pointer = command + 1;
-
- ret = 0;
- break;
-
- case I2C_SMBUS_WORD_DATA:
- if (read_write == I2C_SMBUS_WRITE) {
- chip->words[command] = data->word;
- dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, "
- "wrote 0x%04x at 0x%02x.\n",
- addr, data->word, command);
- } else {
- data->word = chip->words[command];
- dev_dbg(&adap->dev, "smbus word data - addr 0x%02x, "
- "read 0x%04x at 0x%02x.\n",
- addr, data->word, command);
- }
-
- ret = 0;
- break;
-
- case I2C_SMBUS_I2C_BLOCK_DATA:
- len = data->block[0];
- if (read_write == I2C_SMBUS_WRITE) {
- for (i = 0; i < len; i++) {
- chip->words[command + i] &= 0xff00;
- chip->words[command + i] |= data->block[1 + i];
- }
- dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, "
- "wrote %d bytes at 0x%02x.\n",
- addr, len, command);
- } else {
- for (i = 0; i < len; i++) {
- data->block[1 + i] =
- chip->words[command + i] & 0xff;
- }
- dev_dbg(&adap->dev, "i2c block data - addr 0x%02x, "
- "read %d bytes at 0x%02x.\n",
- addr, len, command);
- }
-
- ret = 0;
- break;
-
- default:
- dev_dbg(&adap->dev, "Unsupported I2C/SMBus command\n");
- ret = -EOPNOTSUPP;
- break;
- } /* switch (size) */
-
- return ret;
-}
-
-static u32 stub_func(struct i2c_adapter *adapter)
-{
- return STUB_FUNC & functionality;
-}
-
-static const struct i2c_algorithm smbus_algorithm = {
- .functionality = stub_func,
- .smbus_xfer = stub_xfer,
-};
-
-static struct i2c_adapter stub_adapter = {
- .owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
- .algo = &smbus_algorithm,
- .name = "SMBus stub driver",
-};
-
-static int __init i2c_stub_init(void)
-{
- int i, ret;
-
- if (!chip_addr[0]) {
- printk(KERN_ERR "i2c-stub: Please specify a chip address\n");
- return -ENODEV;
- }
-
- for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
- if (chip_addr[i] < 0x03 || chip_addr[i] > 0x77) {
- printk(KERN_ERR "i2c-stub: Invalid chip address "
- "0x%02x\n", chip_addr[i]);
- return -EINVAL;
- }
-
- printk(KERN_INFO "i2c-stub: Virtual chip at 0x%02x\n",
- chip_addr[i]);
- }
-
- /* Allocate memory for all chips at once */
- stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL);
- if (!stub_chips) {
- printk(KERN_ERR "i2c-stub: Out of memory\n");
- return -ENOMEM;
- }
-
- ret = i2c_add_adapter(&stub_adapter);
- if (ret)
- kfree(stub_chips);
- return ret;
-}
-
-static void __exit i2c_stub_exit(void)
-{
- i2c_del_adapter(&stub_adapter);
- kfree(stub_chips);
-}
-
-MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
-MODULE_DESCRIPTION("I2C stub driver");
-MODULE_LICENSE("GPL");
-
-module_init(i2c_stub_init);
-module_exit(i2c_stub_exit);
-
--- /dev/null
+/*
+ i2c-stub.c - I2C/SMBus chip emulator
+
+ Copyright (c) 2004 Mark M. Hoffman <mhoffman@lightlink.com>
+ Copyright (C) 2007, 2012 Jean Delvare <khali@linux-fr.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#define DEBUG 1
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+
+#define MAX_CHIPS 10
+#define STUB_FUNC (I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
+
+static unsigned short chip_addr[MAX_CHIPS];
+module_param_array(chip_addr, ushort, NULL, S_IRUGO);
+MODULE_PARM_DESC(chip_addr,
+ "Chip addresses (up to 10, between 0x03 and 0x77)");
+
+static unsigned long functionality = STUB_FUNC;
+module_param(functionality, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(functionality, "Override functionality bitfield");
+
+struct stub_chip {
+ u8 pointer;
+ u16 words[256]; /* Byte operations use the LSB as per SMBus
+ specification */
+};
+
+static struct stub_chip *stub_chips;
+
+/* Return negative errno on error. */
+static s32 stub_xfer(struct i2c_adapter *adap, u16 addr, unsigned short flags,
+ char read_write, u8 command, int size, union i2c_smbus_data *data)
+{
+ s32 ret;
+ int i, len;
+ struct stub_chip *chip = NULL;
+
+ /* Search for the right chip */
+ for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
+ if (addr == chip_addr[i]) {
+ chip = stub_chips + i;
+ break;
+ }
+ }
+ if (!chip)
+ return -ENODEV;
+
+ switch (size) {
+
+ case I2C_SMBUS_QUICK:
+ dev_dbg(&adap->dev, "smbus quick - addr 0x%02x\n", addr);
+ ret = 0;
+ break;
+
+ case I2C_SMBUS_BYTE:
+ if (read_write == I2C_SMBUS_WRITE) {
+ chip->pointer = command;
+ dev_dbg(&adap->dev,
+ "smbus byte - addr 0x%02x, wrote 0x%02x.\n",
+ addr, command);
+ } else {
+ data->byte = chip->words[chip->pointer++] & 0xff;
+ dev_dbg(&adap->dev,
+ "smbus byte - addr 0x%02x, read 0x%02x.\n",
+ addr, data->byte);
+ }
+
+ ret = 0;
+ break;
+
+ case I2C_SMBUS_BYTE_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ chip->words[command] &= 0xff00;
+ chip->words[command] |= data->byte;
+ dev_dbg(&adap->dev,
+ "smbus byte data - addr 0x%02x, wrote 0x%02x at 0x%02x.\n",
+ addr, data->byte, command);
+ } else {
+ data->byte = chip->words[command] & 0xff;
+ dev_dbg(&adap->dev,
+ "smbus byte data - addr 0x%02x, read 0x%02x at 0x%02x.\n",
+ addr, data->byte, command);
+ }
+ chip->pointer = command + 1;
+
+ ret = 0;
+ break;
+
+ case I2C_SMBUS_WORD_DATA:
+ if (read_write == I2C_SMBUS_WRITE) {
+ chip->words[command] = data->word;
+ dev_dbg(&adap->dev,
+ "smbus word data - addr 0x%02x, wrote 0x%04x at 0x%02x.\n",
+ addr, data->word, command);
+ } else {
+ data->word = chip->words[command];
+ dev_dbg(&adap->dev,
+ "smbus word data - addr 0x%02x, read 0x%04x at 0x%02x.\n",
+ addr, data->word, command);
+ }
+
+ ret = 0;
+ break;
+
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ len = data->block[0];
+ if (read_write == I2C_SMBUS_WRITE) {
+ for (i = 0; i < len; i++) {
+ chip->words[command + i] &= 0xff00;
+ chip->words[command + i] |= data->block[1 + i];
+ }
+ dev_dbg(&adap->dev,
+ "i2c block data - addr 0x%02x, wrote %d bytes at 0x%02x.\n",
+ addr, len, command);
+ } else {
+ for (i = 0; i < len; i++) {
+ data->block[1 + i] =
+ chip->words[command + i] & 0xff;
+ }
+ dev_dbg(&adap->dev,
+ "i2c block data - addr 0x%02x, read %d bytes at 0x%02x.\n",
+ addr, len, command);
+ }
+
+ ret = 0;
+ break;
+
+ default:
+ dev_dbg(&adap->dev, "Unsupported I2C/SMBus command\n");
+ ret = -EOPNOTSUPP;
+ break;
+ } /* switch (size) */
+
+ return ret;
+}
+
+static u32 stub_func(struct i2c_adapter *adapter)
+{
+ return STUB_FUNC & functionality;
+}
+
+static const struct i2c_algorithm smbus_algorithm = {
+ .functionality = stub_func,
+ .smbus_xfer = stub_xfer,
+};
+
+static struct i2c_adapter stub_adapter = {
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
+ .algo = &smbus_algorithm,
+ .name = "SMBus stub driver",
+};
+
+static int __init i2c_stub_init(void)
+{
+ int i, ret;
+
+ if (!chip_addr[0]) {
+ pr_err("i2c-stub: Please specify a chip address\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
+ if (chip_addr[i] < 0x03 || chip_addr[i] > 0x77) {
+ pr_err("i2c-stub: Invalid chip address 0x%02x\n",
+ chip_addr[i]);
+ return -EINVAL;
+ }
+
+ pr_info("i2c-stub: Virtual chip at 0x%02x\n", chip_addr[i]);
+ }
+
+ /* Allocate memory for all chips at once */
+ stub_chips = kzalloc(i * sizeof(struct stub_chip), GFP_KERNEL);
+ if (!stub_chips) {
+ pr_err("i2c-stub: Out of memory\n");
+ return -ENOMEM;
+ }
+
+ ret = i2c_add_adapter(&stub_adapter);
+ if (ret)
+ kfree(stub_chips);
+ return ret;
+}
+
+static void __exit i2c_stub_exit(void)
+{
+ i2c_del_adapter(&stub_adapter);
+ kfree(stub_chips);
+}
+
+MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
+MODULE_DESCRIPTION("I2C stub driver");
+MODULE_LICENSE("GPL");
+
+module_init(i2c_stub_init);
+module_exit(i2c_stub_exit);
config KEYBOARD_LPC32XX
tristate "LPC32XX matrix key scanner support"
depends on ARCH_LPC32XX && OF
+ select INPUT_MATRIXKMAP
help
Say Y here if you want to use NXP LPC32XX SoC key scanner interface,
connected to a key matrix.
unsigned int mask = 0, direct_key_num = 0;
unsigned long kpc = 0;
+ /* clear pending interrupt bit */
+ keypad_readl(KPC);
+
/* enable matrix keys with automatic scan */
if (pdata->matrix_key_rows && pdata->matrix_key_cols) {
kpc |= KPC_ASACT | KPC_MIE | KPC_ME | KPC_MS_ALL;
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateInitWait:
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
+/* MacbookPro10,2 (unibody, October 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
+#define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+ /* MacbookPro10,2 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
/* Terminating entry */
{}
};
{ SN_COORD, -150, 6730 },
{ SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { SN_PRESSURE, 0, 300 },
+ { SN_WIDTH, 0, 2048 },
+ { SN_COORD, -4750, 5280 },
+ { SN_COORD, -150, 6730 },
+ { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
+ },
{}
};
features->pktlen = WACOM_PKGLEN_TPC2FG;
}
- if (features->type == MTSCREEN || WACOM_24HDT)
+ if (features->type == MTSCREEN || features->type == WACOM_24HDT)
features->pktlen = WACOM_PKGLEN_MTOUCH;
if (features->type == BAMBOO_PT) {
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
+
+ __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
wacom_setup_cintiq(wacom_wac);
break;
config TOUCHSCREEN_EGALAX
tristate "EETI eGalax multi-touch panel support"
- depends on I2C
+ depends on I2C && OF
help
Say Y here to enable support for I2C connected EETI
eGalax multi-touch panels.
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/input/mt.h>
+#include <linux/of_gpio.h>
/*
* Mouse Mode: some panel may configure the controller to mouse mode,
/* wake up controller by a falling edge of the interrupt gpio. */
static int egalax_wake_up_device(struct i2c_client *client)
{
- int gpio = irq_to_gpio(client->irq);
+ struct device_node *np = client->dev.of_node;
+ int gpio;
int ret;
+ if (!np)
+ return -ENODEV;
+
+ gpio = of_get_named_gpio(np, "wakeup-gpios", 0);
+ if (!gpio_is_valid(gpio))
+ return -ENODEV;
+
ret = gpio_request(gpio, "egalax_irq");
if (ret < 0) {
dev_err(&client->dev,
ts->input_dev = input_dev;
/* controller may be in sleep, wake it up. */
- egalax_wake_up_device(client);
+ error = egalax_wake_up_device(client);
+ if (error) {
+ dev_err(&client->dev, "Failed to wake up the controller\n");
+ goto err_free_dev;
+ }
ret = egalax_firmware_version(client);
if (ret < 0) {
static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
+static struct of_device_id egalax_ts_dt_ids[] = {
+ { .compatible = "eeti,egalax_ts" },
+ { /* sentinel */ }
+};
+
static struct i2c_driver egalax_ts_driver = {
.driver = {
.name = "egalax_ts",
.owner = THIS_MODULE,
.pm = &egalax_ts_pm_ops,
+ .of_match_table = of_match_ptr(egalax_ts_dt_ids),
},
.id_table = egalax_ts_id,
.probe = egalax_ts_probe,
__set_bit(BTN_TOUCH, input_dev->keybit);
input_set_abs_params(ptsc->dev, ABS_X, 0, 0x3ff, 0, 0);
input_set_abs_params(ptsc->dev, ABS_Y, 0, 0x3ff, 0, 0);
- input_set_abs_params(ptsc->dev, ABS_PRESSURE, 0, 0, 0, 0);
serio_set_drvdata(serio, ptsc);
}
conf->nfaults = 0;
- rdev_for_each(rdev, mddev)
+ rdev_for_each(rdev, mddev) {
conf->rdev = rdev;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ }
md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
mddev->private = conf;
|| disk_idx < 0)
continue;
if (test_bit(Replacement, &rdev->flags))
- disk = conf->mirrors + conf->raid_disks + disk_idx;
+ disk = conf->mirrors + mddev->raid_disks + disk_idx;
else
disk = conf->mirrors + disk_idx;
clear_bit(Unmerged, &rdev->flags);
}
md_integrity_add_rdev(rdev, mddev);
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+ if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
print_conf(conf);
discard_supported = true;
}
- if (discard_supported)
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
- else
- queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
-
+ if (mddev->queue) {
+ if (discard_supported)
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+ mddev->queue);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+ mddev->queue);
+ }
/* need to check that every block has at least one working mirror */
if (!enough(conf, -1)) {
printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
+ unsigned long flags;
int ret;
ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
/* Remove port id from vp target map */
+ spin_lock_irqsave(&vha->hw->vport_slock, flags);
qlt_update_vp_map(vha, RESET_AL_PA);
+ spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
qla2x00_mark_vp_devices_dead(vha);
atomic_set(&vha->vp_state, VP_FAILED);
int pmap_len;
fc_port_t *fcport;
int global_resets;
+ unsigned long flags;
retry:
global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
- sess->s_id = fcport->d_id;
- sess->loop_id = fcport->loop_id;
- sess->conf_compl_supported = !!(fcport->flags &
- FCF_CONF_COMP_SUPPORTED);
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
res = true;
qlt_undelete_sess(sess);
kref_get(&sess->se_sess->sess_kref);
- sess->s_id = fcport->d_id;
- sess->loop_id = fcport->loop_id;
- sess->conf_compl_supported = !!(fcport->flags &
- FCF_CONF_COMP_SUPPORTED);
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
+
if (sess->local && !local)
sess->local = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
*/
kref_get(&sess->se_sess->sess_kref);
- sess->conf_compl_supported = !!(fcport->flags &
- FCF_CONF_COMP_SUPPORTED);
+ sess->conf_compl_supported = (fcport->flags & FCF_CONF_COMP_SUPPORTED);
BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
"Reappeared sess %p\n", sess);
}
- sess->s_id = fcport->d_id;
- sess->loop_id = fcport->loop_id;
- sess->conf_compl_supported = !!(fcport->flags &
- FCF_CONF_COMP_SUPPORTED);
+ ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id,
+ (fcport->flags & FCF_CONF_COMP_SUPPORTED));
}
if (sess && sess->local) {
int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
void *, uint8_t *, uint16_t);
+ void (*update_sess)(struct qla_tgt_sess *, port_id_t, uint16_t, bool);
struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
const uint16_t);
struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
struct tcm_qla2xxx_tpg, se_tpg);
struct tcm_qla2xxx_lport *lport = tpg->lport;
- return &lport->lport_name[0];
+ return lport->lport_naa_name;
}
static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
return 0;
}
+static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
+ uint16_t loop_id, bool conf_compl_supported)
+{
+ struct qla_tgt *tgt = sess->tgt;
+ struct qla_hw_data *ha = tgt->ha;
+ struct tcm_qla2xxx_lport *lport = ha->tgt.target_lport_ptr;
+ struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
+ struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
+ struct tcm_qla2xxx_nacl, se_node_acl);
+ u32 key;
+
+
+ if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
+ pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
+ sess,
+ sess->port_name[0], sess->port_name[1],
+ sess->port_name[2], sess->port_name[3],
+ sess->port_name[4], sess->port_name[5],
+ sess->port_name[6], sess->port_name[7],
+ sess->loop_id, loop_id,
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+ s_id.b.domain, s_id.b.area, s_id.b.al_pa);
+
+ if (sess->loop_id != loop_id) {
+ /*
+ * Because we can shuffle loop IDs around and we
+ * update different sessions non-atomically, we might
+ * have overwritten this session's old loop ID
+ * already, and we might end up overwriting some other
+ * session that will be updated later. So we have to
+ * be extra careful and we can't warn about those things...
+ */
+ if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
+ lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;
+
+ lport->lport_loopid_map[loop_id].se_nacl = se_nacl;
+
+ sess->loop_id = loop_id;
+ }
+
+ if (sess->s_id.b24 != s_id.b24) {
+ key = (((u32) sess->s_id.b.domain << 16) |
+ ((u32) sess->s_id.b.area << 8) |
+ ((u32) sess->s_id.b.al_pa));
+
+ if (btree_lookup32(&lport->lport_fcport_map, key))
+ WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
+ "Found wrong se_nacl when updating s_id %x:%x:%x\n",
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+ else
+ WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
+ sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
+
+ key = (((u32) s_id.b.domain << 16) |
+ ((u32) s_id.b.area << 8) |
+ ((u32) s_id.b.al_pa));
+
+ if (btree_lookup32(&lport->lport_fcport_map, key)) {
+ WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
+ s_id.b.domain, s_id.b.area, s_id.b.al_pa);
+ btree_update32(&lport->lport_fcport_map, key, se_nacl);
+ } else {
+ btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
+ }
+
+ sess->s_id = s_id;
+ nacl->nport_id = key;
+ }
+
+ sess->conf_compl_supported = conf_compl_supported;
+}
+
/*
* Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
*/
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
.free_session = tcm_qla2xxx_free_session,
+ .update_sess = tcm_qla2xxx_update_sess,
.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
.find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
.find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
lport->lport_wwpn = wwpn;
tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
wwpn);
+ sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);
ret = tcm_qla2xxx_init_lport(lport);
if (ret != 0)
lport->lport_npiv_wwnn = npiv_wwnn;
tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
+ sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
/* FIXME: tcm_qla2xxx_npiv_make_lport */
ret = -ENOSYS;
u64 lport_npiv_wwnn;
/* ASCII formatted WWPN for FC Target Lport */
char lport_name[TCM_QLA2XXX_NAMELEN];
+ /* ASCII formatted naa WWPN for VPD page 83 etc */
+ char lport_naa_name[TCM_QLA2XXX_NAMELEN];
/* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
/* map for fc_port pointers in 24-bit FC Port ID space */
if (!ret) {
dev_err(ssp->dev, "DMA transfer timeout\n");
ret = -ETIMEDOUT;
+ dmaengine_terminate_all(ssp->dmach);
goto err_vmalloc;
}
first = last = 0;
}
- m->status = 0;
+ m->status = status;
spi_finalize_current_message(master);
return status;
*/
iscsit_thread_check_cpumask(conn, current, 1);
- schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+ wait_event_interruptible(conn->queues_wq,
+ !iscsit_conn_all_queues_empty(conn) ||
+ ts->status == ISCSI_THREAD_SET_RESET);
if ((ts->status == ISCSI_THREAD_SET_RESET) ||
signal_pending(current))
};
struct iscsi_conn {
+ wait_queue_head_t queues_wq;
/* Authentication Successful for this connection */
u8 auth_complete;
/* State connection is currently in */
static int iscsi_login_init_conn(struct iscsi_conn *conn)
{
+ init_waitqueue_head(&conn->queues_wq);
INIT_LIST_HEAD(&conn->conn_list);
INIT_LIST_HEAD(&conn->conn_cmd_list);
INIT_LIST_HEAD(&conn->immed_queue_list);
atomic_set(&conn->check_immediate_queue, 1);
spin_unlock_bh(&conn->immed_queue_lock);
- wake_up_process(conn->thread_set->tx_thread);
+ wake_up(&conn->queues_wq);
}
struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsi_conn *conn)
atomic_inc(&cmd->response_queue_count);
spin_unlock_bh(&conn->response_queue_lock);
- wake_up_process(conn->thread_set->tx_thread);
+ wake_up(&conn->queues_wq);
}
struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *conn)
}
}
+bool iscsit_conn_all_queues_empty(struct iscsi_conn *conn)
+{
+ bool empty;
+
+ spin_lock_bh(&conn->immed_queue_lock);
+ empty = list_empty(&conn->immed_queue_list);
+ spin_unlock_bh(&conn->immed_queue_lock);
+
+ if (!empty)
+ return empty;
+
+ spin_lock_bh(&conn->response_queue_lock);
+ empty = list_empty(&conn->response_queue_list);
+ spin_unlock_bh(&conn->response_queue_lock);
+
+ return empty;
+}
+
void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn)
{
struct iscsi_queue_req *qr, *qr_tmp;
extern void iscsit_add_cmd_to_response_queue(struct iscsi_cmd *, struct iscsi_conn *, u8);
extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_conn *);
extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);
+extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
extern void iscsit_free_cmd(struct iscsi_cmd *);
if (ret < 0)
goto out;
- if (core_dev_setup_virtual_lun0() < 0)
+ ret = core_dev_setup_virtual_lun0();
+ if (ret < 0)
goto out;
return 0;
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
- u32 tmp, aligned_max_sectors;
+ u32 aligned_max_sectors;
+ u32 alignment;
/*
* Limit max_sectors to a PAGE_SIZE aligned value for modern
* transport_allocate_data_tasks() operation.
*/
- tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
- aligned_max_sectors = (tmp / block_size);
- if (max_sectors != aligned_max_sectors) {
- printk(KERN_INFO "Rounding down aligned max_sectors from %u"
- " to %u\n", max_sectors, aligned_max_sectors);
- return aligned_max_sectors;
- }
+ alignment = max(1ul, PAGE_SIZE / block_size);
+ aligned_max_sectors = rounddown(max_sectors, alignment);
+
+ if (max_sectors != aligned_max_sectors)
+ pr_info("Rounding down aligned max_sectors from %u to %u\n",
+ max_sectors, aligned_max_sectors);
- return max_sectors;
+ return aligned_max_sectors;
}
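A worked example of the new rounding, as a standalone sketch rather than target-core code (PAGE_SIZE assumed to be 4096): with 512-byte blocks the alignment is 8 sectors, so a max_sectors of 1023 rounds down to 1016; with 4096-byte blocks the alignment is 1 and the value passes through unchanged.

    #include <stdio.h>

    #define PAGE_SIZE 4096u                 /* assumption for this example */

    static unsigned int align_max_sectors(unsigned int max_sectors,
                                          unsigned int block_size)
    {
            unsigned int alignment = PAGE_SIZE / block_size;

            if (alignment < 1)
                    alignment = 1;                          /* block_size larger than a page */
            return max_sectors - (max_sectors % alignment); /* rounddown() */
    }

    int main(void)
    {
            printf("%u\n", align_max_sectors(1023, 512));   /* prints 1016 */
            printf("%u\n", align_max_sectors(1023, 4096));  /* prints 1023 */
            return 0;
    }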
void se_dev_set_default_attribs(
return 0;
}
+static int sbc_emulate_noop(struct se_cmd *cmd)
+{
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
{
return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
size = 0;
cmd->execute_cmd = sbc_emulate_verify;
break;
+ case REZERO_UNIT:
+ case SEEK_6:
+ case SEEK_10:
+ /*
+ * There are still clients out there which use these old SCSI-2
+ * commands. This mainly happens when running VMs with legacy
+ * guest systems, connected via SCSI command pass-through to
+ * iSCSI targets. Make them happy and return status GOOD.
+ */
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_noop;
+ break;
default:
ret = spc_parse_cdb(cmd, &size);
if (ret)
unsigned char buf[SE_INQUIRY_BUF];
int p, ret;
+ memset(buf, 0, SE_INQUIRY_BUF);
+
if (dev == tpg->tpg_virt_lun0.lun_se_dev)
buf[0] = 0x3f; /* Not connected */
else
printk("ABORT_TASK: Found referenced %s task_tag: %u\n",
se_cmd->se_tfo->get_fabric_name(), ref_tag);
- spin_lock_irq(&se_cmd->t_state_lock);
+ spin_lock(&se_cmd->t_state_lock);
if (se_cmd->transport_state & CMD_T_COMPLETE) {
printk("ABORT_TASK: ref_tag: %u already complete, skipping\n", ref_tag);
- spin_unlock_irq(&se_cmd->t_state_lock);
+ spin_unlock(&se_cmd->t_state_lock);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
goto out;
}
se_cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock_irq(&se_cmd->t_state_lock);
+ spin_unlock(&se_cmd->t_state_lock);
list_del_init(&se_cmd->se_cmd_list);
kref_get(&se_cmd->cmd_kref);
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
- transport_generic_free_cmd(se_cmd, 0);
}
/**
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateInitWait:
info->feature_resize = val;
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
#endif
}
+static void gntdev_free_map(struct grant_map *map)
+{
+ if (map == NULL)
+ return;
+
+ if (map->pages)
+ free_xenballooned_pages(map->count, map->pages);
+ kfree(map->pages);
+ kfree(map->grants);
+ kfree(map->map_ops);
+ kfree(map->unmap_ops);
+ kfree(map->kmap_ops);
+ kfree(map);
+}
+
static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
struct grant_map *add;
return add;
err:
- kfree(add->pages);
- kfree(add->grants);
- kfree(add->map_ops);
- kfree(add->unmap_ops);
- kfree(add->kmap_ops);
- kfree(add);
+ gntdev_free_map(add);
return NULL;
}
evtchn_put(map->notify.event);
}
- if (map->pages) {
- if (!use_ptemod)
- unmap_grant_pages(map, 0, map->count);
-
- free_xenballooned_pages(map->count, map->pages);
- }
- kfree(map->pages);
- kfree(map->grants);
- kfree(map->map_ops);
- kfree(map->unmap_ops);
- kfree(map);
+ if (map->pages && !use_ptemod)
+ unmap_grant_pages(map, 0, map->count);
+ gntdev_free_map(map);
}
/* ------------------------------------------------------------------ */
goto out;
/* Can't write a xenbus message larger we can buffer */
- if ((len + u->len) > sizeof(u->u.buffer)) {
+ if (len > sizeof(u->u.buffer) - u->len) {
/* On error, dump existing buffer */
u->len = 0;
rc = -EINVAL;
unsigned int sz = sizeof(struct bio) + extra_size;
struct kmem_cache *slab = NULL;
struct bio_slab *bslab, *new_bio_slabs;
+ unsigned int new_bio_slab_max;
unsigned int i, entry = -1;
mutex_lock(&bio_slab_lock);
goto out_unlock;
if (bio_slab_nr == bio_slab_max && entry == -1) {
- bio_slab_max <<= 1;
+ new_bio_slab_max = bio_slab_max << 1;
new_bio_slabs = krealloc(bio_slabs,
- bio_slab_max * sizeof(struct bio_slab),
+ new_bio_slab_max * sizeof(struct bio_slab),
GFP_KERNEL);
if (!new_bio_slabs)
goto out_unlock;
+ bio_slab_max = new_bio_slab_max;
bio_slabs = new_bio_slabs;
}
if (entry == -1)
return ret;
}
+static ssize_t blkdev_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ ssize_t ret;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_splice_read(file, ppos, pipe, len, flags);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+
+static ssize_t blkdev_splice_write(struct pipe_inode_info *pipe,
+ struct file *file, loff_t *ppos, size_t len,
+ unsigned int flags)
+{
+ ssize_t ret;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_splice_write(pipe, file, ppos, len, flags);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+
+
/*
* Try to release a page associated with block device when the system
* is under memory pressure.
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_blkdev_ioctl,
#endif
- .splice_read = generic_file_splice_read,
- .splice_write = generic_file_splice_write,
+ .splice_read = blkdev_splice_read,
+ .splice_write = blkdev_splice_write,
};
int ioctl_by_bdev(struct block_device *bdev, unsigned cmd, unsigned long arg)
*max_len = handle_length;
type = 255;
}
+ if (dentry)
+ dput(dentry);
return type;
}
"inode=%lu", ino + 1);
continue;
}
+ BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
+ err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
+ if (err)
+ goto fail;
ext4_lock_group(sb, group);
ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
ext4_unlock_group(sb, group);
goto out;
got:
+ BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
+ err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
+ if (err)
+ goto fail;
+
/* We may have to initialize the block bitmap if it isn't already */
if (ext4_has_group_desc_csum(sb) &&
gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
goto fail;
}
- BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
- err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
- if (err)
- goto fail;
-
BUFFER_TRACE(group_desc_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, group_desc_bh);
if (err)
}
ext4_unlock_group(sb, group);
- BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
- err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
- if (err)
- goto fail;
-
BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
if (err)
return __close_fd(files, fd);
if (fd >= rlimit(RLIMIT_NOFILE))
- return -EMFILE;
+ return -EBADF;
spin_lock(&files->file_lock);
err = expand_files(files, fd);
return -EINVAL;
if (newfd >= rlimit(RLIMIT_NOFILE))
- return -EMFILE;
+ return -EBADF;
spin_lock(&files->file_lock);
err = expand_files(files, newfd);
--- /dev/null
+/*
+ * Statically sized hash table implementation
+ * (C) 2012 Sasha Levin <levinsasha928@gmail.com>
+ */
+
+#ifndef _LINUX_HASHTABLE_H
+#define _LINUX_HASHTABLE_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/hash.h>
+#include <linux/rculist.h>
+
+#define DEFINE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)] = \
+ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
+
+#define DECLARE_HASHTABLE(name, bits) \
+ struct hlist_head name[1 << (bits)]
+
+#define HASH_SIZE(name) (ARRAY_SIZE(name))
+#define HASH_BITS(name) ilog2(HASH_SIZE(name))
+
+/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
+#define hash_min(val, bits) \
+ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
+
+static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ INIT_HLIST_HEAD(&ht[i]);
+}
+
+/**
+ * hash_init - initialize a hash table
+ * @hashtable: hashtable to be initialized
+ *
+ * Calculates the size of the hashtable from the given parameter and then
+ * initializes every bucket to an empty hlist.
+ *
+ * This has to be a macro because HASH_BITS() computes the table size at
+ * compile time and therefore cannot work on a pointer.
+ */
+#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_add - add an object to a hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add(hashtable, node, key) \
+ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_add_rcu - add an object to a rcu enabled hashtable
+ * @hashtable: hashtable to add to
+ * @node: the &struct hlist_node of the object to be added
+ * @key: the key of the object to be added
+ */
+#define hash_add_rcu(hashtable, node, key) \
+ hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
+
+/**
+ * hash_hashed - check whether an object is in any hashtable
+ * @node: the &struct hlist_node of the object to be checked
+ */
+static inline bool hash_hashed(struct hlist_node *node)
+{
+ return !hlist_unhashed(node);
+}
+
+static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
+{
+ unsigned int i;
+
+ for (i = 0; i < sz; i++)
+ if (!hlist_empty(&ht[i]))
+ return false;
+
+ return true;
+}
+
+/**
+ * hash_empty - check whether a hashtable is empty
+ * @hashtable: hashtable to check
+ *
+ * This has to be a macro because HASH_BITS() computes the table size at
+ * compile time and therefore cannot work on a pointer.
+ */
+#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
+
+/**
+ * hash_del - remove an object from a hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del(struct hlist_node *node)
+{
+ hlist_del_init(node);
+}
+
+/**
+ * hash_del_rcu - remove an object from a rcu enabled hashtable
+ * @node: &struct hlist_node of the object to remove
+ */
+static inline void hash_del_rcu(struct hlist_node *node)
+{
+ hlist_del_init_rcu(node);
+}
+
+/**
+ * hash_for_each - iterate over a hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each(name, bkt, node, obj, member) \
+ for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
+ hlist_for_each_entry(obj, node, &name[bkt], member)
+
+/**
+ * hash_for_each_rcu - iterate over a rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_rcu(name, bkt, node, obj, member) \
+ for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
+ hlist_for_each_entry_rcu(obj, node, &name[bkt], member)
+
+/**
+ * hash_for_each_safe - iterate over a hashtable safe against removal of
+ * hash entry
+ * @name: hashtable to iterate
+ * @bkt: integer to use as bucket loop cursor
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @tmp: a &struct used for temporary storage
+ * @obj: the type * to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ */
+#define hash_for_each_safe(name, bkt, node, tmp, obj, member) \
+ for ((bkt) = 0, node = NULL; node == NULL && (bkt) < HASH_SIZE(name); (bkt)++)\
+ hlist_for_each_entry_safe(obj, node, tmp, &name[bkt], member)
+
+/**
+ * hash_for_each_possible - iterate over all possible objects hashing to the
+ * same bucket
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible(name, obj, node, member, key) \
+ hlist_for_each_entry(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+
+/**
+ * hash_for_each_possible_rcu - iterate over all possible objects hashing to the
+ * same bucket in an rcu enabled hashtable
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible_rcu(name, obj, node, member, key) \
+ hlist_for_each_entry_rcu(obj, node, &name[hash_min(key, HASH_BITS(name))], member)
+
+/**
+ * hash_for_each_possible_safe - iterate over all possible objects hashing to the
+ * same bucket safe against removals
+ * @name: hashtable to iterate
+ * @obj: the type * to use as a loop cursor for each entry
+ * @node: the &struct list_head to use as a loop cursor for each entry
+ * @tmp: a &struct used for temporary storage
+ * @member: the name of the hlist_node within the struct
+ * @key: the key of the objects to iterate over
+ */
+#define hash_for_each_possible_safe(name, obj, node, tmp, member, key) \
+ hlist_for_each_entry_safe(obj, node, tmp, \
+ &name[hash_min(key, HASH_BITS(name))], member)
+
+
+#endif
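A minimal usage sketch for the helpers above, assuming a kernel-module context and a hypothetical struct item; in this version of the API the lookup and iteration macros still take an explicit struct hlist_node cursor.

    #include <linux/hashtable.h>

    struct item {
            int key;
            struct hlist_node node;
    };

    static DEFINE_HASHTABLE(items, 4);              /* 2^4 = 16 buckets */

    static void add_item(struct item *it)
    {
            hash_add(items, &it->node, it->key);
    }

    static struct item *find_item(int key)
    {
            struct item *it;
            struct hlist_node *pos;

            /* walks only the bucket that "key" hashes to */
            hash_for_each_possible(items, it, pos, node, key)
                    if (it->key == key)
                            return it;
            return NULL;
    }

    static void del_item(struct item *it)
    {
            hash_del(&it->node);
    }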
*/
#define KVM_MEMSLOT_INVALID (1UL << 16)
-/*
- * If we support unaligned MMIO, at most one fragment will be split into two:
- */
-#ifdef KVM_UNALIGNED_MMIO
-# define KVM_EXTRA_MMIO_FRAGMENTS 1
-#else
-# define KVM_EXTRA_MMIO_FRAGMENTS 0
-#endif
-
-#define KVM_USER_MMIO_SIZE 8
-
-#define KVM_MAX_MMIO_FRAGMENTS \
- (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
+/* Two fragments for cross MMIO pages. */
+#define KVM_MAX_MMIO_FRAGMENTS 2
/*
* For the normal pfn, the highest 12 bits should be zero,
struct mutex mtx;
};
+#define light_mb() barrier()
+#define heavy_mb() synchronize_sched()
+
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
- rcu_read_lock();
+ rcu_read_lock_sched();
if (unlikely(p->locked)) {
- rcu_read_unlock();
+ rcu_read_unlock_sched();
mutex_lock(&p->mtx);
this_cpu_inc(*p->counters);
mutex_unlock(&p->mtx);
return;
}
this_cpu_inc(*p->counters);
- rcu_read_unlock();
+ rcu_read_unlock_sched();
+ light_mb(); /* A, between read of p->locked and read of data, paired with D */
}
static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
- /*
- * On X86, write operation in this_cpu_dec serves as a memory unlock
- * barrier (i.e. memory accesses may be moved before the write, but
- * no memory accesses are moved past the write).
- * On other architectures this may not be the case, so we need smp_mb()
- * there.
- */
-#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
- barrier();
-#else
- smp_mb();
-#endif
+ light_mb(); /* B, between read of the data and write to p->counter, paired with C */
this_cpu_dec(*p->counters);
}
{
mutex_lock(&p->mtx);
p->locked = true;
- synchronize_rcu();
+ synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
while (__percpu_count(p->counters))
msleep(1);
- smp_rmb(); /* paired with smp_mb() in percpu_sem_up_read() */
+ heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
}
static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
+ heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
p->locked = false;
mutex_unlock(&p->mtx);
}
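
The barrier pairing annotated above (A and B on the reader side, C and D on the writer side) is easiest to see in a usage sketch. The names below are invented, and initialisation of the semaphore, which the header handles separately, is omitted.

/*
 * Illustrative sketch only -- not part of the patch.
 */
#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore cfg_sem;
static int shared_config;

static int read_config(void)
{
	int val;

	percpu_down_read(&cfg_sem);	/* fast path: per-cpu counter plus light_mb() */
	val = shared_config;
	percpu_up_read(&cfg_sem);

	return val;
}

static void write_config(int val)
{
	percpu_down_write(&cfg_sem);	/* blocks until every reader has drained */
	shared_config = val;
	percpu_up_write(&cfg_sem);
}
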
-header-y += md_p.h
-header-y += md_u.h
+++ /dev/null
-/*
- md_p.h : physical layout of Linux RAID devices
- Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2, or (at your option)
- any later version.
-
- You should have received a copy of the GNU General Public License
- (for example /usr/src/linux/COPYING); if not, write to the Free
- Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
-
-#ifndef _MD_P_H
-#define _MD_P_H
-
-#include <linux/types.h>
-
-/*
- * RAID superblock.
- *
- * The RAID superblock maintains some statistics on each RAID configuration.
- * Each real device in the RAID set contains it near the end of the device.
- * Some of the ideas are copied from the ext2fs implementation.
- *
- * We currently use 4096 bytes as follows:
- *
- * word offset function
- *
- * 0 - 31 Constant generic RAID device information.
- * 32 - 63 Generic state information.
- * 64 - 127 Personality specific information.
- * 128 - 511 12 32-words descriptors of the disks in the raid set.
- * 512 - 911 Reserved.
- * 912 - 1023 Disk specific descriptor.
- */
-
-/*
- * If x is the real device size in bytes, we return an apparent size of:
- *
- * y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES
- *
- * and place the 4kB superblock at offset y.
- */
-#define MD_RESERVED_BYTES (64 * 1024)
-#define MD_RESERVED_SECTORS (MD_RESERVED_BYTES / 512)
-
-#define MD_NEW_SIZE_SECTORS(x) ((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)
-
-#define MD_SB_BYTES 4096
-#define MD_SB_WORDS (MD_SB_BYTES / 4)
-#define MD_SB_SECTORS (MD_SB_BYTES / 512)
-
-/*
- * The following are counted in 32-bit words
- */
-#define MD_SB_GENERIC_OFFSET 0
-#define MD_SB_PERSONALITY_OFFSET 64
-#define MD_SB_DISKS_OFFSET 128
-#define MD_SB_DESCRIPTOR_OFFSET 992
-
-#define MD_SB_GENERIC_CONSTANT_WORDS 32
-#define MD_SB_GENERIC_STATE_WORDS 32
-#define MD_SB_GENERIC_WORDS (MD_SB_GENERIC_CONSTANT_WORDS + MD_SB_GENERIC_STATE_WORDS)
-#define MD_SB_PERSONALITY_WORDS 64
-#define MD_SB_DESCRIPTOR_WORDS 32
-#define MD_SB_DISKS 27
-#define MD_SB_DISKS_WORDS (MD_SB_DISKS*MD_SB_DESCRIPTOR_WORDS)
-#define MD_SB_RESERVED_WORDS (1024 - MD_SB_GENERIC_WORDS - MD_SB_PERSONALITY_WORDS - MD_SB_DISKS_WORDS - MD_SB_DESCRIPTOR_WORDS)
-#define MD_SB_EQUAL_WORDS (MD_SB_GENERIC_WORDS + MD_SB_PERSONALITY_WORDS + MD_SB_DISKS_WORDS)
-
-/*
- * Device "operational" state bits
- */
-#define MD_DISK_FAULTY 0 /* disk is faulty / operational */
-#define MD_DISK_ACTIVE 1 /* disk is running or spare disk */
-#define MD_DISK_SYNC 2 /* disk is in sync with the raid set */
-#define MD_DISK_REMOVED 3 /* disk is in sync with the raid set */
-
-#define MD_DISK_WRITEMOSTLY 9 /* disk is "write-mostly" is RAID1 config.
- * read requests will only be sent here in
- * dire need
- */
-
-typedef struct mdp_device_descriptor_s {
- __u32 number; /* 0 Device number in the entire set */
- __u32 major; /* 1 Device major number */
- __u32 minor; /* 2 Device minor number */
- __u32 raid_disk; /* 3 The role of the device in the raid set */
- __u32 state; /* 4 Operational state */
- __u32 reserved[MD_SB_DESCRIPTOR_WORDS - 5];
-} mdp_disk_t;
-
-#define MD_SB_MAGIC 0xa92b4efc
-
-/*
- * Superblock state bits
- */
-#define MD_SB_CLEAN 0
-#define MD_SB_ERRORS 1
-
-#define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */
-
-/*
- * Notes:
- * - if an array is being reshaped (restriped) in order to change the
- * the number of active devices in the array, 'raid_disks' will be
- * the larger of the old and new numbers. 'delta_disks' will
- * be the "new - old". So if +ve, raid_disks is the new value, and
- * "raid_disks-delta_disks" is the old. If -ve, raid_disks is the
- * old value and "raid_disks+delta_disks" is the new (smaller) value.
- */
-
-
-typedef struct mdp_superblock_s {
- /*
- * Constant generic information
- */
- __u32 md_magic; /* 0 MD identifier */
- __u32 major_version; /* 1 major version to which the set conforms */
- __u32 minor_version; /* 2 minor version ... */
- __u32 patch_version; /* 3 patchlevel version ... */
- __u32 gvalid_words; /* 4 Number of used words in this section */
- __u32 set_uuid0; /* 5 Raid set identifier */
- __u32 ctime; /* 6 Creation time */
- __u32 level; /* 7 Raid personality */
- __u32 size; /* 8 Apparent size of each individual disk */
- __u32 nr_disks; /* 9 total disks in the raid set */
- __u32 raid_disks; /* 10 disks in a fully functional raid set */
- __u32 md_minor; /* 11 preferred MD minor device number */
- __u32 not_persistent; /* 12 does it have a persistent superblock */
- __u32 set_uuid1; /* 13 Raid set identifier #2 */
- __u32 set_uuid2; /* 14 Raid set identifier #3 */
- __u32 set_uuid3; /* 15 Raid set identifier #4 */
- __u32 gstate_creserved[MD_SB_GENERIC_CONSTANT_WORDS - 16];
-
- /*
- * Generic state information
- */
- __u32 utime; /* 0 Superblock update time */
- __u32 state; /* 1 State bits (clean, ...) */
- __u32 active_disks; /* 2 Number of currently active disks */
- __u32 working_disks; /* 3 Number of working disks */
- __u32 failed_disks; /* 4 Number of failed disks */
- __u32 spare_disks; /* 5 Number of spare disks */
- __u32 sb_csum; /* 6 checksum of the whole superblock */
-#ifdef __BIG_ENDIAN
- __u32 events_hi; /* 7 high-order of superblock update count */
- __u32 events_lo; /* 8 low-order of superblock update count */
- __u32 cp_events_hi; /* 9 high-order of checkpoint update count */
- __u32 cp_events_lo; /* 10 low-order of checkpoint update count */
-#else
- __u32 events_lo; /* 7 low-order of superblock update count */
- __u32 events_hi; /* 8 high-order of superblock update count */
- __u32 cp_events_lo; /* 9 low-order of checkpoint update count */
- __u32 cp_events_hi; /* 10 high-order of checkpoint update count */
-#endif
- __u32 recovery_cp; /* 11 recovery checkpoint sector count */
- /* There are only valid for minor_version > 90 */
- __u64 reshape_position; /* 12,13 next address in array-space for reshape */
- __u32 new_level; /* 14 new level we are reshaping to */
- __u32 delta_disks; /* 15 change in number of raid_disks */
- __u32 new_layout; /* 16 new layout */
- __u32 new_chunk; /* 17 new chunk size (bytes) */
- __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18];
-
- /*
- * Personality information
- */
- __u32 layout; /* 0 the array's physical layout */
- __u32 chunk_size; /* 1 chunk size in bytes */
- __u32 root_pv; /* 2 LV root PV */
- __u32 root_block; /* 3 LV root block */
- __u32 pstate_reserved[MD_SB_PERSONALITY_WORDS - 4];
-
- /*
- * Disks information
- */
- mdp_disk_t disks[MD_SB_DISKS];
-
- /*
- * Reserved
- */
- __u32 reserved[MD_SB_RESERVED_WORDS];
-
- /*
- * Active descriptor
- */
- mdp_disk_t this_disk;
-
-} mdp_super_t;
-
-static inline __u64 md_event(mdp_super_t *sb) {
- __u64 ev = sb->events_hi;
- return (ev<<32)| sb->events_lo;
-}
-
-#define MD_SUPERBLOCK_1_TIME_SEC_MASK ((1ULL<<40) - 1)
-
-/*
- * The version-1 superblock :
- * All numeric fields are little-endian.
- *
- * total size: 256 bytes plus 2 per device.
- * 1K allows 384 devices.
- */
-struct mdp_superblock_1 {
- /* constant array information - 128 bytes */
- __le32 magic; /* MD_SB_MAGIC: 0xa92b4efc - little endian */
- __le32 major_version; /* 1 */
- __le32 feature_map; /* bit 0 set if 'bitmap_offset' is meaningful */
- __le32 pad0; /* always set to 0 when writing */
-
- __u8 set_uuid[16]; /* user-space generated. */
- char set_name[32]; /* set and interpreted by user-space */
-
- __le64 ctime; /* lo 40 bits are seconds, top 24 are microseconds or 0*/
- __le32 level; /* -4 (multipath), -1 (linear), 0,1,4,5 */
- __le32 layout; /* only for raid5 and raid10 currently */
- __le64 size; /* used size of component devices, in 512byte sectors */
-
- __le32 chunksize; /* in 512byte sectors */
- __le32 raid_disks;
- __le32 bitmap_offset; /* sectors after start of superblock that bitmap starts
- * NOTE: signed, so bitmap can be before superblock
- * only meaningful of feature_map[0] is set.
- */
-
- /* These are only valid with feature bit '4' */
- __le32 new_level; /* new level we are reshaping to */
- __le64 reshape_position; /* next address in array-space for reshape */
- __le32 delta_disks; /* change in number of raid_disks */
- __le32 new_layout; /* new layout */
- __le32 new_chunk; /* new chunk size (512byte sectors) */
- __le32 new_offset; /* signed number to add to data_offset in new
- * layout. 0 == no-change. This can be
- * different on each device in the array.
- */
-
- /* constant this-device information - 64 bytes */
- __le64 data_offset; /* sector start of data, often 0 */
- __le64 data_size; /* sectors in this device that can be used for data */
- __le64 super_offset; /* sector start of this superblock */
- __le64 recovery_offset;/* sectors before this offset (from data_offset) have been recovered */
- __le32 dev_number; /* permanent identifier of this device - not role in raid */
- __le32 cnt_corrected_read; /* number of read errors that were corrected by re-writing */
- __u8 device_uuid[16]; /* user-space setable, ignored by kernel */
- __u8 devflags; /* per-device flags. Only one defined...*/
-#define WriteMostly1 1 /* mask for writemostly flag in above */
- /* Bad block log. If there are any bad blocks the feature flag is set.
- * If offset and size are non-zero, that space is reserved and available
- */
- __u8 bblog_shift; /* shift from sectors to block size */
- __le16 bblog_size; /* number of sectors reserved for list */
- __le32 bblog_offset; /* sector offset from superblock to bblog,
- * signed - not unsigned */
-
- /* array state information - 64 bytes */
- __le64 utime; /* 40 bits second, 24 bits microseconds */
- __le64 events; /* incremented when superblock updated */
- __le64 resync_offset; /* data before this offset (from data_offset) known to be in sync */
- __le32 sb_csum; /* checksum up to devs[max_dev] */
- __le32 max_dev; /* size of devs[] array to consider */
- __u8 pad3[64-32]; /* set to 0 when writing */
-
- /* device state information. Indexed by dev_number.
- * 2 bytes per device
- * Note there are no per-device state flags. State information is rolled
- * into the 'roles' value. If a device is spare or faulty, then it doesn't
- * have a meaningful role.
- */
- __le16 dev_roles[0]; /* role in array, or 0xffff for a spare, or 0xfffe for faulty */
-};
-
-/* feature_map bits */
-#define MD_FEATURE_BITMAP_OFFSET 1
-#define MD_FEATURE_RECOVERY_OFFSET 2 /* recovery_offset is present and
- * must be honoured
- */
-#define MD_FEATURE_RESHAPE_ACTIVE 4
-#define MD_FEATURE_BAD_BLOCKS 8 /* badblock list is not empty */
-#define MD_FEATURE_REPLACEMENT 16 /* This device is replacing an
- * active device with same 'role'.
- * 'recovery_offset' is also set.
- */
-#define MD_FEATURE_RESHAPE_BACKWARDS 32 /* Reshape doesn't change number
- * of devices, but is going
- * backwards anyway.
- */
-#define MD_FEATURE_NEW_OFFSET 64 /* new_offset must be honoured */
-#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \
- |MD_FEATURE_RECOVERY_OFFSET \
- |MD_FEATURE_RESHAPE_ACTIVE \
- |MD_FEATURE_BAD_BLOCKS \
- |MD_FEATURE_REPLACEMENT \
- |MD_FEATURE_RESHAPE_BACKWARDS \
- |MD_FEATURE_NEW_OFFSET \
- )
-
-#endif
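
The placement rule described in the header removed above (y = (x & ~(MD_RESERVED_BYTES - 1)) - MD_RESERVED_BYTES) can be checked with a small standalone calculation; the device size used below is an arbitrary example.

/* Standalone worked example of the v0.90 superblock placement rule. */
#include <stdio.h>
#include <stdint.h>

#define MD_RESERVED_BYTES	(64 * 1024)
#define MD_RESERVED_SECTORS	(MD_RESERVED_BYTES / 512)
#define MD_NEW_SIZE_SECTORS(x)	((x & ~(MD_RESERVED_SECTORS - 1)) - MD_RESERVED_SECTORS)

int main(void)
{
	uint64_t dev_sectors = 2097153;	/* ~1 GiB component device, deliberately unaligned */

	/* The 4 KiB superblock lives at this offset; it is also the apparent size. */
	printf("superblock offset: %llu sectors\n",
	       (unsigned long long)MD_NEW_SIZE_SECTORS(dev_sectors));

	return 0;
}
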
(for example /usr/src/linux/COPYING); if not, write to the Free
Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-
#ifndef _MD_U_H
#define _MD_U_H
-/*
- * Different major versions are not compatible.
- * Different minor versions are only downward compatible.
- * Different patchlevel versions are downward and upward compatible.
- */
-#define MD_MAJOR_VERSION 0
-#define MD_MINOR_VERSION 90
-/*
- * MD_PATCHLEVEL_VERSION indicates kernel functionality.
- * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
- * and major_version/minor_version accordingly
- * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
- * in the super status byte
- * >=3 means that bitmap superblock version 4 is supported, which uses
- * little-ending representation rather than host-endian
- */
-#define MD_PATCHLEVEL_VERSION 3
-
-/* ioctls */
-
-/* status */
-#define RAID_VERSION _IOR (MD_MAJOR, 0x10, mdu_version_t)
-#define GET_ARRAY_INFO _IOR (MD_MAJOR, 0x11, mdu_array_info_t)
-#define GET_DISK_INFO _IOR (MD_MAJOR, 0x12, mdu_disk_info_t)