F: net/ieee802154/
F: drivers/ieee802154/
+IIO SUBSYSTEM AND DRIVERS
+M: Jonathan Cameron <jic23@cam.ac.uk>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: drivers/staging/iio/
+
IKANOS/ADI EAGLE ADSL USB DRIVER
M: Matthieu Castet <castet.matthieu@free.fr>
M: Stanislaw Gruszka <stf_xl@wp.pl>
{
struct thread_info *thread = current_thread_info();
int ret;
+ enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
oops_enter();
console_verbose();
bust_spinlocks(1);
if (!user_mode(regs))
- report_bug(regs->ARM_pc, regs);
+ bug_type = report_bug(regs->ARM_pc, regs);
+ if (bug_type != BUG_TRAP_TYPE_NONE)
+ str = "Oops - BUG";
ret = __die(str, err, thread, regs);
if (regs && kexec_should_crash(thread->task))
#include <asm/page.h>
#define PROC_INFO \
+ . = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
.default_device = &sdp4430_lcd_device,
};
-static void omap_4430sdp_display_init(void)
+static void __init omap_4430sdp_display_init(void)
{
int r;
#define board_mux NULL
#endif
-static void omap4_sdp4430_wifi_mux_init(void)
+static void __init omap4_sdp4430_wifi_mux_init(void)
{
omap_mux_init_gpio(GPIO_WIFI_IRQ, OMAP_PIN_INPUT |
OMAP_PIN_OFF_WAKEUPENABLE);
.board_tcxo_clock = WL12XX_TCXOCLOCK_26,
};
-static void omap4_sdp4430_wifi_init(void)
+static void __init omap4_sdp4430_wifi_init(void)
{
+ int ret;
+
omap4_sdp4430_wifi_mux_init();
- if (wl12xx_set_platform_data(&omap4_sdp4430_wlan_data))
- pr_err("Error setting wl12xx data\n");
- platform_device_register(&omap_vwlan_device);
+ ret = wl12xx_set_platform_data(&omap4_sdp4430_wlan_data);
+ if (ret)
+ pr_err("Error setting wl12xx data: %d\n", ret);
+ ret = platform_device_register(&omap_vwlan_device);
+ if (ret)
+ pr_err("Error registering wl12xx device: %d\n", ret);
}
static void __init omap_4430sdp_init(void)
{ OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" },
};
+static void __init omap3_evm_wl12xx_init(void)
+{
+#ifdef CONFIG_WL12XX_PLATFORM_DATA
+ int ret;
+
+ /* WL12xx WLAN Init */
+ ret = wl12xx_set_platform_data(&omap3evm_wlan_data);
+ if (ret)
+ pr_err("error setting wl12xx data: %d\n", ret);
+ ret = platform_device_register(&omap3evm_wlan_regulator);
+ if (ret)
+ pr_err("error registering wl12xx device: %d\n", ret);
+#endif
+}
+
static void __init omap3_evm_init(void)
{
omap3_evm_get_revision();
omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
omap3evm_init_smsc911x();
omap3_evm_display_init();
-
-#ifdef CONFIG_WL12XX_PLATFORM_DATA
- /* WL12xx WLAN Init */
- if (wl12xx_set_platform_data(&omap3evm_wlan_data))
- pr_err("error setting wl12xx data\n");
- platform_device_register(&omap3evm_wlan_regulator);
-#endif
+ omap3_evm_wl12xx_init();
}
MACHINE_START(OMAP3EVM, "OMAP3 EVM")
static void __init omap4_panda_init(void)
{
int package = OMAP_PACKAGE_CBS;
+ int ret;
if (omap_rev() == OMAP4430_REV_ES1_0)
package = OMAP_PACKAGE_CBL;
omap4_mux_init(board_mux, NULL, package);
- if (wl12xx_set_platform_data(&omap_panda_wlan_data))
- pr_err("error setting wl12xx data\n");
+ ret = wl12xx_set_platform_data(&omap_panda_wlan_data);
+ if (ret)
+ pr_err("error setting wl12xx data: %d\n", ret);
omap4_panda_i2c_init();
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
void __init zoom_peripherals_init(void)
{
- if (wl12xx_set_platform_data(&omap_zoom_wlan_data))
- pr_err("error setting wl12xx data\n");
+ int ret = wl12xx_set_platform_data(&omap_zoom_wlan_data);
+
+ if (ret)
+ pr_err("error setting wl12xx data: %d\n", ret);
omap_i2c_init();
platform_device_register(&omap_vwlan_device);
}
}
-static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
- struct omap_mmc_platform_data *mmc)
+static int omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
+ struct omap_mmc_platform_data *mmc)
{
char *hc_name;
#define MAX_OMAP_MMC_HWMOD_NAME_LEN 16
-void __init omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
+void omap_init_hsmmc(struct omap2_hsmmc_info *hsmmcinfo, int ctrl_nr)
{
struct omap_hwmod *oh;
struct platform_device *pdev;
kfree(mmc_data);
}
-void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
+void omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
u32 reg;
static char *omap_mux_options;
-static int __init _omap_mux_init_gpio(struct omap_mux_partition *partition,
- int gpio, int val)
+static int _omap_mux_init_gpio(struct omap_mux_partition *partition,
+ int gpio, int val)
{
struct omap_mux_entry *e;
struct omap_mux *gpio_mux = NULL;
return 0;
}
-int __init omap_mux_init_gpio(int gpio, int val)
+int omap_mux_init_gpio(int gpio, int val)
{
struct omap_mux_partition *partition;
int ret;
return -ENODEV;
}
-static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
- const char *muxname,
- struct omap_mux **found_mux)
+static int _omap_mux_get_by_name(struct omap_mux_partition *partition,
+ const char *muxname,
+ struct omap_mux **found_mux)
{
struct omap_mux *mux = NULL;
struct omap_mux_entry *e;
return -ENODEV;
}
-int __init omap_mux_init_signal(const char *muxname, int val)
+int omap_mux_init_signal(const char *muxname, int val)
{
struct omap_mux_partition *partition = NULL;
struct omap_mux *mux = NULL;
omap_mux_package_init_balls(package_balls, superset);
}
-static void omap_mux_init_signals(struct omap_mux_partition *partition,
- struct omap_board_mux *board_mux)
+static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
+ struct omap_board_mux *board_mux)
{
omap_mux_set_cmdline_signals();
omap_mux_write_array(partition, board_mux);
{
}
-static void omap_mux_init_signals(struct omap_mux_partition *partition,
- struct omap_board_mux *board_mux)
+static void __init omap_mux_init_signals(struct omap_mux_partition *partition,
+ struct omap_board_mux *board_mux)
{
}
#include <linux/linkage.h>
#include <linux/init.h>
+ __CPUINIT
/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
if (oh->_state != _HWMOD_STATE_INITIALIZED &&
oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_DISABLED) {
- WARN(1, "omap_hwmod: %s: enabled state can only be entered "
- "from initialized, idle, or disabled state\n", oh->name);
+ WARN(1, "omap_hwmod: %s: enabled state can only be entered from initialized, idle, or disabled state\n",
+ oh->name);
return -EINVAL;
}
pr_debug("omap_hwmod: %s: idling\n", oh->name);
if (oh->_state != _HWMOD_STATE_ENABLED) {
- WARN(1, "omap_hwmod: %s: idle state can only be entered from "
- "enabled state\n", oh->name);
+ WARN(1, "omap_hwmod: %s: idle state can only be entered from enabled state\n",
+ oh->name);
return -EINVAL;
}
if (oh->_state != _HWMOD_STATE_IDLE &&
oh->_state != _HWMOD_STATE_ENABLED) {
- WARN(1, "omap_hwmod: %s: disabled state can only be entered "
- "from idle, or enabled state\n", oh->name);
+ WARN(1, "omap_hwmod: %s: disabled state can only be entered from idle, or enabled state\n",
+ oh->name);
return -EINVAL;
}
BUG_ON(!oh);
if (!oh->class->sysc || !oh->class->sysc->sysc_flags) {
- WARN(1, "omap_device: %s: OCP barrier impossible due to "
- "device configuration\n", oh->name);
+ WARN(1, "omap_device: %s: OCP barrier impossible due to device configuration\n",
+ oh->name);
return;
}
#include "common.h"
#include <plat/cpu.h>
+#include <plat/irqs.h>
#include <plat/prcm.h>
#include "vp.h"
omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
}
-static void omap_uart_set_forceidle(struct platform_device *pdev)
+static void omap_uart_set_smartidle(struct platform_device *pdev)
{
struct omap_device *od = to_omap_device(pdev);
- omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_FORCE);
+ omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART);
}
#else
static void omap_uart_enable_wakeup(struct platform_device *pdev, bool enable)
{}
static void omap_uart_set_noidle(struct platform_device *pdev) {}
-static void omap_uart_set_forceidle(struct platform_device *pdev) {}
+static void omap_uart_set_smartidle(struct platform_device *pdev) {}
#endif /* CONFIG_PM */
#ifdef CONFIG_OMAP_MUX
omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
omap_up.flags = UPF_BOOT_AUTOCONF;
omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
- omap_up.set_forceidle = omap_uart_set_forceidle;
+ omap_up.set_forceidle = omap_uart_set_smartidle;
omap_up.set_noidle = omap_uart_set_noidle;
omap_up.enable_wakeup = omap_uart_enable_wakeup;
omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
* omap_vc_i2c_init - initialize I2C interface to PMIC
* @voltdm: voltage domain containing VC data
*
- * Use PMIC supplied seetings for I2C high-speed mode and
+ * Use PMIC supplied settings for I2C high-speed mode and
* master code (if set) and program the VC I2C configuration
* register.
*
if (initialized) {
if (voltdm->pmic->i2c_high_speed != i2c_high_speed)
- pr_warn("%s: I2C config for all channels must match.",
- __func__);
+ pr_warn("%s: I2C config for vdd_%s does not match other channels (%u).",
+ __func__, voltdm->name, i2c_high_speed);
return;
}
u32 val;
if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
- pr_err("%s: PMIC info requried to configure vc for"
- "vdd_%s not populated.Hence cannot initialize vc\n",
- __func__, voltdm->name);
+ pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
return;
}
u32 val, sys_clk_rate, timeout, waittime;
u32 vddmin, vddmax, vstepmin, vstepmax;
+ if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) {
+ pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name);
+ return;
+ }
+
if (!voltdm->read || !voltdm->write) {
pr_err("%s: No read/write API for accessing vdd_%s regs\n",
__func__, voltdm->name);
and r1, r1, #7 @ mask of the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
+#ifdef CONFIG_PREEMPT
+ save_and_disable_irqs r9 @ make cssr&csidr read atomic
+#endif
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb @ isb to sync the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
+#ifdef CONFIG_PREEMPT
+ restore_irqs_notrace r9
+#endif
and r2, r1, #7 @ extract the length of the cache lines
add r2, r2, #4 @ add 4 (line length offset)
ldr r4, =0x3ff
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
-extern asmlinkage void math_state_restore(void);
+extern void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
}
}
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * We can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: TS_USEDFPU must be clear (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ */
+static inline bool interrupted_kernel_fpu_idle(void)
+{
+ return !(current_thread_info()->status & TS_USEDFPU) &&
+ (read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static inline bool interrupted_user_mode(void)
+{
+ struct pt_regs *regs = get_irq_regs();
+ return regs && user_mode_vm(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+static inline bool irq_fpu_usable(void)
+{
+ return !in_interrupt() ||
+ interrupted_user_mode() ||
+ interrupted_kernel_fpu_idle();
+}
+
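A minimal usage sketch (illustrative only, not part of this change): code that may run in interrupt context is expected to guard FPU use with irq_fpu_usable() and fall back to a non-FPU path otherwise, roughly:

	/* hypothetical caller, shown only to illustrate the intended API use */
	if (irq_fpu_usable()) {
		kernel_fpu_begin();
		/* ... SSE/AVX-accelerated work ... */
		kernel_fpu_end();
	} else {
		/* ... plain integer fallback ... */
	}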
static inline void kernel_fpu_begin(void)
{
struct thread_info *me = current_thread_info();
+
+ WARN_ON_ONCE(!irq_fpu_usable());
preempt_disable();
if (me->status & TS_USEDFPU)
__save_init_fpu(me->task);
preempt_enable();
}
-static inline bool irq_fpu_usable(void)
-{
- struct pt_regs *regs;
-
- return !in_interrupt() || !(regs = get_irq_regs()) || \
- user_mode(regs) || (read_cr0() & X86_CR0_TS);
-}
-
/*
* Some instructions like VIA's padlock instructions generate a spurious
* DNA fault but don't modify SSE registers. And these instructions
*/
static inline void save_init_fpu(struct task_struct *tsk)
{
+ WARN_ON_ONCE(task_thread_info(tsk)->status & TS_USEDFPU);
preempt_disable();
__save_init_fpu(tsk);
stts();
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
cpuc->pebs_enabled |= 1ULL << hwc->idx;
- WARN_ON_ONCE(cpuc->enabled);
if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1)
intel_pmu_lbr_enable(event);
if (!x86_pmu.lbr_nr)
return;
- WARN_ON_ONCE(cpuc->enabled);
-
/*
* Reset the LBR stack if we changed task context to
* avoid data leaks.
* Careful.. There are problems with IBM-designed IRQ13 behaviour.
* Don't touch unless you *really* know how it works.
*
- * Must be called with kernel preemption disabled (in this case,
- * local interrupts are disabled at the call-site in entry.S).
+ * Must be called with kernel preemption disabled (e.g. with local
+ * interrupts disabled, as in the case of do_device_not_available).
*/
-asmlinkage void math_state_restore(void)
+void math_state_restore(void)
{
struct thread_info *thread = current_thread_info();
struct task_struct *tsk = thread->task;
dotraplinkage void __kprobes
do_device_not_available(struct pt_regs *regs, long error_code)
{
+ WARN_ON_ONCE(!user_mode_vm(regs));
#ifdef CONFIG_MATH_EMULATION
if (read_cr0() & X86_CR0_EM) {
struct math_emu_info info = { };
ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
if (ioc) {
ioc_cgroup_changed(ioc);
- put_io_context(ioc, NULL);
+ put_io_context(ioc);
}
}
}
if (rq->cmd_flags & REQ_ELVPRIV) {
elv_put_request(q, rq);
if (rq->elv.icq)
- put_io_context(rq->elv.icq->ioc, q);
+ put_io_context(rq->elv.icq->ioc);
}
mempool_free(rq, q->rq.rq_pool);
spin_unlock_irq(q->queue_lock);
/* create icq if missing */
- if (unlikely(et->icq_cache && !icq))
+ if ((rw_flags & REQ_ELVPRIV) && unlikely(et->icq_cache && !icq)) {
icq = ioc_create_icq(q, gfp_mask);
+ if (!icq)
+ goto fail_icq;
+ }
- /* rqs are guaranteed to have icq on elv_set_request() if requested */
- if (likely(!et->icq_cache || icq))
- rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+ rq = blk_alloc_request(q, icq, rw_flags, gfp_mask);
+fail_icq:
if (unlikely(!rq)) {
/*
* Allocation failed presumably due to memory. Undo anything
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
drive_stat_acct(req, 0);
- elv_bio_merged(q, req, bio);
return true;
}
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
drive_stat_acct(req, 0);
- elv_bio_merged(q, req, bio);
return true;
}
* on %current's plugged list. Returns %true if merge was successful,
* otherwise %false.
*
- * This function is called without @q->queue_lock; however, elevator is
- * accessed iff there already are requests on the plugged list which in
- * turn guarantees validity of the elevator.
- *
- * Note that, on successful merge, elevator operation
- * elevator_bio_merged_fn() will be called without queue lock. Elevator
- * must be ready for this.
+ * Plugging coalesces IOs from the same issuer for the same purpose without
+ * going through @q->queue_lock. As such it's more of an issuing mechanism
+ * than scheduling, and the request, while it may have elvpriv data, is not
+ * added to the elevator at this point. In addition, we don't have
+ * reliable access to the elevator outside the queue lock. Only check basic
+ * merging parameters without querying the elevator.
*/
static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
unsigned int *request_count)
(*request_count)++;
- if (rq->q != q)
+ if (rq->q != q || !blk_rq_merge_ok(rq, bio))
continue;
- el_ret = elv_try_merge(rq, bio);
+ el_ret = blk_try_merge(rq, bio);
if (el_ret == ELEVATOR_BACK_MERGE) {
ret = bio_attempt_back_merge(q, rq, bio);
if (ret)
el_ret = elv_merge(q, &req, bio);
if (el_ret == ELEVATOR_BACK_MERGE) {
if (bio_attempt_back_merge(q, req, bio)) {
+ elv_bio_merged(q, req, bio);
if (!attempt_back_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out_unlock;
}
} else if (el_ret == ELEVATOR_FRONT_MERGE) {
if (bio_attempt_front_merge(q, req, bio)) {
+ elv_bio_merged(q, req, bio);
if (!attempt_front_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out_unlock;
}
EXPORT_SYMBOL(get_io_context);
-/*
- * Releasing ioc may nest into another put_io_context() leading to nested
- * fast path release. As the ioc's can't be the same, this is okay but
- * makes lockdep whine. Keep track of nesting and use it as subclass.
- */
-#ifdef CONFIG_LOCKDEP
-#define ioc_release_depth(q) ((q) ? (q)->ioc_release_depth : 0)
-#define ioc_release_depth_inc(q) (q)->ioc_release_depth++
-#define ioc_release_depth_dec(q) (q)->ioc_release_depth--
-#else
-#define ioc_release_depth(q) 0
-#define ioc_release_depth_inc(q) do { } while (0)
-#define ioc_release_depth_dec(q) do { } while (0)
-#endif
-
static void icq_free_icq_rcu(struct rcu_head *head)
{
struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);
if (rcu_dereference_raw(ioc->icq_hint) == icq)
rcu_assign_pointer(ioc->icq_hint, NULL);
- if (et->ops.elevator_exit_icq_fn) {
- ioc_release_depth_inc(q);
+ if (et->ops.elevator_exit_icq_fn)
et->ops.elevator_exit_icq_fn(icq);
- ioc_release_depth_dec(q);
- }
/*
* @icq->q might have gone away by the time RCU callback runs
struct io_context *ioc = container_of(work, struct io_context,
release_work);
struct request_queue *last_q = NULL;
+ unsigned long flags;
- spin_lock_irq(&ioc->lock);
+ /*
+ * Exiting icq may call into put_io_context() through the elevator,
+ * which will trigger a lockdep warning. The ioc's are guaranteed to
+ * be different, use a different locking subclass here. Use
+ * irqsave variant as there's no spin_lock_irq_nested().
+ */
+ spin_lock_irqsave_nested(&ioc->lock, flags, 1);
while (!hlist_empty(&ioc->icq_list)) {
struct io_cq *icq = hlist_entry(ioc->icq_list.first,
*/
if (last_q) {
spin_unlock(last_q->queue_lock);
- spin_unlock_irq(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
blk_put_queue(last_q);
} else {
- spin_unlock_irq(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
}
last_q = this_q;
- spin_lock_irq(this_q->queue_lock);
- spin_lock(&ioc->lock);
+ spin_lock_irqsave(this_q->queue_lock, flags);
+ spin_lock_nested(&ioc->lock, 1);
continue;
}
ioc_exit_icq(icq);
if (last_q) {
spin_unlock(last_q->queue_lock);
- spin_unlock_irq(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
blk_put_queue(last_q);
} else {
- spin_unlock_irq(&ioc->lock);
+ spin_unlock_irqrestore(&ioc->lock, flags);
}
kmem_cache_free(iocontext_cachep, ioc);
/**
* put_io_context - put a reference of io_context
* @ioc: io_context to put
- * @locked_q: request_queue the caller is holding queue_lock of (hint)
*
* Decrement reference count of @ioc and release it if the count reaches
- * zero. If the caller is holding queue_lock of a queue, it can indicate
- * that with @locked_q. This is an optimization hint and the caller is
- * allowed to pass in %NULL even when it's holding a queue_lock.
+ * zero.
*/
-void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
+void put_io_context(struct io_context *ioc)
{
- struct request_queue *last_q = locked_q;
unsigned long flags;
if (ioc == NULL)
return;
BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
- if (locked_q)
- lockdep_assert_held(locked_q->queue_lock);
-
- if (!atomic_long_dec_and_test(&ioc->refcount))
- return;
/*
- * Destroy @ioc. This is a bit messy because icq's are chained
- * from both ioc and queue, and ioc->lock nests inside queue_lock.
- * The inner ioc->lock should be held to walk our icq_list and then
- * for each icq the outer matching queue_lock should be grabbed.
- * ie. We need to do reverse-order double lock dancing.
- *
- * Another twist is that we are often called with one of the
- * matching queue_locks held as indicated by @locked_q, which
- * prevents performing double-lock dance for other queues.
- *
- * So, we do it in two stages. The fast path uses the queue_lock
- * the caller is holding and, if other queues need to be accessed,
- * uses trylock to avoid introducing locking dependency. This can
- * handle most cases, especially if @ioc was performing IO on only
- * single device.
- *
- * If trylock doesn't cut it, we defer to @ioc->release_work which
- * can do all the double-locking dancing.
+ * Releasing ioc requires reverse order double locking and we may
+ * already be holding a queue_lock. Do it asynchronously from wq.
*/
- spin_lock_irqsave_nested(&ioc->lock, flags,
- ioc_release_depth(locked_q));
-
- while (!hlist_empty(&ioc->icq_list)) {
- struct io_cq *icq = hlist_entry(ioc->icq_list.first,
- struct io_cq, ioc_node);
- struct request_queue *this_q = icq->q;
-
- if (this_q != last_q) {
- if (last_q && last_q != locked_q)
- spin_unlock(last_q->queue_lock);
- last_q = NULL;
-
- if (!spin_trylock(this_q->queue_lock))
- break;
- last_q = this_q;
- continue;
- }
- ioc_exit_icq(icq);
+ if (atomic_long_dec_and_test(&ioc->refcount)) {
+ spin_lock_irqsave(&ioc->lock, flags);
+ if (!hlist_empty(&ioc->icq_list))
+ schedule_work(&ioc->release_work);
+ spin_unlock_irqrestore(&ioc->lock, flags);
}
-
- if (last_q && last_q != locked_q)
- spin_unlock(last_q->queue_lock);
-
- spin_unlock_irqrestore(&ioc->lock, flags);
-
- /* if no icq is left, we're done; otherwise, kick release_work */
- if (hlist_empty(&ioc->icq_list))
- kmem_cache_free(iocontext_cachep, ioc);
- else
- schedule_work(&ioc->release_work);
}
EXPORT_SYMBOL(put_io_context);
task_unlock(task);
atomic_dec(&ioc->nr_tasks);
- put_io_context(ioc, NULL);
+ put_io_context(ioc);
}
/**
{
return attempt_merge(q, rq, next);
}
+
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+ if (!rq_mergeable(rq))
+ return false;
+
+ /* don't merge file system requests and discard requests */
+ if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
+ return false;
+
+ /* don't merge discard requests and secure discard requests */
+ if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+ return false;
+
+ /* different data direction or already started, don't merge */
+ if (bio_data_dir(bio) != rq_data_dir(rq))
+ return false;
+
+ /* must be same device and not a special request */
+ if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
+ return false;
+
+ /* only merge integrity protected bio into ditto rq */
+ if (bio_integrity(bio) != blk_integrity_rq(rq))
+ return false;
+
+ return true;
+}
+
+int blk_try_merge(struct request *rq, struct bio *bio)
+{
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+ return ELEVATOR_BACK_MERGE;
+ else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+ return ELEVATOR_FRONT_MERGE;
+ return ELEVATOR_NO_MERGE;
+}
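A sketch of the intended call pattern (illustrative only; it mirrors how attempt_plug_merge() and elv_merge() use the new helpers elsewhere in this series): check blk_rq_merge_ok() for basic mergeability first, then let blk_try_merge() decide which end of the request the bio attaches to:

	if (blk_rq_merge_ok(rq, bio)) {
		switch (blk_try_merge(rq, bio)) {
		case ELEVATOR_BACK_MERGE:
			/* bio starts where rq ends: append it */
			break;
		case ELEVATOR_FRONT_MERGE:
			/* bio ends where rq starts: prepend it */
			break;
		default:
			/* not contiguous, leave rq alone */
			break;
		}
	}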
struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
+bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
+int blk_try_merge(struct request *rq, struct bio *bio);
void blk_queue_congestion_threshold(struct request_queue *q);
mutex_lock(&bsg_mutex);
idr_remove(&bsg_minor_idr, bcd->minor);
- sysfs_remove_link(&q->kobj, "bsg");
+ if (q->kobj.sd)
+ sysfs_remove_link(&q->kobj, "bsg");
device_unregister(bcd->class_dev);
bcd->class_dev = NULL;
kref_put(&bcd->ref, bsg_kref_release_function);
/*
* Lookup the cfqq that this bio will be queued with and allow
- * merge only if rq is queued there. This function can be called
- * from plug merge without queue_lock. In such cases, ioc of @rq
- * and %current are guaranteed to be equal. Avoid lookup which
- * requires queue_lock by using @rq's cic.
+ * merge only if rq is queued there.
*/
- if (current->io_context == RQ_CIC(rq)->icq.ioc) {
- cic = RQ_CIC(rq);
- } else {
- cic = cfq_cic_lookup(cfqd, current->io_context);
- if (!cic)
- return false;
- }
+ cic = cfq_cic_lookup(cfqd, current->io_context);
+ if (!cic)
+ return false;
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
return cfqq == RQ_CFQQ(rq);
cfqd->active_queue = NULL;
if (cfqd->active_cic) {
- put_io_context(cfqd->active_cic->icq.ioc, cfqd->queue);
+ put_io_context(cfqd->active_cic->icq.ioc);
cfqd->active_cic = NULL;
}
}
*/
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
+ enum wl_type_t old_type = cfqq_type(cfqd->active_queue);
+
cfq_log_cfqq(cfqd, cfqq, "preempt");
+ cfq_slice_expired(cfqd, 1);
/*
* workload type is changed, don't save slice, otherwise preempt
* doesn't happen
*/
- if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq))
+ if (old_type != cfqq_type(cfqq))
cfqq->cfqg->saved_workload_slice = 0;
- cfq_slice_expired(cfqd, 1);
-
/*
* Put the new queue at the front of the current list,
* so we know that it will be selected next.
/*
* can we safely merge with this request?
*/
-int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
- if (!rq_mergeable(rq))
- return 0;
-
- /*
- * Don't merge file system requests and discard requests
- */
- if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
- return 0;
-
- /*
- * Don't merge discard requests and secure discard requests
- */
- if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
- return 0;
-
- /*
- * different data direction or already started, don't merge
- */
- if (bio_data_dir(bio) != rq_data_dir(rq))
- return 0;
-
- /*
- * must be same device and not a special request
- */
- if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
- return 0;
-
- /*
- * only merge integrity protected bio into ditto rq
- */
- if (bio_integrity(bio) != blk_integrity_rq(rq))
+ if (!blk_rq_merge_ok(rq, bio))
return 0;
if (!elv_iosched_allow_merge(rq, bio))
}
EXPORT_SYMBOL(elv_rq_merge_ok);
-int elv_try_merge(struct request *__rq, struct bio *bio)
-{
- int ret = ELEVATOR_NO_MERGE;
-
- /*
- * we can merge and sequence is ok, check if it's possible
- */
- if (elv_rq_merge_ok(__rq, bio)) {
- if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
- ret = ELEVATOR_BACK_MERGE;
- else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
- ret = ELEVATOR_FRONT_MERGE;
- }
-
- return ret;
-}
-
static struct elevator_type *elevator_find(const char *name)
{
struct elevator_type *e;
/*
* First try one-hit cache.
*/
- if (q->last_merge) {
- ret = elv_try_merge(q->last_merge, bio);
+ if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
+ ret = blk_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge;
return ret;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+static void cpu_device_release(struct device *dev)
+{
+ /*
+ * This is an empty function to prevent the driver core from spitting a
+ * warning at us. Yes, I know this is directly opposite of what the
+ * documentation for the driver core and kobjects say, and the author
+ * of this code has already been publicly ridiculed for doing
+ * something as foolish as this. However, at this point in time, it is
+ * the only way to handle the issue of statically allocated cpu
+ * devices. The different architectures will have their cpu device
+ * code reworked to properly handle this in the near future, so this
+ * function will then be changed to correctly free up the memory held
+ * by the cpu device.
+ *
+ * Never copy this way of doing things, or you too will be made fun of
+ * on the linux-kernel list; you have been warned.
+ */
+}
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
int error;
cpu->node_id = cpu_to_node(num);
+ memset(&cpu->dev, 0x00, sizeof(struct device));
cpu->dev.id = num;
cpu->dev.bus = &cpu_subsys;
+ cpu->dev.release = cpu_device_release;
error = device_register(&cpu->dev);
if (!error && cpu->hotpluggable)
register_cpu_control(cpu);
}
static int add_memory_section(int nid, struct mem_section *section,
+ struct memory_block **mem_p,
unsigned long state, enum mem_add_context context)
{
- struct memory_block *mem;
+ struct memory_block *mem = NULL;
+ int scn_nr = __section_nr(section);
int ret = 0;
mutex_lock(&mem_sysfs_mutex);
- mem = find_memory_block(section);
+ if (context == BOOT) {
+ /* same memory block ? */
+ if (mem_p && *mem_p)
+ if (scn_nr >= (*mem_p)->start_section_nr &&
+ scn_nr <= (*mem_p)->end_section_nr) {
+ mem = *mem_p;
+ kobject_get(&mem->dev.kobj);
+ }
+ } else
+ mem = find_memory_block(section);
+
if (mem) {
mem->section_count++;
kobject_put(&mem->dev.kobj);
- } else
+ } else {
ret = init_memory_block(&mem, section, state);
+ /* store memory_block pointer for next loop */
+ if (!ret && context == BOOT)
+ if (mem_p)
+ *mem_p = mem;
+ }
if (!ret) {
if (context == HOTPLUG &&
*/
int register_new_memory(int nid, struct mem_section *section)
{
- return add_memory_section(nid, section, MEM_OFFLINE, HOTPLUG);
+ return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
}
int unregister_memory_section(struct mem_section *section)
int ret;
int err;
unsigned long block_sz;
+ struct memory_block *mem = NULL;
ret = subsys_system_register(&memory_subsys, NULL);
if (ret)
for (i = 0; i < NR_MEM_SECTIONS; i++) {
if (!present_section_nr(i))
continue;
- err = add_memory_section(0, __nr_to_section(i), MEM_ONLINE,
+ /* don't need to reuse memory_block if only one per block */
+ err = add_memory_section(0, __nr_to_section(i),
+ (sections_per_block == 1) ? NULL : &mem,
+ MEM_ONLINE,
BOOT);
if (!ret)
ret = err;
if (!present_section_nr(section_nr))
continue;
mem_sect = __nr_to_section(section_nr);
+
+ /* same memblock ? */
+ if (mem_blk)
+ if ((section_nr >= mem_blk->start_section_nr) &&
+ (section_nr <= mem_blk->end_section_nr))
+ continue;
+
mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
+
ret = register_mem_sect_under_node(mem_blk, nid);
if (!err)
err = ret;
err = bcma_sprom_get(bus);
if (err == -ENOENT) {
pr_err("No SPROM available\n");
- } else if (err) {
+ } else if (err)
pr_err("Failed to get SPROM: %d\n", err);
- return -ENOENT;
- }
/* Register found cores */
bcma_register_cores(bus);
core->bus = bus;
err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core);
- if (err == -ENODEV) {
- core_num++;
- continue;
- } else if (err == -ENXIO)
- continue;
- else if (err == -ESPIPE)
- break;
- else if (err < 0)
+ if (err < 0) {
+ kfree(core);
+ if (err == -ENODEV) {
+ core_num++;
+ continue;
+ } else if (err == -ENXIO) {
+ continue;
+ } else if (err == -ESPIPE) {
+ break;
+ }
return err;
+ }
core->core_index = core_num++;
bus->nr_cores++;
out_put_disk:
while (dr--) {
del_timer_sync(&motor_off_timer[dr]);
- if (disks[dr]->queue)
+ if (disks[dr]->queue) {
blk_cleanup_queue(disks[dr]->queue);
+ /*
+ * put_disk() is not paired with add_disk() and will put the
+ * queue reference one extra time; clear the pointer to compensate.
+ */
+ disks[dr]->queue = NULL;
+ }
put_disk(disks[dr]);
}
return err;
platform_device_unregister(&floppy_device[drive]);
}
blk_cleanup_queue(disks[drive]->queue);
+
+ /*
+ * These disks have not called add_disk(). Don't put down the
+ * queue reference in put_disk().
+ */
+ if (!(allowed_drive_mask & (1 << drive)) ||
+ fdc_state[FDC(drive)].version == FDC_NONE)
+ disks[drive]->queue = NULL;
+
put_disk(disks[drive]);
}
return __splice_from_pipe(pipe, sd, lo_splice_actor);
}
-static int
+static ssize_t
do_lo_receive(struct loop_device *lo,
struct bio_vec *bvec, int bsize, loff_t pos)
{
struct lo_read_data cookie;
struct splice_desc sd;
struct file *file;
- long retval;
+ ssize_t retval;
cookie.lo = lo;
cookie.page = bvec->bv_page;
file = lo->lo_backing_file;
retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
- if (retval < 0)
- return retval;
- if (retval != bvec->bv_len)
- return -EIO;
- return 0;
+ return retval;
}
static int
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
{
struct bio_vec *bvec;
- int i, ret = 0;
+ ssize_t s;
+ int i;
bio_for_each_segment(bvec, bio, i) {
- ret = do_lo_receive(lo, bvec, bsize, pos);
- if (ret < 0)
+ s = do_lo_receive(lo, bvec, bsize, pos);
+ if (s < 0)
+ return s;
+
+ if (s != bvec->bv_len) {
+ zero_fill_bio(bio);
break;
+ }
pos += bvec->bv_len;
}
- return ret;
+ return 0;
}
static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
* when the read completes.
* @data Callback data passed to the callback function
* when the read completes.
- * @barrier If non-zero, this command must be completed before
- * issuing any other commands.
* @dir Direction (read or write)
*
* return value
*/
static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
int nsect, int nents, int tag, void *callback,
- void *data, int barrier, int dir)
+ void *data, int dir)
{
struct host_to_dev_fis *fis;
struct mtip_port *port = dd->port;
*((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
*((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
fis->device = 1 << 6;
- if (barrier)
- fis->device |= FUA_BIT;
fis->features = nsect & 0xFF;
fis->features_ex = (nsect >> 8) & 0xFF;
fis->sect_count = ((tag << 3) | (tag >> 5));
tag,
bio_endio,
bio,
- bio->bi_rw & REQ_FUA,
bio_data_dir(bio));
} else
bio_io_error(bio);
blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
blk_queue_physical_block_size(dd->queue, 4096);
blk_queue_io_min(dd->queue, 4096);
+ /*
+ * Write-back cache is not supported by the device. FUA depends on
+ * write-back cache support, hence set flush support to zero.
+ */
blk_queue_flush(dd->queue, 0);
/* Set the capacity of the device in 512 byte sectors. */
/* BAR number used to access the HBA registers. */
#define MTIP_ABAR 5
-/* Forced Unit Access Bit */
-#define FUA_BIT 0x80
-
#ifdef DEBUG
#define dbg_printk(format, arg...) \
printk(pr_fmt(format), ##arg);
atomic_t resumeflag; /* Atomic variable to track suspend/resume */
- atomic_t eh_active; /* Flag for error handling tracking */
-
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
};
/* used to tell the module to turn on full debugging messages */
static bool debug;
-/* used to keep tray locked at all times */
-static int keeplocked;
/* default compatibility mode */
static bool autoclose=1;
static bool autoeject;
cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
cdrom_dvd_rw_close_write(cdi);
- if ((cdo->capability & CDC_LOCK) && !keeplocked) {
+ if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
cdinfo(CD_CLOSE, "Unlocking door!\n");
cdo->lock_door(cdi, 0);
}
curslot = info->hdr.curslot;
kfree(info);
- if (cdi->use_count > 1 || keeplocked) {
+ if (cdi->use_count > 1 || cdi->keeplocked) {
if (slot == CDSL_CURRENT) {
return curslot;
} else {
if (!nr)
return -ENOMEM;
- if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
- ret = -EFAULT;
- goto out;
- }
-
cgc.data_direction = CGC_DATA_READ;
while (nframes > 0) {
if (nr > nframes)
ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
if (ret)
break;
- if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
+ if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
ret = -EFAULT;
break;
}
nframes -= nr;
lba += nr;
}
-out:
kfree(cgc.buffer);
return ret;
}
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
- if (cdi->use_count != 1 || keeplocked)
+ if (cdi->use_count != 1 || cdi->keeplocked)
return -EBUSY;
if (CDROM_CAN(CDC_LOCK)) {
int ret = cdi->ops->lock_door(cdi, 0);
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
- if (keeplocked)
+ if (cdi->keeplocked)
return -EBUSY;
cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
if (!CDROM_CAN(CDC_LOCK))
return -EDRIVE_CANT_DO_THIS;
- keeplocked = arg ? 1 : 0;
+ cdi->keeplocked = arg ? 1 : 0;
/*
* Don't unlock the door on multiple opens by default, but allow
if (err)
return err;
- if (__get_user(c32.auth, &client->auth)
+ if (__get_user(c32.idx, &client->idx)
+ || __get_user(c32.auth, &client->auth)
|| __get_user(c32.pid, &client->pid)
|| __get_user(c32.uid, &client->uid)
|| __get_user(c32.magic, &client->magic)
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 5)
+ if (INTEL_INFO(dev)->gen <= 6)
enable_fbc = 0;
}
if (!enable_fbc) {
}
}
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
- pipeconf &= ~PIPECONF_INTERLACE_MASK; /* progressive */
+ pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
}
}
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
- pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+ pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
+intel_dp_link_required(int pixel_clock, int bpp)
{
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int bpp = 24;
-
- if (check_bpp)
- bpp = check_bpp;
- else if (intel_crtc)
- bpp = intel_crtc->bpp;
-
return (pixel_clock * bpp + 9) / 10;
}
return MODE_PANEL;
}
- mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ mode_rate = intel_dp_link_required(mode->clock, 24);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
if (mode_rate > max_rate) {
- mode_rate = intel_dp_link_required(intel_dp,
- mode->clock, 18);
+ mode_rate = intel_dp_link_required(mode->clock, 18);
if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
else
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock, bpp)
+ if (intel_dp_link_required(mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
DMI_MATCH(DMI_BOARD_NAME, "i915GMm-HFS"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
+ return r;
}
r = r600_audio_init(rdev);
static inline void f75375_write16(struct i2c_client *client, u8 reg,
u16 value)
{
- int err = i2c_smbus_write_byte_data(client, reg, (value << 8));
+ int err = i2c_smbus_write_byte_data(client, reg, (value >> 8));
if (err)
return;
i2c_smbus_write_byte_data(client, reg + 1, (value & 0xFF));
f75375_read16(client, F75375_REG_FAN_MIN(nr));
data->fan_target[nr] =
f75375_read16(client, F75375_REG_FAN_EXP(nr));
- data->pwm[nr] = f75375_read8(client,
- F75375_REG_FAN_PWM_DUTY(nr));
-
}
for (nr = 0; nr < 4; nr++) {
data->in_max[nr] =
if (time_after(jiffies, data->last_updated + 2 * HZ)
|| !data->valid) {
for (nr = 0; nr < 2; nr++) {
+ data->pwm[nr] = f75375_read8(client,
+ F75375_REG_FAN_PWM_DUTY(nr));
/* assign MSB, therefore shift it by 8 bits */
data->temp11[nr] =
f75375_read8(client, F75375_REG_TEMP(nr)) << 8;
fanmode |= (3 << FAN_CTRL_MODE(nr));
break;
case 2: /* AUTOMATIC*/
- fanmode |= (2 << FAN_CTRL_MODE(nr));
+ fanmode |= (1 << FAN_CTRL_MODE(nr));
break;
case 3: /* fan speed */
break;
if (data->kind == f75387) {
bool manu, duty;
- if (!(conf & (1 << F75387_FAN_CTRL_LINEAR(nr))))
+ if (!(mode & (1 << F75387_FAN_CTRL_LINEAR(nr))))
data->pwm_mode[nr] = 1;
manu = ((mode >> F75387_FAN_MANU_MODE(nr)) & 1);
/* Read fan clock dividers immediately */
w83627ehf_update_fan_div_common(dev, data);
- /* Read pwm data to save original values */
- w83627ehf_update_pwm_common(dev, data);
- for (i = 0; i < data->pwm_num; i++)
- data->pwm_enable_orig[i] = data->pwm_enable[i];
-
/* Read pwm data to save original values */
w83627ehf_update_pwm_common(dev, data);
for (i = 0; i < data->pwm_num; i++)
#include <linux/mutex.h>
#include <net/neighbour.h>
+#include <net/sch_generic.h>
#include <linux/atomic.h>
u16 reserved;
};
-struct ipoib_pseudoheader {
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_cb {
+ struct qdisc_skb_cb qdisc_cb;
+ u8 hwaddr[INFINIBAND_ALEN];
};
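One assumption worth making explicit (a sketch, not part of this patch): the layout above relies on struct ipoib_cb fitting inside skb->cb[] and on qdisc_skb_cb staying at its front. A compile-time check along these lines, placed e.g. in the device setup path, would catch violations:

	BUILD_BUG_ON(sizeof(struct ipoib_cb) > sizeof(((struct sk_buff *)0)->cb));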
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_pseudoheader *phdr)
+ struct ipoib_cb *cb)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, phdr->hwaddr + 4);
+ path = __path_find(dev, cb->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, phdr->hwaddr + 4);
+ path = path_rec_create(dev, cb->hwaddr + 4);
new_path = 1;
}
if (path) {
- /* put pseudoheader back on for next time */
- skb_push(skb, sizeof *phdr);
__skb_queue_tail(&path->queue, skb);
if (!path->query && path_rec_start(dev, path)) {
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- /* put pseudoheader back on for next time */
- skb_push(skb, sizeof *phdr);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
} else {
- struct ipoib_pseudoheader *phdr =
- (struct ipoib_pseudoheader *) skb->data;
- skb_pull(skb, sizeof *phdr);
+ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
- if (phdr->hwaddr[4] == 0xff) {
+ if (cb->hwaddr[4] == 0xff) {
/* Add in the P_Key for multicast*/
- phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- phdr->hwaddr[9] = priv->pkey & 0xff;
+ cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ cb->hwaddr[9] = priv->pkey & 0xff;
- ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
+ ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
} else {
/* unicast GID -- should be ARP or RARP reply */
ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
skb_dst(skb) ? "neigh" : "dst",
be16_to_cpup((__be16 *) skb->data),
- IPOIB_QPN(phdr->hwaddr),
- phdr->hwaddr + 4);
+ IPOIB_QPN(cb->hwaddr),
+ cb->hwaddr + 4);
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
goto unlock;
}
- unicast_arp_send(skb, dev, phdr);
+ unicast_arp_send(skb, dev, cb);
}
}
unlock:
const void *daddr, const void *saddr, unsigned len)
{
struct ipoib_header *header;
- struct dst_entry *dst;
- struct neighbour *n;
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
header->reserved = 0;
/*
- * If we don't have a neighbour structure, stuff the
- * destination address onto the front of the skb so we can
- * figure out where to send the packet later.
+ * If we don't have a dst_entry structure, stuff the
+ * destination address into skb->cb so we can figure out where
+ * to send the packet later.
*/
- dst = skb_dst(skb);
- n = NULL;
- if (dst)
- n = dst_get_neighbour_noref_raw(dst);
- if ((!dst || !n) && daddr) {
- struct ipoib_pseudoheader *phdr =
- (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
- memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+ if (!skb_dst(skb)) {
+ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+ memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
}
return 0;
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- /*
- * We add in INFINIBAND_ALEN to allow for the destination
- * address "pseudoheader" for skbs without neighbour struct.
- */
- dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
+ dev->hard_header_len = IPOIB_ENCAP_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
netif_tx_lock_bh(dev);
while (!skb_queue_empty(&mcast->pkt_queue)) {
struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n = NULL;
netif_tx_unlock_bh(dev);
skb->dev = dev;
- if (dst)
- n = dst_get_neighbour_noref_raw(dst);
- if (!dst || !n) {
- /* put pseudoheader back on for next time */
- skb_push(skb, sizeof (struct ipoib_pseudoheader));
- }
-
if (dev_queue_xmit(skb))
ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
+
netif_tx_lock_bh(dev);
}
netif_tx_unlock_bh(dev);
{
isdn_net_local *lp = netdev_priv(dev);
unsigned char *p;
- ushort len = 0;
+ int len = 0;
switch (lp->p_encap) {
case ISDN_NET_ENCAP_ETHER:
config TWL4030_CORE
bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
- depends on I2C=y && GENERIC_HARDIRQS && IRQ_DOMAIN
+ depends on I2C=y && GENERIC_HARDIRQS
help
Say yes here if you have TWL4030 / TWL6030 family chip on your board.
This core driver provides register access and IRQ handling
static struct twl_client twl_modules[TWL_NUM_SLAVES];
+#ifdef CONFIG_IRQ_DOMAIN
static struct irq_domain domain;
+#endif
/* mapping the module id to slave id and base address */
struct twl_mapping {
pdata->irq_base = status;
pdata->irq_end = pdata->irq_base + nr_irqs;
+#ifdef CONFIG_IRQ_DOMAIN
domain.irq_base = pdata->irq_base;
domain.nr_irq = nr_irqs;
-#ifdef CONFIG_OF_IRQ
domain.of_node = of_node_get(node);
domain.ops = &irq_domain_simple_ops;
-#endif
irq_domain_add(&domain);
+#endif
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
dev_dbg(&client->dev, "can't talk I2C?\n");
[RES_MAIN_REF] = 0x94,
};
-static int __init twl4030_write_script_byte(u8 address, u8 byte)
+static int __devinit twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
return err;
}
-static int __init twl4030_write_script_ins(u8 address, u16 pmb_message,
+static int __devinit twl4030_write_script_ins(u8 address, u16 pmb_message,
u8 delay, u8 next)
{
int err;
return err;
}
-static int __init twl4030_write_script(u8 address, struct twl4030_ins *script,
+static int __devinit twl4030_write_script(u8 address, struct twl4030_ins *script,
int len)
{
int err;
return err;
}
-static int __init twl4030_config_wakeup3_sequence(u8 address)
+static int __devinit twl4030_config_wakeup3_sequence(u8 address)
{
int err;
u8 data;
return err;
}
-static int __init twl4030_config_wakeup12_sequence(u8 address)
+static int __devinit twl4030_config_wakeup12_sequence(u8 address)
{
int err = 0;
u8 data;
return err;
}
-static int __init twl4030_config_sleep_sequence(u8 address)
+static int __devinit twl4030_config_sleep_sequence(u8 address)
{
int err;
return err;
}
-static int __init twl4030_config_warmreset_sequence(u8 address)
+static int __devinit twl4030_config_warmreset_sequence(u8 address)
{
int err;
u8 rd_data;
return err;
}
-static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
+static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfig)
{
int rconfig_addr;
int err;
return 0;
}
-static int __init load_twl4030_script(struct twl4030_script *tscript,
+static int __devinit load_twl4030_script(struct twl4030_script *tscript,
u8 address)
{
int err;
pr_err("TWL4030 Unable to power off\n");
}
-void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
+void __devinit twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
{
int err = 0;
int i;
# Misc strange devices
#
-# This one has to live outside of the MISC_DEVICES conditional,
-# because it may be selected by drivers/platform/x86/hp_accel.
+menu "Misc devices"
+
config SENSORS_LIS3LV02D
tristate
depends on INPUT
select INPUT_POLLDEV
default n
-menuconfig MISC_DEVICES
- bool "Misc devices"
- ---help---
- Say Y here to get to see options for device drivers from various
- different categories. This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if MISC_DEVICES
-
config AD525X_DPOT
tristate "Analog Devices Digital Potentiometers"
depends on (I2C || SPI) && SYSFS
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
-
-endif # MISC_DEVICES
+endmenu
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/c2port.h>
#define DATA_PORT 0x325
if (err)
return err;
+ spin_lock_init(&chip->irq_lock);
chip->pdev = pdev;
chip->iobase = pcim_iomap_table(pdev)[0];
* In other cases (such as with VSAless OpenFirmware), the system firmware
* leaves timers available for us to use.
*/
-static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
+static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt)
{
struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
unsigned long flags;
* fear that guest will need it. Host may reject some pages, we need to
* check the return value and maybe submit a different page.
*/
-static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
unsigned int *hv_status)
{
unsigned long status, dummy;
pfn32 = (u32)pfn;
if (pfn32 != pfn)
- return false;
+ return -1;
STATS_INC(b->stats.lock);
*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
if (vmballoon_check_status(b, status))
- return true;
+ return 0;
pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
STATS_INC(b->stats.lock_fail);
- return false;
+ return 1;
}
/*
struct page *page;
gfp_t flags;
unsigned int hv_status;
- bool locked = false;
+ int locked;
flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
do {
/* inform monitor */
locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
- if (!locked) {
+ if (locked > 0) {
STATS_INC(b->stats.refused_alloc);
if (hv_status == VMW_BALLOON_ERROR_RESET ||
if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
return -EIO;
}
- } while (!locked);
+ } while (locked != 0);
/* track allocated page */
list_add(&page->lru, &b->pages);
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
- select MISC_DEVICES
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash
for (i = 0; i < dlc; i++)
cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+ /* Store echo skb before starting the transfer */
+ can_put_echo_skb(skb, dev, 0);
+
cc770_write_reg(priv, msgobj[mo].ctrl1,
RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
stats->tx_bytes += dlc;
- can_put_echo_skb(skb, dev, 0);
/*
* HM: We had some cases of repeated IRQs so make sure the
#define CC770_IOSIZE 0x20
#define CC770_IOSIZE_INDIRECT 0x02
+/* Spinlock for cc770_isa_port_write_reg_indirect
+ * and cc770_isa_port_read_reg_indirect
+ */
+static DEFINE_SPINLOCK(cc770_isa_port_lock);
+
static struct platform_device *cc770_isa_devs[MAXDEV];
static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
int reg)
{
unsigned long base = (unsigned long)priv->reg_base;
+ unsigned long flags;
+ u8 val;
+ spin_lock_irqsave(&cc770_isa_port_lock, flags);
outb(reg, base);
- return inb(base + 1);
+ val = inb(base + 1);
+ spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
+
+ return val;
}
static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
int reg, u8 val)
{
unsigned long base = (unsigned long)priv->reg_base;
+ unsigned long flags;
+ spin_lock_irqsave(&cc770_isa_port_lock, flags);
outb(reg, base);
outb(val, base + 1);
+ spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
}
static int __devinit cc770_isa_probe(struct platform_device *pdev)
(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT)
#define FLEXCAN_ESR_ERR_ALL \
(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
+#define FLEXCAN_ESR_ALL_INT \
+ (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
+ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
/* FLEXCAN interrupt flag register (IFLAG) bits */
#define FLEXCAN_TX_BUF_ID 8
reg_iflag1 = flexcan_read(®s->iflag1);
reg_esr = flexcan_read(®s->esr);
- flexcan_write(FLEXCAN_ESR_ERR_INT, ®s->esr); /* ACK err IRQ */
+ /* ACK all bus error and state change IRQ sources */
+ if (reg_esr & FLEXCAN_ESR_ALL_INT)
+ flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr);
/*
* schedule NAPI in case of:
#define PCH_IF_CREQ_BUSY BIT(15)
#define PCH_STATUS_INT 0x8000
+#define PCH_RP 0x00008000
#define PCH_REC 0x00007f00
#define PCH_TEC 0x000000ff
priv->can.can_stats.error_passive++;
state = CAN_STATE_ERROR_PASSIVE;
cf->can_id |= CAN_ERR_CRTL;
- if (((errc & PCH_REC) >> 8) > 127)
+ if (errc & PCH_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if ((errc & PCH_TEC) > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
#define DRV_NAME "peak_pci"
struct peak_pci_chan {
- void __iomem *cfg_base; /* Common for all channels */
- struct net_device *next_dev; /* Chain of network devices */
- u16 icr_mask; /* Interrupt mask for fast ack */
+ void __iomem *cfg_base; /* Common for all channels */
+ struct net_device *prev_dev; /* Chain of network devices */
+ u16 icr_mask; /* Interrupt mask for fast ack */
};
#define PEAK_PCI_CAN_CLOCK (16000000 / 2)
{
struct sja1000_priv *priv;
struct peak_pci_chan *chan;
- struct net_device *dev, *dev0 = NULL;
+ struct net_device *dev;
void __iomem *cfg_base, *reg_base;
u16 sub_sys_id, icr;
int i, err, channels;
}
/* Create chain of SJA1000 devices */
- if (i == 0)
- dev0 = dev;
- else
- chan->next_dev = dev;
+ chan->prev_dev = pci_get_drvdata(pdev);
+ pci_set_drvdata(pdev, dev);
dev_info(&pdev->dev,
"%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
dev->name, priv->reg_base, chan->cfg_base, dev->irq);
}
- pci_set_drvdata(pdev, dev0);
-
/* Enable interrupts */
writew(icr, cfg_base + PITA_ICR + 2);
/* Disable interrupts */
writew(0x0, cfg_base + PITA_ICR + 2);
- for (dev = dev0; dev; dev = chan->next_dev) {
+ for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
unregister_sja1000dev(dev);
free_sja1000dev(dev);
priv = netdev_priv(dev);
chan = priv->priv;
- dev = chan->next_dev;
}
pci_iounmap(pdev, reg_base);
static void __devexit peak_pci_remove(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev); /* First device */
+ struct net_device *dev = pci_get_drvdata(pdev); /* Last device */
struct sja1000_priv *priv = netdev_priv(dev);
struct peak_pci_chan *chan = priv->priv;
void __iomem *cfg_base = chan->cfg_base;
dev_info(&pdev->dev, "removing device %s\n", dev->name);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
- dev = chan->next_dev;
+ dev = chan->prev_dev;
if (!dev)
break;
priv = netdev_priv(dev);
}
}
- netif_receive_skb(skb);
+ netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+
return 0;
}
err = usb_submit_urb(urb, GFP_KERNEL);
if (err) {
- if (err == -ENODEV)
- netif_device_detach(dev->netdev);
-
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
err = usb_submit_urb(dev->intr_urb, GFP_KERNEL);
if (err) {
- if (err == -ENODEV)
- netif_device_detach(dev->netdev);
-
dev_warn(netdev->dev.parent, "intr URB submit failed: %d\n",
err);
return 0;
failed:
- if (err == -ENODEV)
- netif_device_detach(dev->netdev);
-
dev_warn(netdev->dev.parent, "couldn't submit control: %d\n", err);
return err;
skb = build_skb(data);
if (likely(skb)) {
-
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
return;
}
-
+ kfree(new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
if (!flash_attr)
- return -ENOMEM;
+ return 0;
fcomp.bnad = bnad;
fcomp.comp_status = 0;
if (ret != BFA_STATUS_OK) {
spin_unlock_irqrestore(&bnad->bna_lock, flags);
kfree(flash_attr);
- goto out_err;
+ return 0;
}
spin_unlock_irqrestore(&bnad->bna_lock, flags);
wait_for_completion(&fcomp.comp);
}
kfree(flash_attr);
return flash_part;
-out_err:
- return -EINVAL;
}
static int
/* Query the flash partition based on the offset */
flash_part = bnad_get_flash_partition_by_offset(bnad,
eeprom->offset, &base_offset);
- if (flash_part <= 0)
+ if (flash_part == 0)
return -EFAULT;
fcomp.bnad = bnad;
/* Query the flash partition based on the offset */
flash_part = bnad_get_flash_partition_by_offset(bnad,
eeprom->offset, &base_offset);
- if (flash_part <= 0)
+ if (flash_part == 0)
return -EFAULT;
fcomp.bnad = bnad;
be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
- char file_name[ETHTOOL_FLASH_MAX_FILENAME];
- file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
- strcpy(file_name, efl->data);
-
- return be_load_fw(adapter, file_name);
+ return be_load_fw(adapter, efl->data);
}
static int
phy_id = 0;
}
- snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+ snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
fep->phy_interface);
if (IS_ERR(phy_dev)) {
e1000_setup_rctl(adapter);
e1000_set_rx_mode(netdev);
+ rctl = er32(RCTL);
+
/* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & E1000_WUFC_MC) {
- rctl = er32(RCTL);
+ if (wufc & E1000_WUFC_MC)
rctl |= E1000_RCTL_MPE;
- ew32(RCTL, rctl);
- }
+
+ /* enable receives in the hardware */
+ ew32(RCTL, rctl | E1000_RCTL_EN);
if (hw->mac_type >= e1000_82540) {
ctrl = er32(CTRL);
vf_devfn = pdev->devfn + 0x80;
pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
while (pvfdev) {
- if (pvfdev->devfn == vf_devfn)
+ if (pvfdev->devfn == vf_devfn &&
+ (pvfdev->bus->number >= pdev->bus->number))
vfs_found++;
vf_devfn += vf_stride;
pvfdev = pci_get_device(hw->vendor_id,
################################################################################
#
# Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 - 2010 Intel Corporation.
+# Copyright(c) 2009 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
static const char igbvf_driver_string[] =
"Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
- "Copyright (c) 2009 - 2011 Intel Corporation.";
+ "Copyright (c) 2009 - 2012 Intel Corporation.";
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2010 Intel Corporation.
+# Copyright(c) 1999 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
u8 err = 0;
+ u8 prio_tc[MAX_USER_PRIORITY] = {0};
+ int i;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
/* Fail command if not in CEE mode */
if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return err;
- if (state > 0)
+ if (state > 0) {
err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
- else
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
+ } else {
err = ixgbe_setup_tc(netdev, 0);
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
return err;
}
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
sizeof(((struct rtnl_link_stats64 *)0)->m), \
offsetof(struct rtnl_link_stats64, m)
-static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
#endif /* IXGBE_FCOE */
};
-#define IXGBE_QUEUE_STATS_LEN \
- ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
- ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically, so
+ * IXGBE_NUM_RX_QUEUES is defined to evaluate to num_tx_queues. This is
+ * needed because there is no good way to get the maximum number of
+ * rx queues with CONFIG_RPS disabled.
+ */
+#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
+
+#define IXGBE_QUEUE_STATS_LEN ( \
+ (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
- (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
- IXGBE_FLAG_DCB_ENABLED) ? \
- (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
- / sizeof(u64) : 0)
+ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+ / sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
IXGBE_PB_STATS_LEN + \
IXGBE_QUEUE_STATS_LEN)
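The length macros above must match the number of u64 entries filled in by the stats and strings callbacks, since ethtool sizes its buffers from this count. A worked example of the accounting, assuming a device with 8 tx queues, two u64 counters (packets and bytes) per queue stats structure, and 8-entry u64 arrays for each pxon/pxoff counter:

/*
 *   IXGBE_QUEUE_STATS_LEN = (8 tx + 8 rx) * 2 = 32 per-queue entries
 *   IXGBE_PB_STATS_LEN    = 4 arrays * 8 counters = 32 entries
 *   IXGBE_STATS_LEN       = IXGBE_GLOBAL_STATS_LEN + 32 + 32
 */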
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->tx_ring[j];
+ if (!ring) {
+ data[i] = 0;
+ data[i+1] = 0;
+ i += 2;
+ continue;
+ }
+
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
data[i] = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
}
- for (j = 0; j < adapter->num_rx_queues; j++) {
+ for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->rx_ring[j];
+ if (!ring) {
+ data[i] = 0;
+ data[i+1] = 0;
+ i += 2;
+ continue;
+ }
+
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
data[i] = ring->stats.packets;
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
}
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxontxc[j];
- data[i++] = adapter->stats.pxofftxc[j];
- }
- for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxonrxc[j];
- data[i++] = adapter->stats.pxoffrxc[j];
- }
+
+ for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxontxc[j];
+ data[i++] = adapter->stats.pxofftxc[j];
+ }
+ for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxonrxc[j];
+ data[i++] = adapter->stats.pxoffrxc[j];
}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
char *p = (char *)data;
int i;
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->num_tx_queues; i++) {
+ for (i = 0; i < netdev->num_tx_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
- sprintf(p, "tx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
- sprintf(p, "rx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+ sprintf(p, "tx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+ sprintf(p, "rx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
break;
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2011 Intel Corporation.";
+ "Copyright (c) 1999-2012 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
/*
* we must limit the number of descriptors so that the
* total size of max desc * buf_len is not greater
- * than 65535
+ * than 65536
*/
if (ring_is_ps_enabled(ring)) {
-#if (MAX_SKB_FRAGS > 16)
+#if (PAGE_SIZE < 8192)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS > 8)
+#elif (PAGE_SIZE < 16384)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (MAX_SKB_FRAGS > 4)
+#elif (PAGE_SIZE < 32768)
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
} else {
- if (rx_buf_len < IXGBE_RXBUFFER_4K)
+ if (rx_buf_len <= IXGBE_RXBUFFER_4K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rx_buf_len < IXGBE_RXBUFFER_8K)
+ else if (rx_buf_len <= IXGBE_RXBUFFER_8K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
else
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
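Both branches above follow from the constraint spelled out in the comment: the number of RSC descriptors times the receive buffer length must not exceed 65536 bytes. A worked example under that assumption:

/*
 *   buf_len <= 4 KiB  -> MAXDESC_16 (16 * 4096  = 65536)
 *   buf_len <= 8 KiB  -> MAXDESC_8  ( 8 * 8192  = 65536)
 *   buf_len  > 8 KiB  -> MAXDESC_4  ( 4 * 16384 = 65536 for 16 KiB buffers)
 */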
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
vf_shift = adapter->num_vfs % 32;
- reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
+ reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
adapter->num_tx_queues = 1;
done:
+ if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
+ (adapter->netdev->reg_state == NETREG_UNREGISTERING))
+ return 0;
+
/* Notify the stack of the (possibly) reduced queue counts. */
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
return netif_set_real_num_rx_queues(adapter->netdev,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
vf_devfn = pdev->devfn + 0x80;
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
while (pvfdev) {
- if (pvfdev->devfn == vf_devfn)
+ if (pvfdev->devfn == vf_devfn &&
+ (pvfdev->bus->number >= pdev->bus->number))
vfs_found++;
vf_devfn += 2;
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
retval = ixgbe_set_vf_macvlan(adapter, vf, index,
(unsigned char *)(&msgbuf[1]));
+ if (retval == -ENOSPC)
+ e_warn(drv, "VF %d has requested a MACVLAN filter "
+ "but there is no space for it\n", vf);
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
################################################################################
#
# Intel 82599 Virtual Function driver
-# Copyright(c) 1999 - 2010 Intel Corporation.
+# Copyright(c) 1999 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
#define DRV_VERSION "2.2.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2010 Intel Corporation.";
+ "Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
if (msg & IXGBE_VT_MSGTYPE_NACK)
pr_warn("Last Request of type %2.2x to PF Nacked\n",
msg & 0xFF);
- goto out;
+ /*
+ * Restore the PFSTS bit in case someone is polling for a
+ * return message from the PF
+ */
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
}
/*
*/
if (got_ack)
hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
-out:
+
return IRQ_HANDLED;
}
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
return ret_val;
}
+static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
+ u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 retmsg[IXGBE_VFMAILBOX_SIZE];
+ s32 retval = mbx->ops.write_posted(hw, msg, size);
+
+ if (!retval)
+ mbx->ops.read_posted(hw, retmsg, size);
+}
+
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
u32 cnt, i;
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
- mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+ ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
return 0;
}
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
msgbuf[0] = IXGBE_VF_SET_VLAN;
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- return mbx->ops.write_posted(hw, msgbuf, 2);
+ ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+
+ return 0;
}
/**
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
}
/* Allocate and setup a new buffer for receiving */
-static int skge_rx_setup(struct pci_dev *pdev,
- struct skge_element *e,
- struct sk_buff *skb, unsigned int bufsize)
+static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+ struct sk_buff *skb, unsigned int bufsize)
{
struct skge_rx_desc *rd = e->desc;
- dma_addr_t map;
+ u64 map;
- map = pci_map_single(pdev, skb->data, bufsize,
+ map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, map))
- goto mapping_error;
- rd->dma_lo = lower_32_bits(map);
- rd->dma_hi = upper_32_bits(map);
+ rd->dma_lo = map;
+ rd->dma_hi = map >> 32;
e->skb = skb;
rd->csum1_start = ETH_HLEN;
rd->csum2_start = ETH_HLEN;
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, bufsize);
- return 0;
-
-mapping_error:
- if (net_ratelimit())
- dev_warn(&pdev->dev, "%s: rx mapping error\n",
- skb->dev->name);
- return -EIO;
}
/* Resume receiving using existing skb,
return -ENOMEM;
skb_reserve(skb, NET_IP_ALIGN);
- if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
- kfree_skb(skb);
- return -ENOMEM;
- }
-
+ skge_rx_setup(skge, e, skb, skge->rx_buf_size);
} while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
struct skge_tx_desc *td;
int i;
u32 control, len;
- dma_addr_t map;
+ u64 map;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(hw->pdev, map))
- goto mapping_error;
-
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, len);
- td->dma_lo = lower_32_bits(map);
- td->dma_hi = upper_32_bits(map);
+ td->dma_lo = map;
+ td->dma_hi = map >> 32;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
const int offset = skb_checksum_start_offset(skb);
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
- if (dma_mapping_error(&hw->pdev->dev, map))
- goto mapping_unwind;
e = e->next;
e->skb = skb;
tf = e->desc;
BUG_ON(tf->control & BMU_OWN);
- tf->dma_lo = lower_32_bits(map);
- tf->dma_hi = upper_32_bits(map);
+ tf->dma_lo = map;
+ tf->dma_hi = (u64) map >> 32;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, skb_frag_size(frag));
}
return NETDEV_TX_OK;
-
-mapping_unwind:
- /* unroll any pages that were already mapped. */
- if (e != skge->tx_ring.to_use) {
- struct skge_element *u;
-
- for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
- pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
- dma_unmap_len(u, maplen),
- PCI_DMA_TODEVICE);
- e = skge->tx_ring.to_use;
- }
- /* undo the mapping for the skb header */
- pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
- dma_unmap_len(e, maplen),
- PCI_DMA_TODEVICE);
-mapping_error:
- /* mapping error causes error message and packet to be discarded. */
- if (net_ratelimit())
- dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
}
if (!nskb)
goto resubmit;
- if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
- dev_kfree_skb(nskb);
- goto resubmit;
- }
-
pci_unmap_single(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
+ skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
}
skb_put(skb, len);
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
}
kfree(priv->mfunc.master.slave_state);
- iounmap(priv->mfunc.comm);
- dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
- priv->mfunc.vhcr,
- priv->mfunc.vhcr_dma);
- priv->mfunc.vhcr = NULL;
}
+
+ iounmap(priv->mfunc.comm);
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i].rx_info)
- mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+ priv->prof->rx_ring_size, priv->stride);
if (priv->rx_cq[i].buf)
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
return 0;
err:
- while (i--)
+ while (i--) {
+ dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
+ pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
+ PCI_DMA_FROMDEVICE);
put_page(skb_frags[i].page);
+ }
return -ENOMEM;
}
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
+ struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_en_unmap_buffer(&ring->wqres.buf);
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
}
u32 prot;
int err;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
pqp = get_promisc_qp(dev, 0, steer, qpn);
if (!pqp)
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
if (!get_promisc_qp(dev, 0, steer, qpn))
bool ret = false;
int i;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
int loc, i;
int err;
- s_steer = &mlx4_priv(dev)->steer[0];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
pqp = get_promisc_qp(dev, 0, steer, qpn);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
return 0;
if (mlx4_is_mfunc(dev))
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
return 0;
if (mlx4_is_mfunc(dev))
struct mlx4_en_rx_ring *ring,
u32 size, u16 stride);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring);
+ struct mlx4_en_rx_ring *ring,
+ u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring);
select NET_CORE
select MII
select CRC32
- select MISC_DEVICES
select EEPROM_93CX6
---help---
SPI driver for Micrel KS8851 SPI attached network chip.
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/sh_eth.h>
#include "sh_eth.h"
sh_eth_write(ndev, 0, TRIMD);
/* Recv frame limit set register */
- sh_eth_write(ndev, RFLR_VALUE, RFLR);
+ sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
+ RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
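The RFLR write above now derives the receive frame limit from the MTU plus the Ethernet header, an optional VLAN tag and the FCS, instead of the old fixed 0x1000. For a standard 1500-byte MTU this works out as follows (header sizes taken from the usual if_ether.h/if_vlan.h definitions):

/*
 *   RFLR = mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN
 *        = 1500 + 14 + 4 + 4 = 1522 bytes
 */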
RPADIR_PADR = 0x0003f,
};
-/* RFLR */
-#define RFLR_VALUE 0x1000
-
/* FDR */
#define DEFAULT_FDR_INIT 0x00000707
if (IS_ERR(priv->phydev)) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_id);
+ ret = PTR_ERR(priv->phydev);
priv->phydev = NULL;
- return PTR_ERR(priv->phydev);
+ return ret;
}
priv->link = 0;
data->clk = clk_get(dev, NULL);
if (IS_ERR(data->clk)) {
- data->clk = NULL;
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
+ data->clk = NULL;
goto bail_out;
}
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) || PPC_PS3
+ depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB || MIPS) || PPC_PS3
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
if (dev->irq != 0)
free_irq(dev->irq, dev);
- /* Power down the chip */
- pci_set_power_state(vptr->pdev, PCI_D3hot);
-
velocity_free_rings(vptr);
vptr->flags &= (~VELOCITY_FLAGS_OPENED);
struct hv_device *device_obj = net_device_ctx->device_ctx;
int ret;
- netif_stop_queue(net);
+ netif_tx_disable(net);
ret = rndis_filter_close(device_obj);
if (ret != 0)
int ret;
unsigned int i, num_pages, npg_data;
- /* Add multipage for skb->data and additional one for RNDIS */
+ /* Add multiple pages for skb->data and an additional 2 for RNDIS */
npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
>> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
- num_pages = skb_shinfo(skb)->nr_frags + npg_data + 1;
+ num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer));
- /* Setup the rndis header */
- packet->page_buf_cnt = num_pages;
+ /* If the rndis msg goes beyond 1 page, we will add 1 later */
+ packet->page_buf_cnt = num_pages - 1;
/* Initialize it from the skb */
packet->total_data_buflen = skb->len;
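npg_data above counts how many pages the linear skb->data area touches (it can straddle a page boundary even when it is shorter than one page), and two extra page-buffer slots are reserved for the RNDIS header, one of which is only consumed later if that header itself crosses a page boundary. A small worked example with illustrative values:

/*
 *   skb->data starts 3900 bytes into a 4096-byte page, headlen = 300
 *     -> the last byte falls in the next page, so npg_data = 2
 *   nr_frags = 3 paged fragments
 *   num_pages = 3 + 2 + 2 = 7 slots allocated
 *   page_buf_cnt = num_pages - 1 = 6 until the RNDIS split (if any)
 */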
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
- netif_stop_queue(net);
+ netif_tx_disable(net);
}
}
skb->ip_summed = CHECKSUM_NONE;
net->stats.rx_packets++;
- net->stats.rx_bytes += skb->len;
+ net->stats.rx_bytes += packet->total_data_buflen;
/*
* Pass the skb back up. Network stack will deallocate the skb when it
nvdev->start_remove = true;
cancel_delayed_work_sync(&ndevctx->dwork);
- netif_stop_queue(ndev);
+ netif_tx_disable(ndev);
rndis_filter_device_remove(hdev);
ndev->mtu = mtu;
cancel_delayed_work_sync(&ndev_ctx->dwork);
/* Stop outbound asap */
- netif_stop_queue(net);
+ netif_tx_disable(net);
unregister_netdev(net);
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
pkt->total_data_buflen -= data_offset;
+
+ /*
+ * Make sure we got a valid RNDIS message; at this point
+ * total_data_buflen should be the data packet size plus the trailer
+ * padding size.
+ */
+ if (pkt->total_data_buflen < rndis_pkt->data_len) {
+ netdev_err(dev->net_dev->ndev, "rndis message buffer "
+ "overflow detected (got %u, min %u)"
+ "...dropping this message!\n",
+ pkt->total_data_buflen, rndis_pkt->data_len);
+ return;
+ }
+
+ /*
+ * Remove the rndis trailer padding from the rndis packet message.
+ * rndis_pkt->data_len tells us the real data length; only the data
+ * packet is copied to the stack, without the rndis trailer padding.
+ */
+ pkt->total_data_buflen = rndis_pkt->data_len;
pkt->data = (void *)((unsigned long)pkt->data + data_offset);
pkt->is_data_pkt = true;
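The check and trim above ensure that only the real payload, and never the RNDIS trailer padding, is handed to the stack. An example with hypothetical sizes:

/*
 *   total_data_buflen after subtracting the header = 1536
 *   rndis_pkt->data_len                            = 1514
 *   1536 >= 1514, so the message is accepted and total_data_buflen is
 *   trimmed to 1514; the 22 bytes of trailer padding are dropped.
 */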
(unsigned long)rndisMessage & (PAGE_SIZE-1);
pkt->page_buf[0].len = rndisMessageSize;
+ /* Add one page_buf if the rndis msg crosses a page boundary */
+ if (pkt->page_buf[0].offset + rndisMessageSize > PAGE_SIZE) {
+ int i;
+ for (i = pkt->page_buf_cnt; i > 1; i--)
+ pkt->page_buf[i] = pkt->page_buf[i-1];
+ pkt->page_buf_cnt++;
+ pkt->page_buf[0].len = PAGE_SIZE - pkt->page_buf[0].offset;
+ pkt->page_buf[1].pfn = virt_to_phys((void *)((ulong)
+ rndisMessage + pkt->page_buf[0].len)) >> PAGE_SHIFT;
+ pkt->page_buf[1].offset = 0;
+ pkt->page_buf[1].len = rndisMessageSize - pkt->page_buf[0].len;
+ }
+
/* Save the packet send completion and context */
filterPacket->completion = pkt->completion.send.send_completion;
filterPacket->completion_ctx =
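The page_buf handling above splits the RNDIS message into two entries when it straddles a page boundary, shifting every existing entry up by one. A worked example with hypothetical values and PAGE_SIZE = 4096:

/*
 *   rndis msg starts at offset 4000 in its page, rndisMessageSize = 200
 *   4000 + 200 > 4096, so the message spans two pages:
 *     page_buf[0]: offset 4000, len 96   (rest of the first page)
 *     page_buf[1]: offset 0,    len 104  (start of the next page)
 *   page_buf_cnt is incremented to account for the extra slot.
 */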
bool "Token Ring driver support"
depends on NETDEVICES && !UML
depends on (PCI || ISA || MCA || CCW || PCMCIA)
- select LLC
help
Token Ring is IBM's way of communication on a local network; the
rest of the world uses Ethernet. To participate on a Token Ring
if TR
+config WANT_LLC
+ def_bool y
+ select LLC
+
config PCMCIA_IBMTR
tristate "IBM PCMCIA tokenring adapter support"
depends on IBMTR!=y && PCMCIA
/*
* Workaround for early ACK timeouts, add an offset to match the
- * initval's 64us ack timeout value.
+ * initval's 64us ack timeout value. Use 48us for the CTS timeout.
* This was initially only meant to work around an issue with delayed
* BA frames in some implementations, but it has been found to fix ACK
* timeout issues in other cases as well.
*/
- if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) {
acktimeout += 64 - sifstime - ah->slottime;
+ ctstimeout += 48 - sifstime - ah->slottime;
+ }
+
ath9k_hw_set_sifs_time(ah, sifstime);
ath9k_hw_setslottime(ah, slottime);
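The offsets added above shift the hardware ACK and CTS timeouts so they line up with the initvals' 64us and 48us values once the SIFS and slot time are subtracted. A worked example assuming typical 2 GHz timings of a 10us SIFS and a 9us short slot time:

/*
 *   acktimeout += 64 - 10 - 9 = 45 us
 *   ctstimeout += 48 - 10 - 9 = 29 us
 */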
ARRAY_SIZE(ath9k_tpt_blink));
#endif
+ INIT_WORK(&sc->hw_reset_work, ath_reset_work);
+ INIT_WORK(&sc->hw_check_work, ath_hw_check);
+ INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
+ INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+
/* Register with mac80211 */
error = ieee80211_register_hw(hw);
if (error)
goto error_world;
}
- INIT_WORK(&sc->hw_reset_work, ath_reset_work);
- INIT_WORK(&sc->hw_check_work, ath_hw_check);
- INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
- INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
ath_init_leds(sc);
return rate;
/* This should not happen */
- WARN_ON(1);
+ WARN_ON_ONCE(1);
rate = ath_rc_priv->valid_rate_index[0];
(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
ATH9K_RXERR_KEYMISS));
+ /*
+ * Key miss events are only relevant for pairwise keys where the
+ * descriptor does contain a valid key index. This has been observed
+ * mostly with CCMP encryption.
+ */
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
if (!rx_stats->rs_datalen)
return false;
/*
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
} else {
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
else
sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
sta_priv->max_agg_bufsize;
- IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
sta->addr, tid);
return iwl_send_lq_cmd(priv, ctx,
u32 status = le16_to_cpu(tx_resp->status.status);
int i;
+ WARN_ON(tid == IWL_TID_NON_QOS);
+
if (agg->wait_for_ba)
IWL_DEBUG_TX_REPLY(priv,
"got tx response w/o block-ack\n");
}
__skb_queue_head_init(&skbs);
- priv->tid_data[sta_id][tid].next_reclaimed = next_reclaimed;
- IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
- next_reclaimed);
+ if (tid != IWL_TID_NON_QOS) {
+ priv->tid_data[sta_id][tid].next_reclaimed =
+ next_reclaimed;
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
+ next_reclaimed);
+ }
/*we can free until ssn % q.n_bd not inclusive */
WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
#define IWL_INVALID_STATION 255
#define IWL_MAX_TID_COUNT 8
+#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
txq->time_stamp = jiffies;
if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
+ tid != IWL_TID_NON_QOS &&
txq_id != trans_pcie->agg_txq[sta_id][tid])) {
/*
* FIXME: this is a uCode bug which need to be addressed,
adapter->if_ops.cleanup_if(adapter);
- dev_kfree_skb_any(adapter->sleep_cfm);
+ if (adapter->sleep_cfm)
+ dev_kfree_skb_any(adapter->sleep_cfm);
}
/*
continue;
rtnl_lock();
- mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
+ if (priv->wdev && priv->netdev)
+ mwifiex_del_virtual_intf(priv->wdev->wiphy,
+ priv->netdev);
rtnl_unlock();
}
if (!priv)
goto exit_remove;
- wiphy_unregister(priv->wdev->wiphy);
- wiphy_free(priv->wdev->wiphy);
- kfree(priv->wdev);
+ if (priv->wdev) {
+ wiphy_unregister(priv->wdev->wiphy);
+ wiphy_free(priv->wdev->wiphy);
+ kfree(priv->wdev);
+ }
mwifiex_terminate_workqueue(adapter);
int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
{
bool cancel_flag = false;
- int status = adapter->cmd_wait_q.status;
+ int status;
struct cmd_ctrl_node *cmd_queued;
if (!adapter->cmd_queued)
mwifiex_cancel_pending_ioctl(adapter);
dev_dbg(adapter->dev, "cmd cancel\n");
}
+
+ status = adapter->cmd_wait_q.status;
adapter->cmd_wait_q.status = 0;
return status;
if (!netif_queue_stopped(priv->netdev))
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
/* Clear any past association response stored for
* application retrieval */
if (!netif_queue_stopped(priv->netdev))
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
if (!ret) {
dev_dbg(adapter->dev, "info: network found in scan"
static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
{
- int rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0);
- int rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1);
- int rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2);
+ s8 rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0);
+ s8 rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1);
+ s8 rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2);
u16 eeprom;
u8 offset0;
u8 offset1;
* which gives less energy...
*/
rssi0 = max(rssi0, rssi1);
- return max(rssi0, rssi2);
+ return (int)max(rssi0, rssi2);
}
void rt2800_process_rxwi(struct queue_entry *entry,