# ip link delete vxlan0
3. Show vxlan info
- # ip -d show vxlan0
+ # ip -d link show vxlan0
It is possible to create, destroy and display the vxlan
forwarding table using the new bridge command.
1. Create forwarding table entry
# bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
2. Delete forwarding table entry
- # bridge fdb delete 00:17:42:8a:b4:05
+ # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0
3. Show forwarding table
# bridge fdb show dev vxlan0
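The same bridge fdb syntax can also seed default destinations for
unknown traffic. As a sketch only (not part of the excerpt above; the
192.19.0.3 address is a placeholder remote VTEP and this assumes an
iproute2 build whose bridge command supports fdb append), flooding of
unknown-destination frames to an extra VTEP would look like:
# bridge fdb append to 00:00:00:00:00:00 dst 192.19.0.3 dev vxlan0
Entries added this way appear in the same table and can be removed
with bridge fdb delete as in step 2.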
exynos_pdma1_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4210_pdma1_peri);
exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+
+ if (samsung_rev() == EXYNOS4210_REV_0)
+ exynos_mdma1_device.res.start = EXYNOS4_PA_S_MDMA1;
} else if (soc_is_exynos4212() || soc_is_exynos4412()) {
exynos_pdma0_pdata.nr_valid_peri =
ARRAY_SIZE(exynos4212_pdma0_peri);
#define EXYNOS4_PA_MDMA0 0x10810000
#define EXYNOS4_PA_MDMA1 0x12850000
+#define EXYNOS4_PA_S_MDMA1 0x12840000
#define EXYNOS4_PA_PDMA0 0x12680000
#define EXYNOS4_PA_PDMA1 0x12690000
#define EXYNOS5_PA_MDMA0 0x10800000
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/i2c-omap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <mach/irqs.h>
#include <plat/i2c.h>
+#include <plat/omap-pm.h>
#include <plat/omap_device.h>
#define OMAP_I2C_SIZE 0x3f
#ifdef CONFIG_ARCH_OMAP2PLUS
+/*
+ * XXX This function is a temporary compatibility wrapper - only
+ * needed until the I2C driver can be converted to call
+ * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
+ */
+static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
+{
+ omap_pm_set_max_mpu_wakeup_lat(dev, t);
+}
+
static inline int omap2_i2c_add_bus(int bus_id)
{
int l;
dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr;
pdata->flags = dev_attr->flags;
+	/*
+	 * When waiting for completion of an i2c transfer, we need to
+	 * set a wake-up latency constraint for the MPU. This is to
+	 * ensure quick enough wakeup from idle when the transfer
+	 * completes.
+	 * Only omap3 has support for constraints.
+	 */
+ if (cpu_is_omap34xx())
+ pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
pdev = omap_device_build(name, bus_id, oh, pdata,
sizeof(struct omap_i2c_bus_platform_data),
NULL, 0, 0);
err |= restore_fpu_state(regs, fpu_save);
err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
- err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
-
- if (err)
+ if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
goto segv;
err |= __get_user(rwin_save, &sf->rwin_save);
rq_end_io_fn *done)
{
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+ bool is_pm_resume;
WARN_ON(irqs_disabled());
rq->rq_disk = bd_disk;
rq->end_io = done;
+ /*
+ * need to check this before __blk_run_queue(), because rq can
+ * be freed before that returns.
+ */
+ is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
spin_lock_irq(q->queue_lock);
__elv_add_request(q, rq, where);
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
- if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+ if (is_pm_resume)
q->request_fn(q);
spin_unlock_irq(q->queue_lock);
}
if (ancestor)
error = dev_pm_qos_add_request(ancestor, req, value);
- if (error)
+ if (error < 0)
req->dev = NULL;
return error;
/* cf. http://lkml.org/lkml/2006/10/31/28 */
if (!fastfail)
- q->request_fn(q);
+ __blk_run_queue(q);
}
static void
out_unreg_blkdev:
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
+ destroy_workqueue(floppy_wq);
for (drive = 0; drive < N_DRIVE; drive++) {
if (!disks[drive])
break;
}
put_disk(disks[drive]);
}
- destroy_workqueue(floppy_wq);
return err;
}
unregister_blkdev(FLOPPY_MAJOR, "fd");
platform_driver_unregister(&floppy_driver);
+ destroy_workqueue(floppy_wq);
+
for (drive = 0; drive < N_DRIVE; drive++) {
del_timer_sync(&motor_off_timer[drive]);
cancel_delayed_work_sync(&fd_timeout);
cancel_delayed_work_sync(&fd_timer);
- destroy_workqueue(floppy_wq);
if (atomic_read(&usage_count))
floppy_release_irq_and_dma();
struct mtip_cmd *command;
int tag, cmdto_cnt = 0;
unsigned int bit, group;
- unsigned int num_command_slots = port->dd->slot_groups * 32;
+ unsigned int num_command_slots;
unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
if (unlikely(!port))
}
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+ num_command_slots = port->dd->slot_groups * 32;
for (tag = 0; tag < num_command_slots; tag++) {
/*
fis.device);
/* check for erase mode support during secure erase.*/
- if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
- && (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+ if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
+ (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
erasemode = 1;
}
* return value
* None
*/
-static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
int nsect, int nents, int tag, void *callback,
void *data, int dir)
{
struct mtip_port *port = dd->port;
struct mtip_cmd *command = &port->commands[tag];
int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ u64 start = sector;
/* Map the scatter list for DMA access */
nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
fis->opts = 1 << 7;
fis->command =
(dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
- *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
- *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
+ fis->lba_low = start & 0xFF;
+ fis->lba_mid = (start >> 8) & 0xFF;
+ fis->lba_hi = (start >> 16) & 0xFF;
+ fis->lba_low_ex = (start >> 24) & 0xFF;
+ fis->lba_mid_ex = (start >> 32) & 0xFF;
+ fis->lba_hi_ex = (start >> 40) & 0xFF;
fis->device = 1 << 6;
fis->features = nsect & 0xFF;
fis->features_ex = (nsect >> 8) & 0xFF;
#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
/* check for erase mode support during secure erase */
-#define MTIP_SEC_ERASE_MODE 0x3
+#define MTIP_SEC_ERASE_MODE 0x2
/* # of times to retry timed out/failed IOs */
#define MTIP_MAX_RETRIES 2
MTIP_DDF_REBUILD_FAILED_BIT = 8,
};
-__packed struct smart_attr{
+struct smart_attr {
u8 attr_id;
u16 flags;
u8 cur;
u8 worst;
u32 data;
u8 res[3];
-};
+} __packed;
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
#define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */
#define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */
#define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */
+#define AT91_TWI_QUICK 0x0040 /* SMBus quick command */
#define AT91_TWI_SWRST 0x0080 /* Software Reset */
#define AT91_TWI_MMR 0x0004 /* Master Mode Register */
INIT_COMPLETION(dev->cmd_complete);
dev->transfer_status = 0;
- if (dev->msg->flags & I2C_M_RD) {
+
+ if (!dev->buf_len) {
+ at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
+ at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+ } else if (dev->msg->flags & I2C_M_RD) {
unsigned start_flags = AT91_TWI_START;
if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
select_init_dma_fail:
dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
select_init_pio_fail:
+ dmaengine_terminate_all(i2c->dmach);
return -EINVAL;
/* Write failpath. */
write_init_dma_fail:
dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
write_init_pio_fail:
+ dmaengine_terminate_all(i2c->dmach);
return -EINVAL;
}
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
-#include <linux/pm_qos.h>
/* I2C controller revisions */
#define OMAP_I2C_OMAP1_REV_2 0x20
int reg_shift; /* bit shift for I2C register addresses */
struct completion cmd_complete;
struct resource *ioarea;
- u32 latency; /* maximum MPU wkup latency */
- struct pm_qos_request pm_qos_request;
+ u32 latency; /* maximum mpu wkup latency */
+ void (*set_mpu_wkup_lat)(struct device *dev,
+ long latency);
u32 speed; /* Speed of bus in kHz */
u32 dtrev; /* extra revision from DT */
u32 flags;
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
- dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->latency = (1000000 * dev->threshold) /
+ (1000 * dev->speed / 8);
}
/*
dev->buf = msg->buf;
dev->buf_len = msg->len;
+ /* make sure writes to dev->buf_len are ordered */
+ barrier();
+
omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);
/* Clear the FIFO Buffers */
*/
timeout = wait_for_completion_timeout(&dev->cmd_complete,
OMAP_I2C_TIMEOUT);
- dev->buf_len = 0;
if (timeout == 0) {
dev_err(dev->dev, "controller timed out\n");
omap_i2c_init(dev);
if (r < 0)
goto out;
- /*
- * When waiting for completion of a i2c transfer, we need to
- * set a wake up latency constraint for the MPU. This is to
- * ensure quick enough wakeup from idle, when transfer
- * completes.
- */
- if (dev->latency)
- pm_qos_add_request(&dev->pm_qos_request,
- PM_QOS_CPU_DMA_LATENCY,
- dev->latency);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, dev->latency);
for (i = 0; i < num; i++) {
r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
break;
}
- if (dev->latency)
- pm_qos_remove_request(&dev->pm_qos_request);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->set_mpu_wkup_lat(dev->dev, -1);
if (r == 0)
r = num;
} else if (pdata != NULL) {
dev->speed = pdata->clkrate;
dev->flags = pdata->flags;
+ dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
dev->dtrev = pdata->rev;
}
dev->b_hw = 1; /* Enable hardware fixes */
/* calculate wakeup latency constraint for MPU */
- dev->latency = (1000000 * dev->fifo_size) /
- (1000 * dev->speed / 8);
+ if (dev->set_mpu_wkup_lat != NULL)
+ dev->latency = (1000000 * dev->fifo_size) /
+ (1000 * dev->speed / 8);
}
/* reset ASAP, clearing any IRQs */
dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio);
goto free_gpio;
}
+ i2c->gpios[idx] = gpio;
ret = gpio_request(gpio, "i2c-bus");
if (ret) {
if (!md_in_flight(md))
wake_up(&md->wait);
+ /*
+ * Run this off this callpath, as drivers could invoke end_io while
+ * inside their request_fn (and holding the queue lock). Calling
+ * back into ->request_fn() could deadlock attempting to grab the
+ * queue lock again.
+ */
if (run_queue)
- blk_run_queue(md->queue);
+ blk_run_queue_async(md->queue);
/*
* dm_put() must be at the end of this function. See the comment above
memset(bbp, 0xff, PAGE_SIZE);
for (i = 0 ; i < bb->count ; i++) {
- u64 internal_bb = *p++;
+ u64 internal_bb = p[i];
u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
| BB_LEN(internal_bb));
- *bbp++ = cpu_to_le64(store_bb);
+ bbp[i] = cpu_to_le64(store_bb);
}
bb->changed = 0;
if (read_seqretry(&bb->lock, seq))
}
EXPORT_SYMBOL_GPL(md_stop_writes);
-void md_stop(struct mddev *mddev)
+static void __md_stop(struct mddev *mddev)
{
mddev->ready = 0;
mddev->pers->stop(mddev);
mddev->pers = NULL;
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
}
+
+void md_stop(struct mddev *mddev)
+{
+	/* stop the array and free any attached data structures.
+ * This is called from dm-raid
+ */
+ __md_stop(mddev);
+ bitmap_destroy(mddev);
+ if (mddev->bio_set)
+ bioset_free(mddev->bio_set);
+}
+
EXPORT_SYMBOL_GPL(md_stop);
static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
set_disk_ro(disk, 0);
__md_stop_writes(mddev);
- md_stop(mddev);
+ __md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
sector_t *first_bad, int *bad_sectors)
{
int hi;
- int lo = 0;
+ int lo;
u64 *p = bb->page;
- int rv = 0;
+ int rv;
sector_t target = s + sectors;
unsigned seq;
retry:
seq = read_seqbegin(&bb->lock);
-
+ lo = 0;
+ rv = 0;
hi = bb->count;
/* Binary search between lo and hi for 'target'
*/
one_write_done(r10_bio);
if (dec_rdev)
- rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
}
/*
blocked_rdev = rrdev;
break;
}
+ if (rdev && (test_bit(Faulty, &rdev->flags)
+ || test_bit(Unmerged, &rdev->flags)))
+ rdev = NULL;
if (rrdev && (test_bit(Faulty, &rrdev->flags)
|| test_bit(Unmerged, &rrdev->flags)))
rrdev = NULL;
r10_bio->devs[i].bio = NULL;
r10_bio->devs[i].repl_bio = NULL;
- if (!rdev || test_bit(Faulty, &rdev->flags) ||
- test_bit(Unmerged, &rdev->flags)) {
+
+ if (!rdev && !rrdev) {
set_bit(R10BIO_Degraded, &r10_bio->state);
continue;
}
- if (test_bit(WriteErrorSeen, &rdev->flags)) {
+ if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
sector_t first_bad;
sector_t dev_sector = r10_bio->devs[i].addr;
int bad_sectors;
max_sectors = good_sectors;
}
}
- r10_bio->devs[i].bio = bio;
- atomic_inc(&rdev->nr_pending);
+ if (rdev) {
+ r10_bio->devs[i].bio = bio;
+ atomic_inc(&rdev->nr_pending);
+ }
if (rrdev) {
r10_bio->devs[i].repl_bio = bio;
atomic_inc(&rrdev->nr_pending);
for (i = 0; i < conf->copies; i++) {
struct bio *mbio;
int d = r10_bio->devs[i].devnum;
- if (!r10_bio->devs[i].bio)
- continue;
+ if (r10_bio->devs[i].bio) {
+ struct md_rdev *rdev = conf->mirrors[d].rdev;
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].bio = mbio;
+
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ choose_data_offset(r10_bio,
+ rdev));
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_private = r10_bio;
- mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
- r10_bio->devs[i].bio = mbio;
+ atomic_inc(&r10_bio->remaining);
- mbio->bi_sector = (r10_bio->devs[i].addr+
- choose_data_offset(r10_bio,
- conf->mirrors[d].rdev));
- mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
- mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
- mbio->bi_private = r10_bio;
+ cb = blk_check_plugged(raid10_unplug, mddev,
+ sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ else
+ plug = NULL;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!plug)
+ md_wakeup_thread(mddev->thread);
+ }
- atomic_inc(&r10_bio->remaining);
+ if (r10_bio->devs[i].repl_bio) {
+ struct md_rdev *rdev = conf->mirrors[d].replacement;
+ if (rdev == NULL) {
+ /* Replacement just got moved to main 'rdev' */
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].repl_bio = mbio;
+
+ mbio->bi_sector = (r10_bio->devs[i].addr +
+ choose_data_offset(
+ r10_bio, rdev));
+ mbio->bi_bdev = rdev->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+ mbio->bi_private = r10_bio;
- cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
- if (cb)
- plug = container_of(cb, struct raid10_plug_cb, cb);
- else
- plug = NULL;
- spin_lock_irqsave(&conf->device_lock, flags);
- if (plug) {
- bio_list_add(&plug->pending, mbio);
- plug->pending_cnt++;
- } else {
+ atomic_inc(&r10_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
- spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!plug)
- md_wakeup_thread(mddev->thread);
-
- if (!r10_bio->devs[i].repl_bio)
- continue;
-
- mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
- max_sectors);
- r10_bio->devs[i].repl_bio = mbio;
-
- /* We are actively writing to the original device
- * so it cannot disappear, so the replacement cannot
- * become NULL here
- */
- mbio->bi_sector = (r10_bio->devs[i].addr +
- choose_data_offset(
- r10_bio,
- conf->mirrors[d].replacement));
- mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
- mbio->bi_end_io = raid10_end_write_request;
- mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
- mbio->bi_private = r10_bio;
-
- atomic_inc(&r10_bio->remaining);
- spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
- md_wakeup_thread(mddev->thread);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
dev = &sh->dev[i];
if (!test_bit(R5_LOCKED, &dev->flags) &&
(test_bit(R5_UPTODATE, &dev->flags) ||
- test_and_clear_bit(R5_Discard, &dev->flags))) {
+ test_bit(R5_Discard, &dev->flags))) {
/* We can return any write requests */
struct bio *wbi, *wbi2;
pr_debug("Return write for disc %d\n", i);
+ if (test_and_clear_bit(R5_Discard, &dev->flags))
+ clear_bit(R5_UPTODATE, &dev->flags);
wbi = dev->written;
dev->written = NULL;
while (wbi && wbi->bi_sector <
!test_bit(STRIPE_DEGRADED, &sh->state),
0);
}
- }
+ } else if (test_bit(R5_Discard, &sh->dev[i].flags))
+ clear_bit(R5_Discard, &sh->dev[i].flags);
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
handle_failed_sync(conf, sh, &s);
}
- /*
- * might be able to return some write requests if the parity blocks
- * are safe, or on a failed drive
- */
- pdev = &sh->dev[sh->pd_idx];
- s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
- || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
- qdev = &sh->dev[sh->qd_idx];
- s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
- || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
- || conf->level < 6;
-
- if (s.written &&
- (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
- && !test_bit(R5_LOCKED, &pdev->flags)
- && (test_bit(R5_UPTODATE, &pdev->flags) ||
- test_bit(R5_Discard, &pdev->flags))))) &&
- (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
- && !test_bit(R5_LOCKED, &qdev->flags)
- && (test_bit(R5_UPTODATE, &qdev->flags) ||
- test_bit(R5_Discard, &qdev->flags))))))
- handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
-
- /* Now we might consider reading some blocks, either to check/generate
- * parity, or to satisfy requests
- * or to load a block that is being partially written.
- */
- if (s.to_read || s.non_overwrite
- || (conf->level == 6 && s.to_write && s.failed)
- || (s.syncing && (s.uptodate + s.compute < disks))
- || s.replacing
- || s.expanding)
- handle_stripe_fill(sh, &s, disks);
-
/* Now we check to see if any write operations have recently
* completed
*/
s.dec_preread_active = 1;
}
+ /*
+ * might be able to return some write requests if the parity blocks
+ * are safe, or on a failed drive
+ */
+ pdev = &sh->dev[sh->pd_idx];
+ s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
+ qdev = &sh->dev[sh->qd_idx];
+ s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
+ || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
+ || conf->level < 6;
+
+ if (s.written &&
+ (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
+ && !test_bit(R5_LOCKED, &pdev->flags)
+ && (test_bit(R5_UPTODATE, &pdev->flags) ||
+ test_bit(R5_Discard, &pdev->flags))))) &&
+ (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
+ && !test_bit(R5_LOCKED, &qdev->flags)
+ && (test_bit(R5_UPTODATE, &qdev->flags) ||
+ test_bit(R5_Discard, &qdev->flags))))))
+ handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+
+ /* Now we might consider reading some blocks, either to check/generate
+ * parity, or to satisfy requests
+ * or to load a block that is being partially written.
+ */
+ if (s.to_read || s.non_overwrite
+ || (conf->level == 6 && s.to_write && s.failed)
+ || (s.syncing && (s.uptodate + s.compute < disks))
+ || s.replacing
+ || s.expanding)
+ handle_stripe_fill(sh, &s, disks);
+
/* Now to consider new write requests and what else, if anything
* should be read. We do not handle new writes when:
* 1/ A 'write' operation (copy+xor) is already in flight.
* discard data disk but write parity disk
*/
stripe = stripe * PAGE_SIZE;
+ /* Round up to power of 2, as discard handling
+ * currently assumes that */
+ while ((stripe-1) & stripe)
+ stripe = (stripe | (stripe-1)) + 1;
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
/*
*/
static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
- if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
- BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
- REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+ if (!CHIP_IS_E1x(bp)) {
+ u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+ BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+ 1 << BP_FUNC(bp));
+ }
}
}
cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
- cpw32_f(HiTxRingAddr, 0);
- cpw32_f(HiTxRingAddr + 4, 0);
-
- ring_dma = cp->ring_dma;
- cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
- ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
- cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
- cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
cp_start_hw(cp);
cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
cpw8(Config5, cpr8(Config5) & PMEStatus);
+ cpw32_f(HiTxRingAddr, 0);
+ cpw32_f(HiTxRingAddr + 4, 0);
+
+ ring_dma = cp->ring_dma;
+ cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+ ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+ cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+ cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
cpw16(MultiIntr, 0);
cpw8_f(Cfg9346, Cfg9346_Lock);
struct omap_dss_output *out;
enum omap_dss_output_id id;
- id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
+ switch (module) {
+ case 0:
+ id = OMAP_DSS_OUTPUT_DSI1;
+ break;
+ case 1:
+ id = OMAP_DSS_OUTPUT_DSI2;
+ break;
+ default:
+ return NULL;
+ }
out = omap_dss_get_output(id);
- return out->pdev;
+ return out ? out->pdev : NULL;
}
static inline void dsi_write_reg(struct platform_device *dsidev,
dss.dss_clk = clk;
- clk = clk_get(NULL, dss.feat->clk_name);
- if (IS_ERR(clk)) {
- DSSERR("Failed to get %s\n", dss.feat->clk_name);
- r = PTR_ERR(clk);
- goto err;
+ if (dss.feat->clk_name) {
+ clk = clk_get(NULL, dss.feat->clk_name);
+ if (IS_ERR(clk)) {
+ DSSERR("Failed to get %s\n", dss.feat->clk_name);
+ r = PTR_ERR(clk);
+ goto err;
+ }
+ } else {
+ clk = NULL;
}
dss.dpll4_m4_ck = clk;
if (cpu_is_omap24xx())
src = &omap24xx_dss_feats;
- else if (cpu_is_omap34xx())
- src = &omap34xx_dss_feats;
else if (cpu_is_omap3630())
src = &omap3630_dss_feats;
+ else if (cpu_is_omap34xx())
+ src = &omap34xx_dss_feats;
else if (cpu_is_omap44xx())
src = &omap44xx_dss_feats;
else if (soc_is_omap54xx())
{
mutex_lock(&hdmi.lock);
- if (hdmi_runtime_get())
+ if (hdmi_runtime_get()) {
+ mutex_unlock(&hdmi.lock);
return;
+ }
hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s);
hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s);
case OMAPFB_WAITFORVSYNC:
DBG("ioctl WAITFORVSYNC\n");
- if (!display && !display->output && !display->output->manager) {
+ if (!display || !display->output || !display->output->manager) {
r = -EINVAL;
break;
}
u32 clkrate;
u32 rev;
u32 flags;
+ void (*set_mpu_wkup_lat)(struct device *dev, long set);
};
#endif
};
extern void xfrm_init(void);
-extern void xfrm4_init(int rt_hash_size);
+extern void xfrm4_init(void);
extern int xfrm_state_init(struct net *net);
extern void xfrm_state_fini(struct net *net);
extern void xfrm4_state_init(void);
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
+ do_cache = true;
if (type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
fi = NULL;
if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
fl4->flowi4_proto))
flags &= ~RTCF_LOCAL;
+ else
+ do_cache = false;
/* If multicast route do not exist use
* default one, but do not gateway in this case.
* Yes, it is hack.
}
fnhe = NULL;
- do_cache = fi != NULL;
- if (fi) {
+ do_cache &= fi != NULL;
+ if (do_cache) {
struct rtable __rcu **prth;
struct fib_nh *nh = &FIB_RES_NH(*res);
pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
- xfrm4_init(ip_rt_max_size);
+ xfrm4_init();
#endif
rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
-void __init xfrm4_init(int rt_max_size)
+void __init xfrm4_init(void)
{
- /*
- * Select a default value for the gc_thresh based on the main route
- * table hash size. It seems to me the worst case scenario is when
- * we have ipsec operating in transport mode, in which we create a
- * dst_entry per socket. The xfrm gc algorithm starts trying to remove
- * entries at gc_thresh, and prevents new allocations as 2*gc_thresh
- * so lets set an initial xfrm gc_thresh value at the rt_max_size/2.
- * That will let us store an ipsec connection per route table entry,
- * and start cleaning when were 1/2 full
- */
- xfrm4_dst_ops.gc_thresh = rt_max_size/2;
dst_entries_init(&xfrm4_dst_ops);
xfrm4_state_init();
return adtfn(set, &nip, timeout, flags);
}
+ ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
if (!cidr || cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
- } else
- ip_to = ip;
+ }
hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem data = { };
- u32 ip, ip_to = 0, p = 0, port, port_to;
+ u32 ip, ip_to, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
return ip_set_eexist(ret, flags) ? 0 : ret;
}
- ip = ntohl(data.ip);
+ ip_to = ip = ntohl(data.ip);
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
if (!cidr || cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
- } else
- ip_to = ip;
+ }
port_to = port = ntohs(data.port);
if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { };
- u32 ip, ip_to = 0, p = 0, port, port_to;
+ u32 ip, ip_to, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
return ip_set_eexist(ret, flags) ? 0 : ret;
}
- ip = ntohl(data.ip);
+ ip_to = ip = ntohl(data.ip);
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
if (!cidr || cidr > 32)
return -IPSET_ERR_INVALID_CIDR;
ip_set_mask_from_to(ip, ip_to, cidr);
- } else
- ip_to = ip;
+ }
port_to = port = ntohs(data.port);
if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
- u32 ip, ip_to = 0, p = 0, port, port_to;
- u32 ip2_from = 0, ip2_to, ip2_last, ip2;
+ u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip2_from, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
u8 cidr;
return ip_set_eexist(ret, flags) ? 0 : ret;
}
+ ip_to = ip;
if (tb[IPSET_ATTR_IP_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
if (ret)
if (port > port_to)
swap(port, port_to);
}
+
+ ip2_to = ip2_from;
if (tb[IPSET_ATTR_IP2_TO]) {
ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
if (ret)
static LIST_HEAD(cttimeout_list);
static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
- [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING },
+ [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING,
+ .len = CTNL_TIMEOUT_NAME_MAX - 1},
[CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 },
[CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 },
[CTA_TIMEOUT_DATA] = { .type = NLA_NESTED },
}
mutex_unlock(&bus->cmd_mutex);
snd_hda_power_down(codec);
- if (res && *res == -1 && bus->rirb_error) {
+ if (!codec->in_pm && res && *res == -1 && bus->rirb_error) {
if (bus->response_reset) {
snd_printd("hda_codec: resetting BUS due to "
"fatal communication error\n");
goto again;
}
/* clear reset-flag when the communication gets recovered */
- if (!err)
+ if (!err || codec->in_pm)
bus->response_reset = 0;
return err;
}
{
unsigned int state;
+ codec->in_pm = 1;
+
if (codec->patch_ops.suspend)
codec->patch_ops.suspend(codec);
hda_cleanup_all_streams(codec);
codec->power_transition = 0;
codec->power_jiffies = jiffies;
spin_unlock(&codec->power_lock);
+ codec->in_pm = 0;
return state;
}
*/
static void hda_call_codec_resume(struct hda_codec *codec)
{
+ codec->in_pm = 1;
+
/* set as if powered on for avoiding re-entering the resume
* in the resume / power-save sequence
*/
snd_hda_codec_resume_cache(codec);
}
snd_hda_jack_report_sync(codec);
+
+ codec->in_pm = 0;
snd_hda_power_down(codec); /* flag down before returning */
}
#endif /* CONFIG_PM */
unsigned int power_on :1; /* current (global) power-state */
unsigned int d3_stop_clk:1; /* support D3 operation without BCLK */
unsigned int pm_down_notified:1; /* PM notified to controller */
+ unsigned int in_pm:1; /* suspend/resume being performed */
int power_transition; /* power-state in transition */
int power_count; /* current (global) power refcount */
struct delayed_work power_work; /* delayed task for powerdown */
#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
+#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
+
+/* quirks for Intel PCH */
+#define AZX_DCAPS_INTEL_PCH \
+ (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
+ AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_PM_RUNTIME)
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
{
struct azx *chip = bus->private_data;
+ if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+ return;
+
if (power_up)
pm_runtime_get_sync(&chip->pci->dev);
else
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
- if (!power_save_controller)
+ if (!power_save_controller ||
+ !(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
return -EAGAIN;
azx_stop_chip(chip);
static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* CPT */
{ PCI_DEVICE(0x8086, 0x1c20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* PBG */
{ PCI_DEVICE(0x8086, 0x1d20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE},
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Panther Point */
{ PCI_DEVICE(0x8086, 0x1e20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Lynx Point */
{ PCI_DEVICE(0x8086, 0x8c20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Lynx Point-LP */
{ PCI_DEVICE(0x8086, 0x9c20),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Lynx Point-LP */
{ PCI_DEVICE(0x8086, 0x9c21),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0c0c),
- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
{ PCI_DEVICE(0x8086, 0x0d0c),
- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
/* 5 Series/3400 */
{ PCI_DEVICE(0x8086, 0x3b56),
- .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE | AZX_DCAPS_COUNT_LPIB_DELAY },
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH },
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
memcpy(cfg->speaker_pins, cfg->line_out_pins,
sizeof(cfg->speaker_pins));
cfg->line_outs = 0;
+ memset(cfg->line_out_pins, 0, sizeof(cfg->line_out_pins));
}
return 0;
{ .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
{ .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 },
{ .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
+ { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
.patch = patch_alc861 },
{ .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
static unsigned int arizona_sysclk_48k_rates[] = {
6144000,
12288000,
- 22579200,
+ 24576000,
49152000,
73728000,
98304000,
static unsigned int arizona_sysclk_44k1_rates[] = {
5644800,
11289600,
- 24576000,
+ 22579200,
45158400,
67737600,
90316800,
gpio_nreset = cs4271plat->gpio_nreset;
if (gpio_nreset >= 0)
- if (gpio_request(gpio_nreset, "CS4271 Reset"))
+ if (devm_gpio_request(codec->dev, gpio_nreset, "CS4271 Reset"))
gpio_nreset = -EINVAL;
if (gpio_nreset >= 0) {
/* Reset codec */
static int cs4271_remove(struct snd_soc_codec *codec)
{
struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
- int gpio_nreset;
- gpio_nreset = cs4271->gpio_nreset;
-
- if (gpio_is_valid(gpio_nreset)) {
+ if (gpio_is_valid(cs4271->gpio_nreset))
/* Set codec to the reset state */
- gpio_set_value(gpio_nreset, 0);
- gpio_free(gpio_nreset);
- }
+ gpio_set_value(cs4271->gpio_nreset, 0);
return 0;
};
printk(KERN_WARNING "%s: got err interrupt 0x%lx\n",
__func__, cause);
writel(cause, priv->io + KIRKWOOD_ERR_CAUSE);
- return IRQ_HANDLED;
}
/* we've enabled only bytes interrupts ... */
}
dram = mv_mbus_dram_info();
- addr = virt_to_phys(substream->dma_buffer.area);
+ addr = substream->dma_buffer.addr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
prdata->play_stream = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
do {
cpu_relax();
value = readl(io + KIRKWOOD_DCO_SPCR_STATUS);
- value &= KIRKWOOD_DCO_SPCR_STATUS;
+ value &= KIRKWOOD_DCO_SPCR_STATUS_DCO_LOCK;
} while (value == 0);
}
int cmd, struct snd_soc_dai *dai)
{
struct kirkwood_dma_data *priv = snd_soc_dai_get_drvdata(dai);
- unsigned long value;
-
- /*
- * specs says KIRKWOOD_PLAYCTL must be read 2 times before
- * changing it. So read 1 time here and 1 later.
- */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
+ uint32_t ctl, value;
+
+ ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
+ if (ctl & KIRKWOOD_PLAYCTL_PAUSE) {
+ unsigned timeout = 5000;
+ /*
+ * The Armada510 spec says that if we enter pause mode, the
+ * busy bit must be read back as clear _twice_. Make sure
+ * we respect that otherwise we get DMA underruns.
+ */
+ do {
+ value = ctl;
+ ctl = readl(priv->io + KIRKWOOD_PLAYCTL);
+ if (!((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY))
+ break;
+ udelay(1);
+ } while (timeout--);
+
+ if ((ctl | value) & KIRKWOOD_PLAYCTL_PLAY_BUSY)
+ dev_notice(dai->dev, "timed out waiting for busy to deassert: %08x\n",
+ ctl);
+ }
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- /* stop audio, enable interrupts */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value |= KIRKWOOD_PLAYCTL_PAUSE;
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
-
value = readl(priv->io + KIRKWOOD_INT_MASK);
value |= KIRKWOOD_INT_CAUSE_PLAY_BYTES;
writel(value, priv->io + KIRKWOOD_INT_MASK);
/* configure audio & enable i2s playback */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value &= ~KIRKWOOD_PLAYCTL_BURST_MASK;
- value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE
+ ctl &= ~KIRKWOOD_PLAYCTL_BURST_MASK;
+ ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE
| KIRKWOOD_PLAYCTL_SPDIF_EN);
if (priv->burst == 32)
- value |= KIRKWOOD_PLAYCTL_BURST_32;
+ ctl |= KIRKWOOD_PLAYCTL_BURST_32;
else
- value |= KIRKWOOD_PLAYCTL_BURST_128;
- value |= KIRKWOOD_PLAYCTL_I2S_EN;
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl |= KIRKWOOD_PLAYCTL_BURST_128;
+ ctl |= KIRKWOOD_PLAYCTL_I2S_EN;
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
case SNDRV_PCM_TRIGGER_STOP:
/* stop audio, disable interrupts */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
value = readl(priv->io + KIRKWOOD_INT_MASK);
value &= ~KIRKWOOD_INT_CAUSE_PLAY_BYTES;
writel(value, priv->io + KIRKWOOD_INT_MASK);
/* disable all playbacks */
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
case SNDRV_PCM_TRIGGER_SUSPEND:
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl |= KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE;
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
- writel(value, priv->io + KIRKWOOD_PLAYCTL);
+ ctl &= ~(KIRKWOOD_PLAYCTL_PAUSE | KIRKWOOD_PLAYCTL_I2S_MUTE);
+ writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
default:
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- /* stop audio, enable interrupts */
- value = readl(priv->io + KIRKWOOD_RECCTL);
- value |= KIRKWOOD_RECCTL_PAUSE;
- writel(value, priv->io + KIRKWOOD_RECCTL);
-
value = readl(priv->io + KIRKWOOD_INT_MASK);
value |= KIRKWOOD_INT_CAUSE_REC_BYTES;
writel(value, priv->io + KIRKWOOD_INT_MASK);
{
.name = "Sub",
.stream_name = "Sub",
- .cpu_dai_name = "wm5110-aif3",
+ .cpu_dai_name = "wm5102-aif3",
.codec_dai_name = "wm9081-hifi",
.codec_name = "wm9081.1-006c",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
{
.name = "Sub",
.stream_name = "Sub",
- .cpu_dai_name = "wm5102-aif3",
+ .cpu_dai_name = "wm5110-aif3",
.codec_dai_name = "wm9081-hifi",
.codec_name = "wm9081.1-006c",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
struct snd_usb_midi_out_endpoint* ep;
struct snd_rawmidi_substream *substream;
int active;
+ bool autopm_reference;
uint8_t cable; /* cable number << 4 */
uint8_t state;
#define STATE_UNKNOWN 0
return -ENXIO;
}
err = usb_autopm_get_interface(umidi->iface);
- if (err < 0)
+ port->autopm_reference = err >= 0;
+ if (err < 0 && err != -EACCES)
return -EIO;
substream->runtime->private_data = port;
port->state = STATE_UNKNOWN;
static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream)
{
struct snd_usb_midi* umidi = substream->rmidi->private_data;
+ struct usbmidi_out_port *port = substream->runtime->private_data;
substream_open(substream, 0);
- usb_autopm_put_interface(umidi->iface);
+ if (port->autopm_reference)
+ usb_autopm_put_interface(umidi->iface);
return 0;
}
return ret;
if (subs->sync_endpoint)
- ret = snd_usb_endpoint_set_params(subs->data_endpoint,
+ ret = snd_usb_endpoint_set_params(subs->sync_endpoint,
subs->pcm_format,
subs->channels,
subs->period_bytes,