S: D-64295
S: Germany
+N: Avi Kivity
+E: avi.kivity@gmail.com
+D: Kernel-based Virtual Machine (KVM)
+S: Ra'annana, Israel
+
N: Andi Kleen
E: andi@firstfloor.org
U: http://www.halobates.de
F: arch/x86/include/asm/geode.h
AMD IOMMU (AMD-VI)
-M: Joerg Roedel <joerg.roedel@amd.com>
+M: Joerg Roedel <joro@8bytes.org>
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
-S: Supported
+S: Maintained
F: drivers/iommu/amd_iommu*.[ch]
F: include/linux/amd-iommu.h
F: include/linux/sunrpc/
KERNEL VIRTUAL MACHINE (KVM)
-M: Avi Kivity <avi@redhat.com>
M: Marcelo Tosatti <mtosatti@redhat.com>
+M: Gleb Natapov <gleb@redhat.com>
L: kvm@vger.kernel.org
W: http://kvm.qumranet.com
S: Supported
interrupts = <2 7 0>;
};
+ sclpc@3c00 {
+ compatible = "fsl,mpc5200-lpbfifo";
+ reg = <0x3c00 0x60>;
+ interrupts = <2 23 0>;
+ };
+
i2c@3d00 {
#address-cells = <1>;
#size-cells = <0>;
reg = <0>;
};
};
-
- sclpc@3c00 {
- compatible = "fsl,mpc5200-lpbfifo";
- reg = <0x3c00 0x60>;
- interrupts = <3 23 0>;
- };
};
localbus {
#gpio-cells = <2>;
};
- psc@2000 { /* PSC1 in ac97 mode */
+ audioplatform: psc@2000 { /* PSC1 in ac97 mode */
compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97";
cell-index = <0>;
};
localbus {
status = "disabled";
};
+
+ sound {
+ compatible = "phytec,pcm030-audio-fabric";
+ asoc-platform = <&audioplatform>;
+ };
};
case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break;
case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
- default:
- pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n",
- __func__, virq, l1irq, l2irq);
- return -EINVAL;
+ case MPC52xx_IRQ_L1_CRIT:
+ pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
+ __func__, l2irq);
+ irq_set_chip(virq, &no_irq_chip);
+ return 0;
}
irq_set_chip_and_handler(virq, irqchip, handle_level_irq);
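For context, a minimal sketch (not the MPC52xx driver itself) of the pattern the hunk above adopts: instead of failing the mapping, an irq_domain map hook can install the dummy no_irq_chip for a line the hardware cannot deliver, keeping the mapping valid but inert. The domain, chip name and cutoff value below are illustrative assumptions.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/printk.h>

static struct irq_chip demo_irqchip = {
	.name = "demo",
};

/* Sketch: map callback for a hypothetical interrupt controller. */
static int demo_irq_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hwirq)
{
	if (hwirq >= 32) {	/* assumed cutoff for unsupported lines */
		/*
		 * The line exists in the binding but cannot be delivered:
		 * keep the mapping valid but inert so request_irq()
		 * callers do not have to special-case it.
		 */
		pr_warn("demo: hwirq %lu is unsupported, nopping it\n",
			(unsigned long)hwirq);
		irq_set_chip(virq, &no_irq_chip);
		return 0;
	}

	irq_set_chip_and_handler(virq, &demo_irqchip, handle_level_irq);
	return 0;
}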
static int intel_iommu_add_device(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_dev *bridge, *dma_pdev;
+ struct pci_dev *bridge, *dma_pdev = NULL;
struct iommu_group *group;
int ret;
dma_pdev = pci_get_domain_bus_and_slot(
pci_domain_nr(pdev->bus),
bridge->subordinate->number, 0);
- else
+ if (!dma_pdev)
dma_pdev = pci_dev_get(bridge);
} else
dma_pdev = pci_dev_get(pdev);
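For reference, a small illustration (not the intel-iommu code) of the fallback the hunk above introduces: pci_get_domain_bus_and_slot() can return NULL when no device sits at the requested slot, so the code should only fall back to pinning the bridge itself in that case, keeping the reference counting balanced. The helper name is made up.

#include <linux/pci.h>

/* Sketch: pick the device that DMA aliasing should be attributed to. */
static struct pci_dev *demo_dma_alias(struct pci_dev *pdev,
				      struct pci_dev *bridge)
{
	struct pci_dev *dma_pdev = NULL;

	if (bridge->subordinate)
		/* May fail if devfn 0 on that bus is not populated. */
		dma_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
						       bridge->subordinate->number,
						       0);
	if (!dma_pdev)
		dma_pdev = pci_dev_get(bridge);

	return dma_pdev;	/* caller must pci_dev_put() */
}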
stats[i], val, offs);
}
seq_printf(s, "\n");
+ dput(dent);
return 0;
}
ports of 8 GPIO pins each.
config PINCTRL_SAMSUNG
- bool "Samsung pinctrl driver"
+ bool
depends on OF && GPIOLIB
select PINMUX
select PINCONF
down_write(&mm->mmap_sem);
vma = find_vma(mm, m.addr);
- ret = -EINVAL;
if (!vma ||
vma->vm_ops != &privcmd_vm_ops ||
(m.addr != vma->vm_start) ||
((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
!privcmd_enforce_singleshot_mapping(vma)) {
up_write(&mm->mmap_sem);
+ ret = -EINVAL;
goto out;
}
up_write(&mm->mmap_sem);
- if (state.global_error && (version == 1)) {
- /* Write back errors in second pass. */
- state.user_mfn = (xen_pfn_t *)m.arr;
- state.err = err_array;
- ret = traverse_pages(m.num, sizeof(xen_pfn_t),
- &pagelist, mmap_return_errors_v1, &state);
+ if (version == 1) {
+ if (state.global_error) {
+ /* Write back errors in second pass. */
+ state.user_mfn = (xen_pfn_t *)m.arr;
+ state.err = err_array;
+ ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+ &pagelist, mmap_return_errors_v1, &state);
+ } else
+ ret = 0;
+
} else if (version == 2) {
ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
if (ret)
end = start + (range->len >> sb->s_blocksize_bits) - 1;
minlen = range->minlen >> sb->s_blocksize_bits;
- if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) ||
- unlikely(start >= max_blks))
+ if (minlen > EXT3_BLOCKS_PER_GROUP(sb) ||
+ start >= max_blks ||
+ range->len < sb->s_blocksize)
return -EINVAL;
if (end >= max_blks)
end = max_blks - 1;
BUG_ON(!th->t_trans_id);
- dquot_initialize(inode);
+ reiserfs_write_unlock(inode->i_sb);
err = dquot_alloc_inode(inode);
+ reiserfs_write_lock(inode->i_sb);
if (err)
goto out_end_trans;
if (!dir->i_nlink) {
out_end_trans:
journal_end(th, th->t_super, th->t_blocks_allocated);
+ reiserfs_write_unlock(inode->i_sb);
/* Drop can be outside and it needs more credits so it's better to have it outside */
dquot_drop(inode);
+ reiserfs_write_lock(inode->i_sb);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
/* must be turned off for recursive notify_change calls */
ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
- depth = reiserfs_write_lock_once(inode->i_sb);
if (is_quota_modification(inode, attr))
dquot_initialize(inode);
-
+ depth = reiserfs_write_lock_once(inode->i_sb);
if (attr->ia_valid & ATTR_SIZE) {
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
error = journal_begin(&th, inode->i_sb, jbegin_count);
if (error)
goto out;
+ reiserfs_write_unlock_once(inode->i_sb, depth);
error = dquot_transfer(inode, attr);
+ depth = reiserfs_write_lock_once(inode->i_sb);
if (error) {
journal_end(&th, inode->i_sb, jbegin_count);
goto out;
key2type(&(key->on_disk_key)));
#endif
+ reiserfs_write_unlock(inode->i_sb);
retval = dquot_alloc_space_nodirty(inode, pasted_size);
+ reiserfs_write_lock(inode->i_sb);
if (retval) {
pathrelse(search_path);
return retval;
"reiserquota insert_item(): allocating %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
+ reiserfs_write_unlock(inode->i_sb);
/* We can't dirty inode here. It would be immediately written but
* appropriate stat item isn't inserted yet... */
retval = dquot_alloc_space_nodirty(inode, quota_bytes);
+ reiserfs_write_lock(inode->i_sb);
if (retval) {
pathrelse(path);
return retval;
retval = remove_save_link_only(s, &save_link_key, 0);
continue;
}
+ reiserfs_write_unlock(s);
dquot_initialize(inode);
+ reiserfs_write_lock(s);
if (truncate && S_ISDIR(inode->i_mode)) {
/* We got a truncate request for a dir which is impossible.
kfree(qf_names[i]);
#endif
err = -EINVAL;
- goto out_err;
+ goto out_unlock;
}
#ifdef CONFIG_QUOTA
handle_quota_files(s, qf_names, &qfmt);
if (blocks) {
err = reiserfs_resize(s, blocks);
if (err != 0)
- goto out_err;
+ goto out_unlock;
}
if (*mount_flags & MS_RDONLY) {
/* it is read-only already */
goto out_ok;
+ /*
+ * Drop write lock. Quota will retake it when needed and lock
+ * ordering requires calling dquot_suspend() without it.
+ */
+ reiserfs_write_unlock(s);
err = dquot_suspend(s, -1);
if (err < 0)
goto out_err;
+ reiserfs_write_lock(s);
/* try to remount file system with read-only permissions */
if (sb_umount_state(rs) == REISERFS_VALID_FS
err = journal_begin(&th, s, 10);
if (err)
- goto out_err;
+ goto out_unlock;
/* Mounting a rw partition read-only. */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
if (reiserfs_is_journal_aborted(journal)) {
err = journal->j_errno;
- goto out_err;
+ goto out_unlock;
}
handle_data_mode(s, mount_options);
s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
err = journal_begin(&th, s, 10);
if (err)
- goto out_err;
+ goto out_unlock;
/* Mount a partition which is read-only, read-write */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
SB_JOURNAL(s)->j_must_wait = 1;
err = journal_end(&th, s, 10);
if (err)
- goto out_err;
+ goto out_unlock;
if (!(*mount_flags & MS_RDONLY)) {
+ /*
+ * Drop write lock. Quota will retake it when needed and lock
+ * ordering requires calling dquot_resume() without it.
+ */
+ reiserfs_write_unlock(s);
dquot_resume(s, -1);
+ reiserfs_write_lock(s);
finish_unfinished(s);
reiserfs_xattr_init(s, *mount_flags);
}
reiserfs_write_unlock(s);
return 0;
+out_unlock:
+ reiserfs_write_unlock(s);
out_err:
kfree(new_opts);
- reiserfs_write_unlock(s);
return err;
}
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
if (ret)
goto out;
+ reiserfs_write_unlock(dquot->dq_sb);
ret = dquot_commit(dquot);
+ reiserfs_write_lock(dquot->dq_sb);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
if (!ret && err)
ret = err;
- out:
+out:
reiserfs_write_unlock(dquot->dq_sb);
return ret;
}
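The reiserfs quota hunks above and below all follow the same shape, sketched here with a made-up helper name: release the per-superblock write lock around the dquot_* call and retake it afterwards, since (as the added comments note) lock ordering requires the quota operation to run without that lock held.

#include <linux/quotaops.h>
#include "reiserfs.h"	/* in-tree header providing reiserfs_write_lock()/unlock() */

/*
 * Illustrative only: bracket a quota operation with an unlock/lock pair,
 * as the hunks in this series do, so the quota code runs without the
 * reiserfs write lock held.
 */
static int demo_write_dquot(struct dquot *dquot)
{
	struct super_block *sb = dquot->dq_sb;
	int ret;

	reiserfs_write_unlock(sb);
	ret = dquot_commit(dquot);
	reiserfs_write_lock(sb);

	return ret;
}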
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
if (ret)
goto out;
+ reiserfs_write_unlock(dquot->dq_sb);
ret = dquot_acquire(dquot);
+ reiserfs_write_lock(dquot->dq_sb);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
if (!ret && err)
ret = err;
- out:
+out:
reiserfs_write_unlock(dquot->dq_sb);
return ret;
}
ret =
journal_begin(&th, dquot->dq_sb,
REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+ reiserfs_write_unlock(dquot->dq_sb);
if (ret) {
/* Release dquot anyway to avoid endless cycle in dqput() */
dquot_release(dquot);
goto out;
}
ret = dquot_release(dquot);
+ reiserfs_write_lock(dquot->dq_sb);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
if (!ret && err)
ret = err;
- out:
reiserfs_write_unlock(dquot->dq_sb);
+out:
return ret;
}
ret = journal_begin(&th, sb, 2);
if (ret)
goto out;
+ reiserfs_write_unlock(sb);
ret = dquot_commit_info(sb, type);
+ reiserfs_write_lock(sb);
err = journal_end(&th, sb, 2);
if (!ret && err)
ret = err;
- out:
+out:
reiserfs_write_unlock(sb);
return ret;
}
struct reiserfs_transaction_handle th;
int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA;
- if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt)))
- return -EINVAL;
+ reiserfs_write_lock(sb);
+ if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) {
+ err = -EINVAL;
+ goto out;
+ }
/* Quotafile not on the same filesystem? */
if (path->dentry->d_sb != sb) {
if (err)
goto out;
}
- err = dquot_quota_on(sb, type, format_id, path);
+ reiserfs_write_unlock(sb);
+ return dquot_quota_on(sb, type, format_id, path);
out:
+ reiserfs_write_unlock(sb);
return err;
}
tocopy = sb->s_blocksize - offset < towrite ?
sb->s_blocksize - offset : towrite;
tmp_bh.b_state = 0;
+ reiserfs_write_lock(sb);
err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
+ reiserfs_write_unlock(sb);
if (err)
goto out;
if (offset || tocopy != sb->s_blocksize)
flush_dcache_page(bh->b_page);
set_buffer_uptodate(bh);
unlock_buffer(bh);
+ reiserfs_write_lock(sb);
reiserfs_prepare_for_journal(sb, bh, 1);
journal_mark_dirty(current->journal_info, sb, bh);
if (!journal_quota)
reiserfs_add_ordered_list(inode, bh);
+ reiserfs_write_unlock(sb);
brelse(bh);
offset = 0;
towrite -= tocopy;
if ($l == 0x1) {
$len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1));
- } elsif ($l = 0x2) {
+ } elsif ($l == 0x2) {
$len = unpack("n", substr(${$cursor->[2]}, $cursor->[0], 2));
- } elsif ($l = 0x3) {
+ } elsif ($l == 0x3) {
$len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)) << 16;
$len = unpack("n", substr(${$cursor->[2]}, $cursor->[0] + 1, 2));
- } elsif ($l = 0x4) {
+ } elsif ($l == 0x4) {
$len = unpack("N", substr(${$cursor->[2]}, $cursor->[0], 4));
} else {
die $x509, ": ", $cursor->[0], ": ASN.1 element too long (", $l, ")\n";
if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
struct sel_netnode *tail;
tail = list_entry(
- rcu_dereference(sel_netnode_hash[idx].list.prev),
+ rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
+ lockdep_is_held(&sel_netnode_lock)),
struct sel_netnode, list);
list_del_rcu(&tail->list);
kfree_rcu(tail, rcu);
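As a closing aside, a minimal sketch of the rcu_dereference_protected() idiom the SELinux hunk switches to, for update-side code that already holds the serializing lock; the list, lock and structure names below are illustrative, not taken from the SELinux code.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
	struct list_head list;
	struct rcu_head rcu;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

/* Sketch: remove the oldest entry while holding the update-side lock. */
static void demo_trim_tail(void)
{
	struct demo_node *tail;

	spin_lock(&demo_lock);
	if (list_empty(&demo_list)) {
		spin_unlock(&demo_lock);
		return;
	}
	/*
	 * Writers can use rcu_dereference_protected(): no read-side
	 * barriers are needed because demo_lock serializes all updates,
	 * and lockdep verifies the lock is actually held.
	 */
	tail = list_entry(rcu_dereference_protected(demo_list.prev,
						    lockdep_is_held(&demo_lock)),
			  struct demo_node, list);
	list_del_rcu(&tail->list);
	kfree_rcu(tail, rcu);
	spin_unlock(&demo_lock);
}

Under CONFIG_PROVE_RCU, a plain rcu_dereference() in such update-side code would complain because no rcu_read_lock() is held; naming the lock silences the splat while still documenting what protects the access.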