author	Greg Kroah-Hartman <gregkh@google.com>	2018-08-15 18:23:58 +0200
committer	Greg Kroah-Hartman <gregkh@google.com>	2018-08-15 18:23:58 +0200
commit	ff21484341cce8ddfebd43d90c2efa609077fc34 (patch)
tree	c506157c07c89b103598bbc164ee6b9168af9a5f /fs
parent	5f8dcf617235a9a30f8ce3e80de4c0379187ad6c (diff)
parent	30a97c1e2dc39f45d9deeeccc2733278fc285d5e (diff)
Merge 4.4.148 into android-4.4-p

Changes in 4.4.148
	ext4: fix check to prevent initializing reserved inodes
	tpm: fix race condition in tpm_common_write()
	ipv4+ipv6: Make INET*_ESP select CRYPTO_ECHAINIV
	fork: unconditionally clear stack on fork
	parisc: Enable CONFIG_MLONGCALLS by default
	parisc: Define mb() and add memory barriers to assembler unlock sequences
	xen/netfront: don't cache skb_shinfo()
	ACPI / LPSS: Add missing prv_offset setting for byt/cht PWM devices
	scsi: sr: Avoid that opening a CD-ROM hangs with runtime power management enabled
	root dentries need RCU-delayed freeing
	fix mntput/mntput race
	fix __legitimize_mnt()/mntput() race
	IB/core: Make testing MR flags for writability a static inline function
	IB/mlx4: Mark user MR as writable if actual virtual memory is writable
	IB/ocrdma: fix out of bounds access to local buffer
	ARM: dts: imx6sx: fix irq for pcie bridge
	x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
	x86/speculation: Protect against userspace-userspace spectreRSB
	kprobes/x86: Fix %p uses in error messages
	x86/irqflags: Provide a declaration for native_save_fl
	x86/speculation/l1tf: Increase 32bit PAE __PHYSICAL_PAGE_SHIFT
	x86/mm: Move swap offset/type up in PTE to work around erratum
	x86/mm: Fix swap entry comment and macro
	mm: x86: move _PAGE_SWP_SOFT_DIRTY from bit 7 to bit 1
	x86/speculation/l1tf: Change order of offset/type in swap entry
	x86/speculation/l1tf: Protect swap entries against L1TF
	x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation
	x86/speculation/l1tf: Make sure the first page is always reserved
	x86/speculation/l1tf: Add sysfs reporting for l1tf
	mm: Add vm_insert_pfn_prot()
	mm: fix cache mode tracking in vm_insert_mixed()
	x86/speculation/l1tf: Disallow non privileged high MMIO PROT_NONE mappings
	x86/speculation/l1tf: Limit swap file size to MAX_PA/2
	x86/bugs: Move the l1tf function and define pr_fmt properly
	x86/speculation/l1tf: Extend 64bit swap file size limit
	x86/cpufeatures: Add detection of L1D cache flush support.
	x86/speculation/l1tf: Protect PAE swap entries against L1TF
	x86/speculation/l1tf: Fix up pte->pfn conversion for PAE
	x86/speculation/l1tf: Invert all not present mappings
	x86/speculation/l1tf: Make pmd/pud_mknotpresent() invert
	x86/mm/pat: Make set_memory_np() L1TF safe
	x86/mm/kmmio: Make the tracer robust against L1TF
	x86/speculation/l1tf: Fix up CPU feature flags
	x86/init: fix build with CONFIG_SWAP=n
	x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED architectures
	Linux 4.4.148

Change-Id: Id593840e382389d43e5e54f9d1cfa1d679d8d8be
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Diffstat (limited to 'fs')
-rw-r--r--	fs/dcache.c	6
-rw-r--r--	fs/ext4/ialloc.c	5
-rw-r--r--	fs/ext4/super.c	8
-rw-r--r--	fs/namespace.c	28
4 files changed, 35 insertions, 12 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index a69e9b42814a..0f3937dd29f6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1954,10 +1954,12 @@ struct dentry *d_make_root(struct inode *root_inode)
 		static const struct qstr name = QSTR_INIT("/", 1);
 
 		res = __d_alloc(root_inode->i_sb, &name);
-		if (res)
+		if (res) {
+			res->d_flags |= DCACHE_RCUACCESS;
 			d_instantiate(res, root_inode);
-		else
+		} else {
 			iput(root_inode);
+		}
 	}
 	return res;
 }
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 041117fd8fd7..0963213e9cd3 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1308,7 +1308,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 			    ext4_itable_unused_count(sb, gdp)),
 			    sbi->s_inodes_per_block);
 
-	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+	if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+	    ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+			       ext4_itable_unused_count(sb, gdp)) <
+			      EXT4_FIRST_INO(sb)))) {
 		ext4_error(sb, "Something is wrong with group %u: "
 			   "used itable blocks: %d; "
 			   "itable unused count: %u",
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a26b71d04ab1..71c9104a7779 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2874,14 +2874,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
 		if (!gdp)
 			continue;
 
-		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
-			continue;
-		if (group != 0)
+		if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
 			break;
-		ext4_error(sb, "Inode table for bg 0 marked as "
-			   "needing zeroing");
-		if (sb->s_flags & MS_RDONLY)
-			return ngroups;
 	}
 
 	return group;
diff --git a/fs/namespace.c b/fs/namespace.c
index ce282400d13e..996e1046e1ff 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -605,12 +605,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
 		return 0;
 	mnt = real_mount(bastard);
 	mnt_add_count(mnt, 1);
+	smp_mb();			// see mntput_no_expire()
 	if (likely(!read_seqretry(&mount_lock, seq)))
 		return 0;
 	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
 		mnt_add_count(mnt, -1);
 		return 1;
 	}
+	lock_mount_hash();
+	if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+		mnt_add_count(mnt, -1);
+		unlock_mount_hash();
+		return 1;
+	}
+	unlock_mount_hash();
+	/* caller will mntput() */
 	return -1;
 }
 
@@ -1142,12 +1151,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
 static void mntput_no_expire(struct mount *mnt)
 {
 	rcu_read_lock();
-	mnt_add_count(mnt, -1);
-	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+	if (likely(READ_ONCE(mnt->mnt_ns))) {
+		/*
+		 * Since we don't do lock_mount_hash() here,
+		 * ->mnt_ns can change under us.  However, if it's
+		 * non-NULL, then there's a reference that won't
+		 * be dropped until after an RCU delay done after
+		 * turning ->mnt_ns NULL.  So if we observe it
+		 * non-NULL under rcu_read_lock(), the reference
+		 * we are dropping is not the final one.
+		 */
+		mnt_add_count(mnt, -1);
 		rcu_read_unlock();
 		return;
 	}
 	lock_mount_hash();
+	/*
+	 * make sure that if __legitimize_mnt() has not seen us grab
+	 * mount_lock, we'll see their refcount increment here.
+	 */
+	smp_mb();
+	mnt_add_count(mnt, -1);
 	if (mnt_get_count(mnt)) {
 		rcu_read_unlock();
 		unlock_mount_hash();