author | Russell King <rmk+kernel@arm.linux.org.uk> | 2014-04-04 00:33:49 +0100
---|---|---
committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2014-04-04 00:33:49 +0100
commit | bce5669be3a8946952258a064ef26defeb887138 (patch) |
tree | 117386b9909882c000f822011c5ea6fdcbab3273 /mm |
parent | 95959e6a06720834fc80a210e37898341c63cb91 (diff) |
parent | 566b60c04ab230b8cc3845f964306f99504b18df (diff) |
Merge branch 'devel-stable' into for-next
Diffstat (limited to 'mm')
-rw-r--r-- | mm/huge_memory.c | 18
-rw-r--r-- | mm/memcontrol.c | 6
-rw-r--r-- | mm/memory-failure.c | 6
-rw-r--r-- | mm/memory.c | 15
-rw-r--r-- | mm/mprotect.c | 25
-rw-r--r-- | mm/slub.c | 38
-rw-r--r-- | mm/vmpressure.c | 1
7 files changed, 50 insertions, 59 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 82166bf974e1..4df39b1bde91 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1166,8 +1166,10 @@ alloc:
 	} else {
 		ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 				pmd, orig_pmd, page, haddr);
-		if (ret & VM_FAULT_OOM)
+		if (ret & VM_FAULT_OOM) {
 			split_huge_page(page);
+			ret |= VM_FAULT_FALLBACK;
+		}
 		put_page(page);
 	}
 	count_vm_event(THP_FAULT_FALLBACK);
@@ -1179,9 +1181,10 @@ alloc:
 		if (page) {
 			split_huge_page(page);
 			put_page(page);
-		}
+		} else
+			split_huge_page_pmd(vma, address, pmd);
+		ret |= VM_FAULT_FALLBACK;
 		count_vm_event(THP_FAULT_FALLBACK);
-		ret |= VM_FAULT_OOM;
 		goto out;
 	}
 
@@ -1545,6 +1548,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 				entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
 			ret = HPAGE_PMD_NR;
+			set_pmd_at(mm, addr, pmd, entry);
 			BUG_ON(pmd_write(entry));
 		} else {
 			struct page *page = pmd_page(*pmd);
@@ -1557,16 +1561,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 */
 			if (!is_huge_zero_page(page) &&
 			    !pmd_numa(*pmd)) {
-				entry = *pmd;
-				entry = pmd_mknuma(entry);
+				pmdp_set_numa(mm, addr, pmd);
 				ret = HPAGE_PMD_NR;
 			}
 		}
-
-		/* Set PMD if cleared earlier */
-		if (ret == HPAGE_PMD_NR)
-			set_pmd_at(mm, addr, pmd, entry);
-
 		spin_unlock(ptl);
 	}
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53385cd4e6f0..ce7a8cc7b404 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1687,7 +1687,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 	 * protects memcg_name and makes sure that parallel ooms do not
 	 * interleave
 	 */
-	static DEFINE_SPINLOCK(oom_info_lock);
+	static DEFINE_MUTEX(oom_info_lock);
 	struct cgroup *task_cgrp;
 	struct cgroup *mem_cgrp;
 	static char memcg_name[PATH_MAX];
@@ -1698,7 +1698,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 	if (!p)
 		return;
 
-	spin_lock(&oom_info_lock);
+	mutex_lock(&oom_info_lock);
 	rcu_read_lock();
 
 	mem_cgrp = memcg->css.cgroup;
@@ -1767,7 +1767,7 @@ done:
 
 		pr_cont("\n");
 	}
-	spin_unlock(&oom_info_lock);
+	mutex_unlock(&oom_info_lock);
 }
 
 /*
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4f08a2d61487..2f2f34a4e77d 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -945,8 +945,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * to it. Similarly, page lock is shifted.
 	 */
 	if (hpage != p) {
-		put_page(hpage);
-		get_page(p);
+		if (!(flags & MF_COUNT_INCREASED)) {
+			put_page(hpage);
+			get_page(p);
+		}
 		lock_page(p);
 		unlock_page(hpage);
 		*hpagep = p;
diff --git a/mm/memory.c b/mm/memory.c
index be6a0c0d4ae0..22dfa617bddb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3348,6 +3348,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		if (ret & VM_FAULT_LOCKED)
 			unlock_page(vmf.page);
 		ret = VM_FAULT_HWPOISON;
+		page_cache_release(vmf.page);
 		goto uncharge_out;
 	}
 
@@ -3703,7 +3704,6 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, flags);
 
-retry:
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
@@ -3741,20 +3741,13 @@ retry:
 			if (dirty && !pmd_write(orig_pmd)) {
 				ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
 							  orig_pmd);
-				/*
-				 * If COW results in an oom, the huge pmd will
-				 * have been split, so retry the fault on the
-				 * pte for a smaller charge.
-				 */
-				if (unlikely(ret & VM_FAULT_OOM))
-					goto retry;
-				return ret;
+				if (!(ret & VM_FAULT_FALLBACK))
+					return ret;
 			} else {
 				huge_pmd_set_accessed(mm, vma, address, pmd,
 						      orig_pmd, dirty);
+				return 0;
 			}
-
-			return 0;
 		}
 	}
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7332c1785744..769a67a15803 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -58,36 +58,27 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 				if (pte_numa(ptent))
 					ptent = pte_mknonnuma(ptent);
 				ptent = pte_modify(ptent, newprot);
+				/*
+				 * Avoid taking write faults for pages we
+				 * know to be dirty.
+				 */
+				if (dirty_accountable && pte_dirty(ptent))
+					ptent = pte_mkwrite(ptent);
+				ptep_modify_prot_commit(mm, addr, pte, ptent);
 				updated = true;
 			} else {
 				struct page *page;
 
-				ptent = *pte;
 				page = vm_normal_page(vma, addr, oldpte);
 				if (page && !PageKsm(page)) {
 					if (!pte_numa(oldpte)) {
-						ptent = pte_mknuma(ptent);
-						set_pte_at(mm, addr, pte, ptent);
+						ptep_set_numa(mm, addr, pte);
 						updated = true;
 					}
 				}
 			}
-
-			/*
-			 * Avoid taking write faults for pages we know to be
-			 * dirty.
-			 */
-			if (dirty_accountable && pte_dirty(ptent)) {
-				ptent = pte_mkwrite(ptent);
-				updated = true;
-			}
-
 			if (updated)
 				pages++;
-
-			/* Only !prot_numa always clears the pte */
-			if (!prot_numa)
-				ptep_modify_prot_commit(mm, addr, pte, ptent);
 		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
diff --git a/mm/slub.c b/mm/slub.c
index 7e3e0458bce4..25f14ad8f817 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1004,21 +1004,19 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 static void add_full(struct kmem_cache *s,
 	struct kmem_cache_node *n, struct page *page)
 {
-	lockdep_assert_held(&n->list_lock);
-
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
+	lockdep_assert_held(&n->list_lock);
 	list_add(&page->lru, &n->full);
 }
 
 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
 {
-	lockdep_assert_held(&n->list_lock);
-
 	if (!(s->flags & SLAB_STORE_USER))
 		return;
 
+	lockdep_assert_held(&n->list_lock);
 	list_del(&page->lru);
 }
 
@@ -1520,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 /*
  * Management of partially allocated slabs.
  */
-static inline void add_partial(struct kmem_cache_node *n,
-				struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
-	lockdep_assert_held(&n->list_lock);
-
 	n->nr_partial++;
 	if (tail == DEACTIVATE_TO_TAIL)
 		list_add_tail(&page->lru, &n->partial);
@@ -1532,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
 		list_add(&page->lru, &n->partial);
 }
 
-static inline void remove_partial(struct kmem_cache_node *n,
-					struct page *page)
+static inline void add_partial(struct kmem_cache_node *n,
+				struct page *page, int tail)
 {
 	lockdep_assert_held(&n->list_lock);
+	__add_partial(n, page, tail);
+}
 
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
+{
 	list_del(&page->lru);
 	n->nr_partial--;
 }
 
+static inline void remove_partial(struct kmem_cache_node *n,
+					struct page *page)
+{
+	lockdep_assert_held(&n->list_lock);
+	__remove_partial(n, page);
+}
+
 /*
  * Remove slab from the partial list, freeze it and
  * return the pointer to the freelist.
@@ -2906,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
 	inc_slabs_node(kmem_cache_node, node, page->objects);
 
 	/*
-	 * the lock is for lockdep's sake, not for any actual
-	 * race protection
+	 * No locks need to be taken here as it has just been
+	 * initialized and there is no concurrent access.
 	 */
-	spin_lock(&n->list_lock);
-	add_partial(n, page, DEACTIVATE_TO_HEAD);
-	spin_unlock(&n->list_lock);
+	__add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3197,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
-			remove_partial(n, page);
+			__remove_partial(n, page);
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 196970a4541f..d4042e75f7c7 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -19,6 +19,7 @@
 #include <linux/mm.h>
 #include <linux/vmstat.h>
 #include <linux/eventfd.h>
+#include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/printk.h>
 #include <linux/vmpressure.h>