author		Trilok Soni <tsoni@codeaurora.org>	2016-08-25 19:05:37 -0700
committer	Trilok Soni <tsoni@codeaurora.org>	2016-08-26 14:34:05 -0700
commit		5ab1e18aa3913d454e1bd1498b20ee581aae2c6b (patch)
tree		42bd10ef0bf5cdb8deb05656bf802c77dc580ff7 /mm
parent		e97b6a0e0217f7c072fdad6c50673cd7a64348e1 (diff)
Revert "Merge remote-tracking branch 'msm-4.4/tmp-510d0a3f' into msm-4.4"
This reverts commit 9d6fd2c3e9fcfb ("Merge remote-tracking branch
'msm-4.4/tmp-510d0a3f' into msm-4.4") because it breaks the dump
parsing tools: the kernel can now be loaded anywhere in memory instead
of at a fixed offset within the linear mapping.
Change-Id: Id416f0a249d803442847d09ac47781147b0d0ee6
Signed-off-by: Trilok Soni <tsoni@codeaurora.org>
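
For context on the breakage: dump/ramdump parsers of that era assumed the
whole kernel lived in the linear mapping, so any kernel virtual address could
be converted to a physical address with one fixed formula. Once the image may
be placed anywhere in memory, image addresses translate through a boot-time
offset the parser does not know in advance. The sketch below illustrates the
difference; it is illustrative user-space C, and the constants, values, and
helper names (including the borrowed arm64 name kimage_voffset) are examples,
not taken from this patch or from any particular tool.

/*
 * Illustrative only: how a dump parser that hard-codes the linear
 * mapping goes wrong once the kernel image is relocatable. All
 * constants below are made-up examples; real tools must read the
 * boot-time values from the dump metadata instead.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_OFFSET	0xffffffc000000000ULL	/* linear map base (VA) */
#define EXAMPLE_PHYS_OFFSET	0x0000000080000000ULL	/* DRAM base (PA) */

/* Old assumption: every kernel VA sits at a fixed offset in the linear map. */
static uint64_t virt_to_phys_fixed(uint64_t va)
{
	return va - EXAMPLE_PAGE_OFFSET + EXAMPLE_PHYS_OFFSET;
}

/*
 * Relocatable image: symbol VAs translate through an offset that is only
 * known at boot (arm64 calls it kimage_voffset), so a parser without that
 * value decodes the wrong physical address.
 */
static uint64_t virt_to_phys_image(uint64_t va, uint64_t kimage_voffset)
{
	return va - kimage_voffset;
}

int main(void)
{
	uint64_t sym = 0xffffffc000a80000ULL;	/* example kernel symbol VA */
	/* Example boot-time offset: image loaded 256 MiB above DRAM base. */
	uint64_t voffset = 0xffffffbf70000000ULL;

	printf("fixed-mapping guess : %#llx\n",
	       (unsigned long long)virt_to_phys_fixed(sym));
	printf("actual physical addr: %#llx\n",
	       (unsigned long long)virt_to_phys_image(sym, voffset));
	return 0;
}

With the image sitting exactly at the DRAM base the two results happen to
coincide, which is why such parsers appeared to work until the load address
could change.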
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c		10
-rw-r--r--	mm/huge_memory.c	6
-rw-r--r--	mm/memcontrol.c		36
-rw-r--r--	mm/memory.c		40
-rw-r--r--	mm/migrate.c		8
-rw-r--r--	mm/page-writeback.c	6
-rw-r--r--	mm/page_alloc.c		2
-rw-r--r--	mm/slub.c		22
-rw-r--r--	mm/vmscan.c		2
-rw-r--r--	mm/zsmalloc.c		7
-rw-r--r--	mm/zswap.c		8
11 files changed, 49 insertions, 98 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 7f9e60489d67..e0d4a58bcee4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -905,8 +905,16 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
 							ISOLATE_UNEVICTABLE);

-		if (!pfn)
+		/*
+		 * In case of fatal failure, release everything that might
+		 * have been isolated in the previous iteration, and signal
+		 * the failure back to caller.
+		 */
+		if (!pfn) {
+			putback_movable_pages(&cc->migratepages);
+			cc->nr_migratepages = 0;
 			break;
+		}

 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
 			break;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 530e6427f823..62fe06bb7d04 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2134,9 +2134,10 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
 		 * page fault if needed.
 		 */
 		return 0;
-	if (vma->vm_ops || (vm_flags & VM_NO_THP))
+	if (vma->vm_ops)
 		/* khugepaged not yet working on file or special mappings */
 		return 0;
+	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
 	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
 	hend = vma->vm_end & HPAGE_PMD_MASK;
 	if (hstart < hend)
@@ -2497,7 +2498,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 		return false;
 	if (is_vma_temporary_stack(vma))
 		return false;
-	return !(vma->vm_flags & VM_NO_THP);
+	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
+	return true;
 }

 static void collapse_huge_page(struct mm_struct *mm,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d1f6dc5a715d..7535ef32a75b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -196,7 +196,6 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
 /* "mc" and its members are protected by cgroup_mutex */
 static struct move_charge_struct {
 	spinlock_t	  lock; /* for from, to */
-	struct mm_struct  *mm;
 	struct mem_cgroup *from;
 	struct mem_cgroup *to;
 	unsigned long flags;
@@ -4801,8 +4800,6 @@ static void __mem_cgroup_clear_mc(void)

 static void mem_cgroup_clear_mc(void)
 {
-	struct mm_struct *mm = mc.mm;
-
 	/*
 	 * we must clear moving_task before waking up waiters at the end of
 	 * task migration.
@@ -4812,10 +4809,7 @@ static void mem_cgroup_clear_mc(void)
 	spin_lock(&mc.lock);
 	mc.from = NULL;
 	mc.to = NULL;
-	mc.mm = NULL;
 	spin_unlock(&mc.lock);
-
-	mmput(mm);
 }

 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4872,7 +4866,6 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 		VM_BUG_ON(mc.moved_swap);

 		spin_lock(&mc.lock);
-		mc.mm = mm;
 		mc.from = from;
 		mc.to = memcg;
 		mc.flags = move_flags;
@@ -4882,9 +4875,8 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
 		ret = mem_cgroup_precharge_mc(mm);
 		if (ret)
 			mem_cgroup_clear_mc();
-	} else {
-		mmput(mm);
 	}
+	mmput(mm);
 	return ret;
 }

@@ -4998,11 +4990,11 @@ put:			/* get_mctgt_type() gets the page */
 	return ret;
 }

-static void mem_cgroup_move_charge(void)
+static void mem_cgroup_move_charge(struct mm_struct *mm)
 {
 	struct mm_walk mem_cgroup_move_charge_walk = {
 		.pmd_entry = mem_cgroup_move_charge_pte_range,
-		.mm = mc.mm,
+		.mm = mm,
 	};

 	lru_add_drain_all();
@@ -5014,7 +5006,7 @@ static void mem_cgroup_move_charge(void)
 	atomic_inc(&mc.from->moving_account);
 	synchronize_rcu();
retry:
-	if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
 		/*
 		 * Someone who are holding the mmap_sem might be waiting in
 		 * waitq. So we cancel all extra charges, wake up all waiters,
@@ -5031,16 +5023,23 @@ retry:
 	 * additional charge, the page walk just aborts.
 	 */
 	walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
-	up_read(&mc.mm->mmap_sem);
+	up_read(&mm->mmap_sem);
 	atomic_dec(&mc.from->moving_account);
 }

-static void mem_cgroup_move_task(void)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
 {
-	if (mc.to) {
-		mem_cgroup_move_charge();
-		mem_cgroup_clear_mc();
+	struct cgroup_subsys_state *css;
+	struct task_struct *p = cgroup_taskset_first(tset, &css);
+	struct mm_struct *mm = get_task_mm(p);
+
+	if (mm) {
+		if (mc.to)
+			mem_cgroup_move_charge(mm);
+		mmput(mm);
 	}
+	if (mc.to)
+		mem_cgroup_clear_mc();
 }
 #else	/* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -5054,7 +5053,7 @@ static int mem_cgroup_allow_attach(struct cgroup_taskset *tset)
 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 }
-static void mem_cgroup_move_task(void)
+static void mem_cgroup_move_task(struct cgroup_taskset *tset)
 {
 }
 #endif
@@ -5270,7 +5269,6 @@ struct cgroup_subsys memory_cgrp_subsys = {
 	.cancel_attach = mem_cgroup_cancel_attach,
 	.attach = mem_cgroup_move_task,
 	.allow_attach = mem_cgroup_allow_attach,
-	.post_attach = mem_cgroup_move_task,
 	.bind = mem_cgroup_bind,
 	.dfl_cftypes = memory_files,
 	.legacy_cftypes = mem_cgroup_legacy_files,
diff --git a/mm/memory.c b/mm/memory.c
index b536e3d60fc7..6098837a4e5e 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -797,46 +797,6 @@ out:
 	return pfn_to_page(pfn);
 }

-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
-				pmd_t pmd)
-{
-	unsigned long pfn = pmd_pfn(pmd);
-
-	/*
-	 * There is no pmd_special() but there may be special pmds, e.g.
-	 * in a direct-access (dax) mapping, so let's just replicate the
-	 * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
-	 */
-	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
-		if (vma->vm_flags & VM_MIXEDMAP) {
-			if (!pfn_valid(pfn))
-				return NULL;
-			goto out;
-		} else {
-			unsigned long off;
-			off = (addr - vma->vm_start) >> PAGE_SHIFT;
-			if (pfn == vma->vm_pgoff + off)
-				return NULL;
-			if (!is_cow_mapping(vma->vm_flags))
-				return NULL;
-		}
-	}
-
-	if (is_zero_pfn(pfn))
-		return NULL;
-	if (unlikely(pfn > highest_memmap_pfn))
-		return NULL;
-
-	/*
-	 * NOTE! We still have PageReserved() pages in the page tables.
-	 * eg. VDSO mappings can cause them to exist.
-	 */
-out:
-	return pfn_to_page(pfn);
-}
-#endif
-
 /*
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
diff --git a/mm/migrate.c b/mm/migrate.c
index 3db1b0277eb4..cd1e63062459 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -963,13 +963,7 @@ out:
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
 		/* Soft-offlined page shouldn't go through lru cache list */
-		if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
-			/*
-			 * With this release, we free successfully migrated
-			 * page and set PG_HWPoison on just freed page
-			 * intentionally. Although it's rather weird, it's how
-			 * HWPoison flag works at the moment.
-			 */
+		if (reason == MR_MEMORY_FAILURE) {
 			put_page(page);
 			if (!test_set_page_hwpoison(page))
 				num_poisoned_pages_inc();
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8bf8e06a56a6..112c0bebfff3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1899,8 +1899,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 	if (gdtc->dirty > gdtc->bg_thresh)
 		return true;

-	if (wb_stat(wb, WB_RECLAIMABLE) >
-	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
+	if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
 		return true;

 	if (mdtc) {
@@ -1914,8 +1913,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 		if (mdtc->dirty > mdtc->bg_thresh)
 			return true;

-		if (wb_stat(wb, WB_RECLAIMABLE) >
-		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
+		if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
 			return true;
 	}

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c8a31783c2d6..ffcb2b56f6c1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6294,7 +6294,7 @@ int __meminit init_per_zone_wmark_min(void)
 	setup_per_zone_inactive_ratio();
 	return 0;
 }
-core_initcall(init_per_zone_wmark_min)
+module_init(init_per_zone_wmark_min)

 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
diff --git a/mm/slub.c b/mm/slub.c
index fdc0721ebc31..2d5bbea0f0e8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2832,7 +2832,6 @@ struct detached_freelist {
 	void *tail;
 	void *freelist;
 	int cnt;
-	struct kmem_cache *s;
 };

 /*
@@ -2847,9 +2846,8 @@ struct detached_freelist {
  * synchronization primitive. Look ahead in the array is limited due
  * to performance reasons.
  */
-static inline
-int build_detached_freelist(struct kmem_cache *s, size_t size,
-			    void **p, struct detached_freelist *df)
+static int build_detached_freelist(struct kmem_cache *s, size_t size,
+				   void **p, struct detached_freelist *df)
 {
 	size_t first_skipped_index = 0;
 	int lookahead = 3;
@@ -2865,11 +2863,8 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	if (!object)
 		return 0;

-	/* Support for memcg, compiler can optimize this out */
-	df->s = cache_from_obj(s, object);
-
 	/* Start new detached freelist */
-	set_freepointer(df->s, object, NULL);
+	set_freepointer(s, object, NULL);
 	df->page = virt_to_head_page(object);
 	df->tail = object;
 	df->freelist = object;
@@ -2884,7 +2879,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 		/* df->page is always set at this point */
 		if (df->page == virt_to_head_page(object)) {
 			/* Opportunity build freelist */
-			set_freepointer(df->s, object, df->freelist);
+			set_freepointer(s, object, df->freelist);
 			df->freelist = object;
 			df->cnt++;
 			p[size] = NULL; /* mark object processed */
@@ -2903,20 +2898,25 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
 	return first_skipped_index;
 }

+
 /* Note that interrupts must be enabled when calling this function. */
-void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
+void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 {
 	if (WARN_ON(!size))
 		return;

 	do {
 		struct detached_freelist df;
+		struct kmem_cache *s;
+
+		/* Support for memcg */
+		s = cache_from_obj(orig_s, p[size - 1]);

 		size = build_detached_freelist(s, size, p, &df);
 		if (unlikely(!df.page))
 			continue;

-		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
+		slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
 	} while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d5c3ef60a71e..73f5cec91063 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2683,7 +2683,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 		sc->gfp_mask |= __GFP_HIGHMEM;

 	for_each_zone_zonelist_nodemask(zone, z, zonelist,
-					gfp_zone(sc->gfp_mask), sc->nodemask) {
+					requested_highidx, sc->nodemask) {
 		enum zone_type classzone_idx;

 		if (!populated_zone(zone))
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c1ea19478119..fc083996e40a 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1732,13 +1732,10 @@ static struct page *isolate_source_page(struct size_class *class)
 static unsigned long zs_can_compact(struct size_class *class)
 {
 	unsigned long obj_wasted;
-	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
-	unsigned long obj_used = zs_stat_get(class, OBJ_USED);

-	if (obj_allocated <= obj_used)
-		return 0;
+	obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
+		zs_stat_get(class, OBJ_USED);

-	obj_wasted = obj_allocated - obj_used;
 	obj_wasted /= get_maxobj_per_zspage(class->size,
 			class->pages_per_zspage);

diff --git a/mm/zswap.c b/mm/zswap.c
index 340261946fda..bf14508afd64 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -170,8 +170,6 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 static LIST_HEAD(zswap_pools);
 /* protects zswap_pools list modification */
 static DEFINE_SPINLOCK(zswap_pools_lock);
-/* pool counter to provide unique names to zpool */
-static atomic_t zswap_pools_count = ATOMIC_INIT(0);

 /* used by param callback function */
 static bool zswap_init_started;
@@ -567,7 +565,6 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
 	struct zswap_pool *pool;
-	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;

 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -576,10 +573,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 		return NULL;
 	}

-	/* unique name for each pool specifically required by zsmalloc */
-	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
-
-	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
+	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
 	if (!pool->zpool) {
 		pr_err("%s zpool not available\n", type);
 		goto error;