author     Michael Bestas <mkbestas@lineageos.org>    2021-03-17 21:27:13 +0200
committer  Michael Bestas <mkbestas@lineageos.org>    2021-03-17 21:27:13 +0200
commit     183f2051d299eb7b35d8e37d01be9077dd809839 (patch)
tree       4937226d67edb7c827ce7a360d1f83ff07dd36e9 /mm
parent     c66c1e4925165243b33081fd609c4a8a7d35e2fc (diff)
parent     58bc8e0469d0808754b42e423934da639c07b6ba (diff)
Merge branch 'android-4.4-p' of https://android.googlesource.com/kernel/common into lineage-18.1-caf-msm8998
This brings LA.UM.9.2.r1-02700-SDMxx0.0 up to date with https://android.googlesource.com/kernel/common/ android-4.4-p at commit:
58bc8e0469d08 Merge 4.4.261 into android-4.4-p

Conflicts:
	drivers/block/zram/zram_drv.c
	drivers/block/zram/zram_drv.h
	mm/zsmalloc.c

Change-Id: I451bffa685eaaea04938bc6d0b8e3f4bb0f869e9
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c    71
-rw-r--r--  mm/memory.c      6
-rw-r--r--  mm/page_io.c    11
-rw-r--r--  mm/swapfile.c    2
-rw-r--r--  mm/zsmalloc.c   17
5 files changed, 76 insertions(+), 31 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index dc877712ef1f..e933cae307bf 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -66,6 +66,21 @@ DEFINE_SPINLOCK(hugetlb_lock);
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
+static inline bool PageHugeFreed(struct page *head)
+{
+ return page_private(head + 4) == -1UL;
+}
+
+static inline void SetPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, -1UL);
+}
+
+static inline void ClearPageHugeFreed(struct page *head)
+{
+ set_page_private(head + 4, 0);
+}
+
/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
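These three helpers track whether a hugetlb page is sitting on the free list by storing a -1 sentinel in page_private() of the fifth subpage (head + 4), a tail-page field this branch does not otherwise use for that purpose. A minimal userspace C sketch of the same idea, using a plain struct array in place of struct page (fake_page and the helper names are illustrative, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for struct page; only the private field matters here. */
struct fake_page {
	unsigned long private;
};

/* Mirror PageHugeFreed()/SetPageHugeFreed()/ClearPageHugeFreed():
 * borrow an otherwise-unused field of the fifth subpage as a flag. */
static bool huge_freed(struct fake_page *head)
{
	return head[4].private == (unsigned long)-1;
}

static void set_huge_freed(struct fake_page *head)
{
	head[4].private = (unsigned long)-1;
}

static void clear_huge_freed(struct fake_page *head)
{
	head[4].private = 0;
}

int main(void)
{
	struct fake_page huge[512] = { { 0 } };	/* head page + tail pages */

	set_huge_freed(huge);		/* as enqueue_huge_page() does */
	printf("freed: %d\n", huge_freed(huge));
	clear_huge_freed(huge);		/* as dequeue/prep_new_huge_page() do */
	printf("freed: %d\n", huge_freed(huge));
	return 0;
}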
@@ -841,6 +856,7 @@ static void enqueue_huge_page(struct hstate *h, struct page *page)
list_move(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
+ SetPageHugeFreed(page);
}
static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
@@ -858,6 +874,7 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
return NULL;
list_move(&page->lru, &h->hugepage_activelist);
set_page_refcounted(page);
+ ClearPageHugeFreed(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
@@ -1142,14 +1159,16 @@ static inline int alloc_fresh_gigantic_page(struct hstate *h,
static void update_and_free_page(struct hstate *h, struct page *page)
{
int i;
+ struct page *subpage = page;
if (hstate_is_gigantic(h) && !gigantic_page_supported())
return;
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
- for (i = 0; i < pages_per_huge_page(h); i++) {
- page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
+ for (i = 0; i < pages_per_huge_page(h);
+ i++, subpage = mem_map_next(subpage, page, i)) {
+ subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1 << PG_referenced | 1 << PG_dirty |
1 << PG_active | 1 << PG_private |
1 << PG_writeback);
@@ -1266,6 +1285,7 @@ static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
set_hugetlb_cgroup(page, NULL);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
+ ClearPageHugeFreed(page);
spin_unlock(&hugetlb_lock);
put_page(page); /* free it into the hugepage allocator */
}
@@ -1424,11 +1444,32 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
*/
static void dissolve_free_huge_page(struct page *page)
{
+retry:
spin_lock(&hugetlb_lock);
if (PageHuge(page) && !page_count(page)) {
struct page *head = compound_head(page);
struct hstate *h = page_hstate(head);
int nid = page_to_nid(head);
+
+ /*
+ * We should make sure that the page is already on the free list
+ * when it is dissolved.
+ */
+ if (unlikely(!PageHugeFreed(head))) {
+ spin_unlock(&hugetlb_lock);
+ cond_resched();
+
+ /*
+ * Theoretically we should return -EBUSY when we encounter
+ * this race. In practice the race window is quite small and
+ * a retry has a good chance of successfully dissolving the
+ * page, so retrying is an optimization that increases the
+ * success rate of dissolving the page.
+ */
+ goto retry;
+ }
+
list_del(&head->lru);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
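The retry above closes a window in which a page already answers PageHuge() but has not yet been put on the free list (prep_new_huge_page() drops hugetlb_lock before the final put_page()). Rather than failing with -EBUSY, the code drops the lock, reschedules, and tries again. A hedged userspace sketch of the same drop-lock-and-retry pattern with POSIX threads (the names lock, on_free_list and dissolve are illustrative; build with -pthread):

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool on_free_list;	/* set by the enqueue side, under the lock */

/* Only dissolve once the object is really on the free list; if we catch
 * it mid-initialisation, back off and retry instead of failing. */
static int dissolve(void)
{
retry:
	pthread_mutex_lock(&lock);
	if (!on_free_list) {
		pthread_mutex_unlock(&lock);
		sched_yield();			/* stand-in for cond_resched() */
		goto retry;
	}
	on_free_list = false;			/* tear the object down here */
	pthread_mutex_unlock(&lock);
	return 0;
}

static void *enqueue_side(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	on_free_list = true;			/* enqueue_huge_page() equivalent */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, enqueue_side, NULL);
	dissolve();			/* may spin briefly until the flag appears */
	pthread_join(t, NULL);
	return 0;
}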
@@ -2485,8 +2526,10 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
return -ENOMEM;
retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
- if (retval)
+ if (retval) {
kobject_put(hstate_kobjs[hi]);
+ hstate_kobjs[hi] = NULL;
+ }
return retval;
}
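Resetting hstate_kobjs[hi] to NULL alongside kobject_put() matters because the put releases the object; a stale pointer left in the table could be dereferenced or put again by later teardown code. A hedged userspace sketch of the same "release it and clear the cached pointer" rule (the array and function names are illustrative):

#include <stdlib.h>

static void *cache[8];	/* stand-in for hstate_kobjs[] */

/* Register slot i; on failure, free the object *and* drop the stale
 * pointer so a later cleanup pass cannot touch it again. */
static int register_entry(int i, int simulate_failure)
{
	cache[i] = malloc(32);
	if (!cache[i])
		return -1;

	if (simulate_failure) {		/* sysfs_create_group() failing */
		free(cache[i]);
		cache[i] = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	register_entry(0, 1);

	/* Cleanup is now safe: free(NULL) is a no-op. */
	for (int i = 0; i < 8; i++)
		free(cache[i]);
	return 0;
}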
@@ -4279,21 +4322,23 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end)
{
- unsigned long a_start, a_end;
+ unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
+ v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
- if (!(vma->vm_flags & VM_MAYSHARE))
+ /*
+ * The vma needs to span at least one aligned PUD size, and the
+ * [start, end) range must at least partially overlap it.
+ */
+ if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
+ (*end <= v_start) || (*start >= v_end))
return;
/* Extend the range to be PUD aligned for a worst case scenario */
- a_start = ALIGN_DOWN(*start, PUD_SIZE);
- a_end = ALIGN(*end, PUD_SIZE);
+ if (*start > v_start)
+ *start = ALIGN_DOWN(*start, PUD_SIZE);
- /*
- * Intersect the range with the vma range, since pmd sharing won't be
- * across vma after all
- */
- *start = max(vma->vm_start, a_start);
- *end = min(vma->vm_end, a_end);
+ if (*end < v_end)
+ *end = ALIGN(*end, PUD_SIZE);
}
/*
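The rewritten adjust_range_if_pmd_sharing_possible() above only widens the flush range when the VMA actually contains at least one fully PUD-aligned region and the requested [start, end) range overlaps it, instead of unconditionally rounding and then clamping to the VMA. A hedged userspace sketch of just that arithmetic, with PUD_SIZE hard-coded to 1 GiB and the alignment helpers open-coded (none of this is kernel API, and the VM_MAYSHARE check is omitted):

#include <stdio.h>

#define PUD_SIZE		(1UL << 30)		/* 1 GiB, x86_64-style */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

/* Mirror of the new logic: adjust only when the VMA spans a fully
 * PUD-aligned region and [start, end) at least touches it. */
static void adjust_range(unsigned long vm_start, unsigned long vm_end,
			 unsigned long *start, unsigned long *end)
{
	unsigned long v_start = ALIGN(vm_start, PUD_SIZE);
	unsigned long v_end = ALIGN_DOWN(vm_end, PUD_SIZE);

	if (!(v_end > v_start) || *end <= v_start || *start >= v_end)
		return;

	if (*start > v_start)
		*start = ALIGN_DOWN(*start, PUD_SIZE);
	if (*end < v_end)
		*end = ALIGN(*end, PUD_SIZE);
}

int main(void)
{
	/* VMA covering [1 GiB, 3 GiB): a range in the middle gets rounded
	 * out to PUD boundaries, but never past the VMA's aligned span. */
	unsigned long s = 0x50000000UL, e = 0x60000000UL;

	adjust_range(1UL << 30, 3UL << 30, &s, &e);
	printf("adjusted: [%#lx, %#lx)\n", s, e);	/* [0x40000000, 0x80000000) */
	return 0;
}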
diff --git a/mm/memory.c b/mm/memory.c
index 5dfc9fac8b74..8e41c675f7af 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1686,11 +1686,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
- pte_t *pte;
+ pte_t *pte, *mapped_pte;
spinlock_t *ptl;
int err = 0;
- pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+ mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
return -ENOMEM;
arch_enter_lazy_mmu_mode();
@@ -1704,7 +1704,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
- pte_unmap_unlock(pte - 1, ptl);
+ pte_unmap_unlock(mapped_pte, ptl);
return err;
}
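The mm/memory.c change keeps mapped_pte next to the walking pte cursor so that pte_unmap_unlock() is handed exactly the pointer that pte_alloc_map_lock() returned, which is what the kmap-based unmap expects when the PTE page lives in highmem (CONFIG_HIGHPTE). The same discipline applies in plain C: release the handle you were given, not a cursor derived from it. A small illustrative sketch (not kernel code):

#include <stdlib.h>

static int fill_buffer(size_t n)
{
	char *cursor, *mapped;		/* mapped plays the role of mapped_pte */

	mapped = cursor = malloc(n);
	if (!cursor)
		return -1;

	/* Walk with the cursor, as remap_pte_range() walks the PTEs... */
	for (size_t i = 0; i < n; i++)
		*cursor++ = 0;

	/* ...but release what malloc() handed back, never cursor - 1. */
	free(mapped);
	return 0;
}

int main(void)
{
	return fill_buffer(4096) ? 1 : 0;
}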
diff --git a/mm/page_io.c b/mm/page_io.c
index b995a5ba5e8f..ab92cd559404 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -32,7 +32,6 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
bio = bio_alloc(gfp_flags, 1);
if (bio) {
bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
- bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
bio->bi_end_io = end_io;
bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -244,11 +243,6 @@ out:
return ret;
}
-static sector_t swap_page_sector(struct page *page)
-{
- return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9);
-}
-
int __swap_writepage(struct page *page, struct writeback_control *wbc,
bio_end_io_t end_write_func)
{
@@ -297,7 +291,8 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
return ret;
}
- ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
+ ret = bdev_write_page(sis->bdev, map_swap_page(page, &sis->bdev),
+ page, wbc);
if (!ret) {
count_vm_event(PSWPOUT);
return 0;
@@ -345,7 +340,7 @@ int swap_readpage(struct page *page)
return ret;
}
- ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
+ ret = bdev_read_page(sis->bdev, map_swap_page(page, &sis->bdev), page);
if (!ret) {
count_vm_event(PSWPIN);
return 0;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index feb9abb8e8cc..204224d8e0d1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1710,7 +1710,7 @@ sector_t map_swap_page(struct page *page, struct block_device **bdev)
{
swp_entry_t entry;
entry.val = page_private(page);
- return map_swap_entry(entry, bdev);
+ return map_swap_entry(entry, bdev) << (PAGE_SHIFT - 9);
}
/*
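Taken together, the mm/page_io.c and mm/swapfile.c hunks move the byte-to-sector conversion into map_swap_page(), so bdev_read_page()/bdev_write_page() now receive a value already expressed in 512-byte sectors and the removed swap_page_sector() helper is no longer needed; this keeps swap I/O pointed at the right on-disk offset for swap files as well as swap partitions. The conversion itself is a shift by PAGE_SHIFT - 9; a hedged worked example assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12	/* 4 KiB pages assumed */
#define SECTOR_SHIFT	9	/* 512-byte block-layer sectors */

int main(void)
{
	unsigned long swap_slot = 3;	/* page-sized slot within the swap area */

	/* One page is 2^(PAGE_SHIFT - 9) sectors, i.e. 8 for 4 KiB pages. */
	unsigned long long sector =
		(unsigned long long)swap_slot << (PAGE_SHIFT - SECTOR_SHIFT);

	printf("swap slot %lu -> sector %llu (byte offset %llu)\n",
	       swap_slot, sector, sector << SECTOR_SHIFT);
	return 0;
}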
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 3f1b584bd5d0..174c08cf5270 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -2284,11 +2284,13 @@ static unsigned long zs_can_compact(struct size_class *class)
return obj_wasted * class->pages_per_zspage;
}
-static void __zs_compact(struct zs_pool *pool, struct size_class *class)
+static unsigned long __zs_compact(struct zs_pool *pool,
+ struct size_class *class)
{
struct zs_compact_control cc;
struct zspage *src_zspage;
struct zspage *dst_zspage = NULL;
+ unsigned long pages_freed = 0;
spin_lock(&class->lock);
while ((src_zspage = isolate_zspage(class, true))) {
@@ -2318,7 +2320,7 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
putback_zspage(class, dst_zspage);
if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
free_zspage(pool, class, src_zspage);
- pool->stats.pages_compacted += class->pages_per_zspage;
+ pages_freed += class->pages_per_zspage;
}
spin_unlock(&class->lock);
cond_resched();
@@ -2329,12 +2331,15 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
putback_zspage(class, src_zspage);
spin_unlock(&class->lock);
+
+ return pages_freed;
}
unsigned long zs_compact(struct zs_pool *pool)
{
int i;
struct size_class *class;
+ unsigned long pages_freed = 0;
for (i = zs_size_classes - 1; i >= 0; i--) {
class = pool->size_class[i];
@@ -2342,10 +2347,11 @@ unsigned long zs_compact(struct zs_pool *pool)
continue;
if (class->index != i)
continue;
- __zs_compact(pool, class);
+ pages_freed += __zs_compact(pool, class);
}
+ atomic_long_add(pages_freed, &pool->stats.pages_compacted);
- return pool->stats.pages_compacted;
+ return pages_freed;
}
EXPORT_SYMBOL_GPL(zs_compact);
@@ -2362,13 +2368,12 @@ static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
struct zs_pool *pool = container_of(shrinker, struct zs_pool,
shrinker);
- pages_freed = pool->stats.pages_compacted;
/*
* Compact classes and calculate compaction delta.
* Can run concurrently with a manually triggered
* (by user) compaction.
*/
- pages_freed = zs_compact(pool) - pages_freed;
+ pages_freed = zs_compact(pool);
return pages_freed ? pages_freed : SHRINK_STOP;
}
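Finally, the mm/zsmalloc.c change makes __zs_compact() and zs_compact() report the pages freed by this call and fold that delta into pool->stats.pages_compacted with a single atomic add, so zs_shrinker_scan() no longer snapshots the shared counter and subtracts it afterwards, a dance that could race with a concurrent manual compaction and return a nonsense delta. A hedged userspace sketch of the corrected pattern using C11 atomics (the function names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_long pages_compacted;	/* cumulative, like pool->stats.pages_compacted */

/* Pretend to compact one size class and report how many pages it freed. */
static long compact_one_class(long can_free)
{
	return can_free;
}

/* Accumulate a local delta, publish it once, and return it. No snapshot
 * of the shared counter is subtracted, so concurrent callers cannot
 * corrupt each other's return values. */
static long do_compact(void)
{
	long freed = 0;

	freed += compact_one_class(3);
	freed += compact_one_class(5);

	atomic_fetch_add(&pages_compacted, freed);
	return freed;
}

int main(void)
{
	long freed = do_compact();

	printf("freed this call: %ld, total ever: %ld\n",
	       freed, atomic_load(&pages_compacted));
	return 0;
}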