author     Srinivasarao P <spathi@codeaurora.org>  2017-12-26 19:12:47 +0530
committer  Srinivasarao P <spathi@codeaurora.org>  2017-12-26 19:13:02 +0530
commit     00a5539a807c39d9051cf71056350b7615e4af21 (patch)
tree       ac47f357266b85c866f07bbc8b23a15ddfbe6105 /mm
parent     f373da7543ac64952bc98652646712a694b942ac (diff)
parent     f0b9d2d0acea78a0d77181974f94d595df58aa88 (diff)
Merge android-4.4.101 (f0b9d2d) into msm-4.4
* refs/heads/tmp-f0b9d2d
Linux 4.4.101
mm/pagewalk.c: report holes in hugetlb ranges
mm/page_ext.c: check if page_ext is not prepared
mm: check the return value of lookup_page_ext for all call sites
coda: fix 'kernel memory exposure attempt' in fsync
mm/page_alloc.c: broken deferred calculation
ipmi: fix unsigned long underflow
ocfs2: should wait dio before inode lock in ocfs2_setattr()
nvme: Fix memory order on async queue deletion
arm64: fix dump_instr when PAN and UAO are in use
serial: omap: Fix EFR write on RTS deassertion
ima: do not update security.ima if appraisal status is not INTEGRITY_PASS
net/sctp: Always set scope_id in sctp_inet6_skb_msgname
fealnx: Fix building error on MIPS
sctp: do not peel off an assoc from one netns to another one
af_netlink: ensure that NLMSG_DONE never fails in dumps
vlan: fix a use-after-free in vlan_device_event()
bonding: discard lowest hash bit for 802.3ad layer3+4
netfilter/ipvs: clear ipvs_property flag when SKB net namespace changed
tcp: do not mangle skb->cb[] in tcp_make_synack()
Conflicts:
mm/debug-pagealloc.c
mm/page_ext.c
mm/page_owner.c
Change-Id: I551aff1b4c8a0d72f64a234abb8ac88990fbc9e5
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  | 27
-rw-r--r--  mm/page_ext.c    |  4
-rw-r--r--  mm/pagewalk.c    |  6
3 files changed, 23 insertions, 14 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 915c60258935..2ea77b967709 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -286,28 +286,37 @@ EXPORT_SYMBOL(nr_online_nodes);
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+
+/*
+ * Determine how many pages need to be initialized during early boot
+ * (non-deferred initialization).
+ * The value of first_deferred_pfn will be set later, once non-deferred pages
+ * are initialized, but for now set it ULONG_MAX.
+ */
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
-        unsigned long max_initialise;
-        unsigned long reserved_lowmem;
+        phys_addr_t start_addr, end_addr;
+        unsigned long max_pgcnt;
+        unsigned long reserved;
 
         /*
          * Initialise at least 2G of a node but also take into account that
          * two large system hashes that can take up 1GB for 0.25TB/node.
          */
-        max_initialise = max(2UL << (30 - PAGE_SHIFT),
-                (pgdat->node_spanned_pages >> 8));
+        max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
+                        (pgdat->node_spanned_pages >> 8));
 
         /*
          * Compensate the all the memblock reservations (e.g. crash kernel)
          * from the initial estimation to make sure we will initialize enough
          * memory to boot.
          */
-        reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
-                        pgdat->node_start_pfn + max_initialise);
-        max_initialise += reserved_lowmem;
+        start_addr = PFN_PHYS(pgdat->node_start_pfn);
+        end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
+        reserved = memblock_reserved_memory_within(start_addr, end_addr);
+        max_pgcnt += PHYS_PFN(reserved);
 
-        pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
+        pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
         pgdat->first_deferred_pfn = ULONG_MAX;
 }
 
@@ -343,7 +352,7 @@ static inline bool update_defer_init(pg_data_t *pgdat,
                 return true;
         /* Initialise at least 2G of the highest zone */
         (*nr_initialised)++;
-        if ((*nr_initialised > pgdat->static_init_size) &&
+        if ((*nr_initialised > pgdat->static_init_pgcnt) &&
             (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                 pgdat->first_deferred_pfn = pfn;
                 return false;
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 916accfec86a..f02ad1cc7d24 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -103,7 +103,6 @@ struct page_ext *lookup_page_ext(struct page *page)
         struct page_ext *base;
 
         base = NODE_DATA(page_to_nid(page))->node_page_ext;
-#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
         /*
          * The sanity checks the page allocator does upon freeing a
          * page can reach here before the page_ext arrays are
@@ -115,7 +114,6 @@ struct page_ext *lookup_page_ext(struct page *page)
          */
         if (unlikely(!base))
                 return NULL;
-#endif
         offset = pfn - round_down(node_start_pfn(page_to_nid(page)),
                                         MAX_ORDER_NR_PAGES);
         return base + offset;
@@ -180,7 +178,6 @@ struct page_ext *lookup_page_ext(struct page *page)
 {
         unsigned long pfn = page_to_pfn(page);
         struct mem_section *section = __pfn_to_section(pfn);
-#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAGE_POISONING)
         /*
          * The sanity checks the page allocator does upon freeing a
          * page can reach here before the page_ext arrays are
@@ -192,7 +189,6 @@ struct page_ext *lookup_page_ext(struct page *page)
          */
         if (!section->page_ext)
                 return NULL;
-#endif
         return section->page_ext + pfn;
 }
 
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 29f2f8b853ae..c2cbd2620169 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -142,8 +142,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
         do {
                 next = hugetlb_entry_end(h, addr, end);
                 pte = huge_pte_offset(walk->mm, addr & hmask);
-                if (pte && walk->hugetlb_entry)
+
+                if (pte)
                         err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
+                else if (walk->pte_hole)
+                        err = walk->pte_hole(addr, next, walk);
+
                 if (err)
                         break;
         } while (addr = next, addr != end);
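Note on the page_ext.c hunks above: with the CONFIG_DEBUG_VM/CONFIG_PAGE_POISONING guards dropped, lookup_page_ext() can return NULL in every configuration, so callers have to check the result (that is what the companion patch "mm: check the return value of lookup_page_ext for all call sites" in this merge does). A minimal sketch of such a call site, assuming the 4.4-era page_ext API; the helper name below is hypothetical and not part of the patch:

#include <linux/mm.h>
#include <linux/page_ext.h>

/* Hypothetical helper: read a per-page debug bit, tolerating early boot. */
static bool my_page_poison_tracked(struct page *page)
{
        struct page_ext *page_ext = lookup_page_ext(page);

        /* page_ext arrays may not be allocated yet; bail out quietly. */
        if (unlikely(!page_ext))
                return false;

        return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}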
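Note on the pagewalk.c hunk: after this change walk_hugetlb_range() reports unmapped ranges through the walker's pte_hole callback instead of silently skipping them. A minimal sketch of a walker that benefits from this, assuming the 4.4-era struct mm_walk / walk_page_range() API; the struct and function names (hole_stats, count_hugetlb, count_hole, account_hugetlb_vma) are illustrative only:

#include <linux/mm.h>

struct hole_stats {
        unsigned long mapped;   /* bytes backed by huge pages */
        unsigned long holes;    /* bytes with no mapping */
};

static int count_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
                         unsigned long next, struct mm_walk *walk)
{
        struct hole_stats *stats = walk->private;

        stats->mapped += next - addr;
        return 0;
}

static int count_hole(unsigned long addr, unsigned long next,
                      struct mm_walk *walk)
{
        struct hole_stats *stats = walk->private;

        stats->holes += next - addr;
        return 0;
}

/* Caller is assumed to hold mmap_sem for the VMA's mm. */
static void account_hugetlb_vma(struct vm_area_struct *vma,
                                struct hole_stats *stats)
{
        struct mm_walk walk = {
                .hugetlb_entry  = count_hugetlb,
                .pte_hole       = count_hole,   /* now invoked for hugetlb holes too */
                .mm             = vma->vm_mm,
                .private        = stats,
        };

        walk_page_range(vma->vm_start, vma->vm_end, &walk);
}

Before this patch, count_hole() was never called for unpopulated ranges of a hugetlb VMA; afterwards hole accounting behaves the same for hugetlb and normal mappings.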