Lines matching refs:zone — each entry below is a reference to the identifier `zone`, annotated with its source line number, the matching code, the enclosing function, and the usage kind (argument, local, or member).

145 struct zone *zone; member
582 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
590 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
591 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
592 sp = zone->spanned_pages; in page_outside_zone_boundaries()
593 if (!zone_spans_pfn(zone, pfn)) in page_outside_zone_boundaries()
595 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
599 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
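
The page_outside_zone_boundaries() references above pair a simple containment test with a span seqlock: the do/while rereads zone_start_pfn and spanned_pages whenever zone_span_seqretry() reports that a concurrent resize raced with the read. The test itself is a half-open interval check. A minimal userspace sketch of that check (zone_model and zone_spans_pfn_model are illustrative names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

struct zone_model {                     /* stand-in for struct zone's span fields */
	unsigned long zone_start_pfn;
	unsigned long spanned_pages;
};

/* Mirrors the zone_spans_pfn() idea: pfn must fall in [start, start + spanned). */
static bool zone_spans_pfn_model(const struct zone_model *z, unsigned long pfn)
{
	return pfn >= z->zone_start_pfn &&
	       pfn < z->zone_start_pfn + z->spanned_pages;
}

int main(void)
{
	struct zone_model z = { .zone_start_pfn = 0x1000, .spanned_pages = 0x4000 };

	printf("%d %d\n", zone_spans_pfn_model(&z, 0x2000),    /* 1: inside  */
	                  zone_spans_pfn_model(&z, 0x6000));   /* 0: outside */
	return 0;
}
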
605 static int page_is_consistent(struct zone *zone, struct page *page) in page_is_consistent() argument
609 if (zone != page_zone(page)) in page_is_consistent()
617 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
619 if (page_outside_zone_boundaries(zone, page)) in bad_range()
621 if (!page_is_consistent(zone, page)) in bad_range()
627 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
744 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
757 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
762 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
772 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
775 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
777 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
873 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
880 capc->cc->zone == zone ? capc : NULL; in task_capc()
909 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
923 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
926 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
933 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
936 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
947 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
950 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
955 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
965 zone->free_area[order].nr_free--; in del_page_from_free_list()
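
The four helpers referenced at 923-965 share one invariant: a page moves on or off the intrusive free list of its (order, migratetype) bucket, and the free_area's nr_free counter moves with it. A self-contained userspace model of that pattern (the list primitives and *_model names are illustrative, not the kernel's):

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next; n->prev = h;
	h->next->prev = n; h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	n->prev = n->next = n;
}

struct page_model { struct list_head buddy_list; unsigned long pfn; };
struct free_area_model { struct list_head free_list; unsigned long nr_free; };

static void add_to_free_list_model(struct page_model *p, struct free_area_model *a)
{
	list_add(&p->buddy_list, &a->free_list);
	a->nr_free++;                           /* counter always moves with the list */
}

static void del_from_free_list_model(struct page_model *p, struct free_area_model *a)
{
	list_del(&p->buddy_list);
	a->nr_free--;
}

int main(void)
{
	struct free_area_model area;
	struct page_model p = { .pfn = 42 };

	list_init(&area.free_list);
	area.nr_free = 0;
	add_to_free_list_model(&p, &area);
	del_from_free_list_model(&p, &area);
	printf("nr_free back to %lu\n", area.nr_free);  /* 0 */
	return 0;
}

add_to_free_list_tail() differs only in linking at the tail of the queue, which __free_one_page() (line 1109) uses to park freed blocks cold at the back.
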
1024 struct zone *zone, unsigned int order, in __free_one_page() argument
1027 struct capture_control *capc = task_capc(zone); in __free_one_page()
1036 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
1041 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1044 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1049 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1065 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1067 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1082 if (unlikely(has_isolate_pageblock(zone))) { in __free_one_page()
1109 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1111 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
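
The merge loop in __free_one_page() (1024-1111) walks orders upward, and its correctness rests on the buddy identity: at order o, a block's buddy differs from it only in bit o of the pfn, so XOR finds the buddy and AND finds the start of the merged pair. A small demonstration (find_buddy_pfn_model mirrors the kernel's __find_buddy_pfn, which this listing does not show):

#include <stdio.h>

static unsigned long find_buddy_pfn_model(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);            /* flip bit 'order' */
}

int main(void)
{
	/* Freeing pfn 8 at order 0 and merging upward three times. */
	unsigned long pfn = 8;

	for (unsigned int order = 0; order < 3; order++) {
		unsigned long buddy = find_buddy_pfn_model(pfn, order);

		printf("order %u: pfn %lu buddy %lu merged start %lu\n",
		       order, pfn, buddy, pfn & buddy);
		pfn &= buddy;                   /* merged block starts at the lower pfn */
	}
	return 0;
}

In the kernel loop each step additionally requires the buddy to be free at the same order, with the isolated-pageblock handling at 1082 guarding merges when isolation is active.
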
1393 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1456 spin_lock(&zone->lock); in free_pcppages_bulk()
1457 isolated_pageblocks = has_isolate_pageblock(zone); in free_pcppages_bulk()
1471 __free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE); in free_pcppages_bulk()
1474 spin_unlock(&zone->lock); in free_pcppages_bulk()
1477 static void free_one_page(struct zone *zone, in free_one_page() argument
1482 spin_lock(&zone->lock); in free_one_page()
1483 if (unlikely(has_isolate_pageblock(zone) || in free_one_page()
1487 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1488 spin_unlock(&zone->lock); in free_one_page()
1492 unsigned long zone, int nid) in __init_single_page() argument
1495 set_page_links(page, zone, nid, pfn); in __init_single_page()
1504 if (!is_highmem_idx(zone)) in __init_single_page()
1522 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page() local
1524 if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) in init_reserved_page()
1690 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1705 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1717 void set_zone_contiguous(struct zone *zone) in set_zone_contiguous() argument
1719 unsigned long block_start_pfn = zone->zone_start_pfn; in set_zone_contiguous()
1723 for (; block_start_pfn < zone_end_pfn(zone); in set_zone_contiguous()
1727 block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); in set_zone_contiguous()
1730 block_end_pfn, zone)) in set_zone_contiguous()
1736 zone->contiguous = true; in set_zone_contiguous()
1739 void clear_zone_contiguous(struct zone *zone) in clear_zone_contiguous() argument
1741 zone->contiguous = false; in clear_zone_contiguous()
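
set_zone_contiguous() (1717-1736) walks the zone one pageblock at a time, clamping each block's end to the zone end, and marks the zone contiguous only if every block maps to valid, same-zone pages. The stepping logic in isolation (PB_PAGES is a stand-in for pageblock_nr_pages, not a kernel constant):

#include <stdio.h>

#define PB_PAGES 512UL                          /* illustrative pageblock size */
#define PB_ALIGN_DOWN(pfn) ((pfn) & ~(PB_PAGES - 1))

int main(void)
{
	unsigned long zone_start = 1000, zone_end = 2600;       /* [start, end) */
	unsigned long block_start, block_end;

	for (block_start = zone_start; block_start < zone_end;
	     block_start = PB_ALIGN_DOWN(block_start) + PB_PAGES) {
		block_end = PB_ALIGN_DOWN(block_start) + PB_PAGES;
		if (block_end > zone_end)
			block_end = zone_end;   /* min(block_end, zone_end_pfn) */
		printf("would check [%lu, %lu)\n", block_start, block_end);
	}
	return 0;
}
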
1830 static unsigned long __init deferred_init_pages(struct zone *zone, in deferred_init_pages() argument
1835 int nid = zone_to_nid(zone); in deferred_init_pages()
1837 int zid = zone_idx(zone); in deferred_init_pages()
1862 deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone, in deferred_init_mem_pfn_range_in_zone() argument
1873 for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) { in deferred_init_mem_pfn_range_in_zone()
1896 deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn, in deferred_init_maxorder() argument
1905 for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) { in deferred_init_maxorder()
1912 nr_pages += deferred_init_pages(zone, *start_pfn, t); in deferred_init_maxorder()
1923 for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) { in deferred_init_maxorder()
1944 struct zone *zone = arg; in deferred_init_memmap_chunk() local
1947 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn); in deferred_init_memmap_chunk()
1954 deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_init_memmap_chunk()
1974 struct zone *zone; in deferred_init_memmap() local
2004 zone = pgdat->node_zones + zid; in deferred_init_memmap()
2005 if (first_init_pfn < zone_end_pfn(zone)) in deferred_init_memmap()
2010 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2020 .fn_arg = zone, in deferred_init_memmap()
2029 deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_init_memmap()
2034 WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); in deferred_init_memmap()
2059 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2062 pg_data_t *pgdat = zone->zone_pgdat; in deferred_grow_zone()
2069 if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat)) in deferred_grow_zone()
2084 if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, in deferred_grow_zone()
2101 nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn); in deferred_grow_zone()
2126 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2128 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2135 struct zone *zone; in page_alloc_init_late() local
2154 for_each_populated_zone(zone) in page_alloc_init_late()
2155 zone_pcp_update(zone); in page_alloc_init_late()
2175 for_each_populated_zone(zone) in page_alloc_init_late()
2176 set_zone_contiguous(zone); in page_alloc_init_late()
2225 static inline void expand(struct zone *zone, struct page *page, in expand() argument
2233 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
2241 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
2244 add_to_free_list(&page[size], zone, high, migratetype); in expand()
2380 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2389 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
2393 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
2394 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
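
When __rmqueue_smallest() (2380-2394) finds a free block only at a higher order, expand() (2225-2244) splits it: each iteration halves the block and returns the upper half to the free list one order lower, stopping at the requested order. A counting sketch of that loop:

#include <stdio.h>

int main(void)
{
	unsigned int high = 5, low = 2;         /* found order-5, wanted order-2 */
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		/* &page[size] is the upper half, freed back at order 'high' */
		printf("give back %lu pages at offset %lu (order %u)\n",
		       size, size, high);
	}
	printf("caller keeps %lu pages (order %u)\n", size, low);
	return 0;
}

This is also why set_page_guard() appears inside expand() at 2241: with debug_pagealloc, a returned half can become a guard block instead of an ordinary free block.
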
2420 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2423 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2426 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
2435 static int move_freepages(struct zone *zone, in move_freepages() argument
2465 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
2466 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
2469 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2477 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2490 if (!zone_spans_pfn(zone, start_pfn)) in move_freepages_block()
2492 if (!zone_spans_pfn(zone, end_pfn)) in move_freepages_block()
2495 return move_freepages(zone, start_pfn, end_pfn, migratetype, in move_freepages_block()
2543 static inline bool boost_watermark(struct zone *zone) in boost_watermark() argument
2555 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
2558 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2574 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
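
boost_watermark() (2543-2574) raises the zone's temporary boost by one pageblock per call, but caps it: per the mult_frac() at 2558, the ceiling is a fraction of the high watermark (watermark_boost_factor out of 10000; the factor name and divisor come from the kernel's sysctl, not from lines shown here). Arithmetic model:

#include <stdio.h>

#define PB_PAGES 512UL                          /* illustrative pageblock size */

static unsigned long boost_once(unsigned long cur, unsigned long wmark_high,
				unsigned long factor)
{
	unsigned long max_boost = wmark_high * factor / 10000;

	if (max_boost < PB_PAGES)
		max_boost = PB_PAGES;           /* never cap below one pageblock */
	cur += PB_PAGES;
	return cur < max_boost ? cur : max_boost;
}

int main(void)
{
	unsigned long b = 0;

	for (int i = 1; i <= 5; i++) {
		b = boost_once(b, 10000, 15000);        /* cap = 1.5x high wmark */
		printf("after boost %d: %lu\n", i, b);
	}
	return 0;
}
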
2588 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
2615 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) in steal_suitable_fallback()
2616 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
2622 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
2661 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
2705 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone, in reserve_highatomic_pageblock() argument
2715 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
2716 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2719 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
2722 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
2729 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
2731 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
2735 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
2753 struct zone *zone; in unreserve_highatomic_pageblock() local
2758 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
2764 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
2768 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
2770 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2791 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
2793 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
2806 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2809 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2813 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2830 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
2855 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2881 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2897 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2912 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2924 zone_page_state(zone, NR_FREE_CMA_PAGES) > in __rmqueue()
2925 zone_page_state(zone, NR_FREE_PAGES) / 2) { in __rmqueue()
2926 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2932 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2935 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2937 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
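
The CMA balancing test quoted at 2924-2925 reads as policy: when CMA is allowed, movable allocations are steered into CMA pageblocks first once CMA holds more than half of the zone's free pages, so ordinary pageblocks are not drained while a large CMA reserve sits idle. Decision model:

#include <stdbool.h>
#include <stdio.h>

static bool try_cma_first(unsigned long free_cma, unsigned long free_total)
{
	/* Mirrors: NR_FREE_CMA_PAGES > NR_FREE_PAGES / 2 */
	return free_cma > free_total / 2;
}

int main(void)
{
	printf("cma 600 of 1000 free -> %d\n", try_cma_first(600, 1000)); /* 1 */
	printf("cma 300 of 1000 free -> %d\n", try_cma_first(300, 1000)); /* 0 */
	return 0;
}
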
2952 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2958 spin_lock(&zone->lock); in rmqueue_bulk()
2960 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2981 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, in rmqueue_bulk()
2991 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2992 spin_unlock(&zone->lock); in rmqueue_bulk()
3005 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
3014 free_pcppages_bulk(zone, to_drain, pcp); in drain_zone_pages()
3026 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
3033 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
3037 free_pcppages_bulk(zone, pcp->count, pcp); in drain_pages_zone()
3050 struct zone *zone; in drain_pages() local
3052 for_each_populated_zone(zone) { in drain_pages()
3053 drain_pages_zone(cpu, zone); in drain_pages()
3063 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
3067 if (zone) in drain_local_pages()
3068 drain_pages_zone(cpu, zone); in drain_local_pages()
3087 drain_local_pages(drain->zone); in drain_local_pages_wq()
3101 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) in __drain_all_pages() argument
3124 if (!zone) in __drain_all_pages()
3137 struct zone *z; in __drain_all_pages()
3146 } else if (zone) { in __drain_all_pages()
3147 pcp = per_cpu_ptr(zone->pageset, cpu); in __drain_all_pages()
3169 drain->zone = zone; in __drain_all_pages()
3186 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
3188 __drain_all_pages(zone, false); in drain_all_pages()
3198 void mark_free_pages(struct zone *zone) in mark_free_pages() argument
3205 if (zone_is_empty(zone)) in mark_free_pages()
3208 spin_lock_irqsave(&zone->lock, flags); in mark_free_pages()
3210 max_zone_pfn = zone_end_pfn(zone); in mark_free_pages()
3211 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) in mark_free_pages()
3220 if (page_zone(page) != zone) in mark_free_pages()
3229 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
3242 spin_unlock_irqrestore(&zone->lock, flags); in mark_free_pages()
3260 struct zone *zone = page_zone(page); in free_unref_page_commit() local
3276 free_one_page(zone, page, pfn, 0, migratetype, in free_unref_page_commit()
3283 pcp = &this_cpu_ptr(zone->pageset)->pcp; in free_unref_page_commit()
3287 free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp); in free_unref_page_commit()
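
free_unref_page_commit() (3260-3287) is the single-page free fast path: the page lands on the per-CPU list, and only when pcp->count crosses pcp->high does a batch of pcp->batch pages get flushed to the buddy lists, as at 3287 (the high/batch trigger is the standard pcp pattern; the exact comparison is not among the quoted lines). Counting model:

#include <stdio.h>

int main(void)
{
	int count = 0, high = 6, batch = 3;

	for (int freed = 1; freed <= 10; freed++) {
		count++;                        /* list_add + pcp->count++ */
		if (count >= high) {
			count -= batch;         /* free_pcppages_bulk(batch) */
			printf("free #%d: flushed %d, count now %d\n",
			       freed, batch, count);
		}
	}
	return 0;
}
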
3369 struct zone *zone; in __isolate_free_page() local
3374 zone = page_zone(page); in __isolate_free_page()
3384 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3385 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
3388 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3393 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3425 struct zone *zone = page_zone(page); in __putback_isolated_page() local
3428 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
3431 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3440 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z) in zone_statistics()
3464 struct page *__rmqueue_pcplist(struct zone *zone, int migratetype, in __rmqueue_pcplist() argument
3473 pcp->count += rmqueue_bulk(zone, 0, in __rmqueue_pcplist()
3489 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3490 struct zone *zone, gfp_t gfp_flags, in rmqueue_pcplist() argument
3499 pcp = &this_cpu_ptr(zone->pageset)->pcp; in rmqueue_pcplist()
3501 page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3504 zone_statistics(preferred_zone, zone); in rmqueue_pcplist()
3514 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3515 struct zone *zone, unsigned int order, in rmqueue() argument
3529 page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, in rmqueue()
3540 spin_lock_irqsave(&zone->lock, flags); in rmqueue()
3551 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3556 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue()
3558 spin_unlock(&zone->lock); in rmqueue()
3561 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3565 zone_statistics(preferred_zone, zone); in rmqueue()
3570 if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) { in rmqueue()
3571 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3572 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3575 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3657 static inline long __zone_watermark_unusable_free(struct zone *z, in __zone_watermark_unusable_free()
3686 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3750 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3757 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3797 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe()
3810 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3812 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3816 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3831 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3842 if (!zone) in alloc_flags_nofragment()
3845 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
3854 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
3882 struct zone *zone; in get_page_from_freelist() local
3893 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
3900 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3922 if (last_pgdat_dirty_limit == zone->zone_pgdat) in get_page_from_freelist()
3925 if (!node_dirty_ok(zone->zone_pgdat)) { in get_page_from_freelist()
3926 last_pgdat_dirty_limit = zone->zone_pgdat; in get_page_from_freelist()
3932 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
3940 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
3941 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
3947 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
3948 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3959 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3969 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3972 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3982 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3991 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
4001 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
4008 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4221 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
4223 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
4224 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4333 struct zone *zone; in should_compact_retry() local
4345 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4347 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4477 struct zone *zone; in wake_all_kswapds() local
4481 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4483 if (last_pgdat != zone->zone_pgdat) in wake_all_kswapds()
4484 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4485 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4588 struct zone *zone; in should_reclaim_retry() local
4617 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4621 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4624 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4625 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4631 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4645 write_pending = zone_page_state_snapshot(zone, in should_reclaim_retry()
4752 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
5040 struct zone *zone; in __alloc_pages_bulk() local
5071 for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) { in __alloc_pages_bulk()
5075 !__cpuset_zone_allowed(zone, gfp)) { in __alloc_pages_bulk()
5079 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && in __alloc_pages_bulk()
5080 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { in __alloc_pages_bulk()
5084 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; in __alloc_pages_bulk()
5085 if (zone_watermark_fast(zone, 0, mark, in __alloc_pages_bulk()
5096 if (unlikely(!zone)) in __alloc_pages_bulk()
5101 pcp = &this_cpu_ptr(zone->pageset)->pcp; in __alloc_pages_bulk()
5112 page = __rmqueue_pcplist(zone, ac.migratetype, alloc_flags, in __alloc_pages_bulk()
5127 __count_zid_vm_events(PGALLOC, zone_idx(zone), 1); in __alloc_pages_bulk()
5128 zone_statistics(ac.preferred_zoneref->zone, zone); in __alloc_pages_bulk()
5197 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); in __alloc_pages()
5516 struct zone *zone; in nr_free_zone_pages() local
5523 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5524 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5525 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
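
nr_free_zone_pages() (5516-5525) sums, across the zonelist, how far each zone's managed size exceeds its high watermark; zones at or below their high watermark contribute nothing. Worked example with made-up zone sizes:

#include <stdio.h>

int main(void)
{
	struct { unsigned long managed, high; } zones[] = {
		{ 100000, 4000 },
		{  50000, 2000 },
		{   1000, 2000 },               /* below high: contributes 0 */
	};
	unsigned long sum = 0;

	for (unsigned int i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
		if (zones[i].managed > zones[i].high)
			sum += zones[i].managed - zones[i].high;
	printf("nr_free_zone_pages ~= %lu\n", sum);     /* 96000 + 48000 = 144000 */
	return 0;
}
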
5548 static inline void show_node(struct zone *zone) in show_node() argument
5551 printk("Node %d ", zone_to_nid(zone)); in show_node()
5561 struct zone *zone; in si_mem_available() local
5567 for_each_zone(zone) in si_mem_available()
5568 wmark_low += low_wmark_pages(zone); in si_mem_available()
5629 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node() local
5631 if (is_highmem(zone)) { in si_meminfo_node()
5632 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
5633 free_highpages += zone_page_state(zone, NR_FREE_PAGES); in si_meminfo_node()
5708 struct zone *zone; in show_free_areas() local
5711 for_each_populated_zone(zone) { in show_free_areas()
5712 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5716 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5800 for_each_populated_zone(zone) { in show_free_areas()
5803 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5808 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
5810 show_node(zone); in show_free_areas()
5832 zone->name, in show_free_areas()
5833 K(zone_page_state(zone, NR_FREE_PAGES)), in show_free_areas()
5834 K(min_wmark_pages(zone)), in show_free_areas()
5835 K(low_wmark_pages(zone)), in show_free_areas()
5836 K(high_wmark_pages(zone)), in show_free_areas()
5837 K(zone->nr_reserved_highatomic), in show_free_areas()
5838 K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), in show_free_areas()
5839 K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), in show_free_areas()
5840 K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), in show_free_areas()
5841 K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), in show_free_areas()
5842 K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), in show_free_areas()
5843 K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), in show_free_areas()
5844 K(zone->present_pages), in show_free_areas()
5845 K(zone_managed_pages(zone)), in show_free_areas()
5846 K(zone_page_state(zone, NR_MLOCK)), in show_free_areas()
5847 K(zone_page_state(zone, NR_BOUNCE)), in show_free_areas()
5849 K(this_cpu_read(zone->pageset->pcp.count)), in show_free_areas()
5850 K(zone_page_state(zone, NR_FREE_CMA_PAGES))); in show_free_areas()
5853 printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); in show_free_areas()
5857 for_each_populated_zone(zone) { in show_free_areas()
5862 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
5864 show_node(zone); in show_free_areas()
5865 printk(KERN_CONT "%s: ", zone->name); in show_free_areas()
5867 spin_lock_irqsave(&zone->lock, flags); in show_free_areas()
5869 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5881 spin_unlock_irqrestore(&zone->lock, flags); in show_free_areas()
5898 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5900 zoneref->zone = zone; in zoneref_set_zone()
5901 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5911 struct zone *zone; in build_zonerefs_node() local
5917 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5918 if (managed_zone(zone)) { in build_zonerefs_node()
5919 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
6042 zonerefs->zone = NULL; in build_zonelists_in_node_order()
6057 zonerefs->zone = NULL; in build_thisnode_zonelists()
6114 return zone_to_nid(z->zone); in local_memory_node()
6155 zonerefs->zone = NULL; in build_zonelists()
6294 overlap_memmap_init(unsigned long zone, unsigned long *pfn) in overlap_memmap_init() argument
6298 if (mirrored_kernelcore && zone == ZONE_MOVABLE) { in overlap_memmap_init()
6323 void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone, in memmap_init_range() argument
6342 if (zone == ZONE_DEVICE) { in memmap_init_range()
6358 if (overlap_memmap_init(zone, &pfn)) in memmap_init_range()
6365 __init_single_page(page, pfn, zone, nid); in memmap_init_range()
6383 void __ref memmap_init_zone_device(struct zone *zone, in memmap_init_zone_device() argument
6389 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device()
6391 unsigned long zone_idx = zone_idx(zone); in memmap_init_zone_device()
6395 if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE)) in memmap_init_zone_device()
6451 static void __meminit zone_init_free_lists(struct zone *zone) in zone_init_free_lists() argument
6455 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6456 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6485 int zone, int node) in init_unavailable_range() argument
6496 __init_single_page(pfn_to_page(pfn), pfn, zone, node); in init_unavailable_range()
6505 int zone, int node) in init_unavailable_range() argument
6511 void __meminit __weak memmap_init_zone(struct zone *zone) in memmap_init_zone() argument
6513 unsigned long zone_start_pfn = zone->zone_start_pfn; in memmap_init_zone()
6514 unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages; in memmap_init_zone()
6515 int i, nid = zone_to_nid(zone), zone_id = zone_idx(zone); in memmap_init_zone()
6550 zone->name, pgcnt); in memmap_init_zone()
6553 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
6562 batch = zone_managed_pages(zone) / 1024; in zone_batchsize()
6646 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high, in __zone_set_pageset_high_and_batch() argument
6653 p = per_cpu_ptr(zone->pageset, cpu); in __zone_set_pageset_high_and_batch()
6662 static void zone_set_pageset_high_and_batch(struct zone *zone) in zone_set_pageset_high_and_batch() argument
6667 new_high = zone_managed_pages(zone) / percpu_pagelist_fraction; in zone_set_pageset_high_and_batch()
6672 new_batch = zone_batchsize(zone); in zone_set_pageset_high_and_batch()
6677 if (zone->pageset_high == new_high && in zone_set_pageset_high_and_batch()
6678 zone->pageset_batch == new_batch) in zone_set_pageset_high_and_batch()
6681 zone->pageset_high = new_high; in zone_set_pageset_high_and_batch()
6682 zone->pageset_batch = new_batch; in zone_set_pageset_high_and_batch()
6684 __zone_set_pageset_high_and_batch(zone, new_high, new_batch); in zone_set_pageset_high_and_batch()
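
zone_set_pageset_high_and_batch() (6662-6684) picks the per-CPU high mark either from percpu_pagelist_fraction (managed/fraction, line 6667) or from zone_batchsize() (managed/1024 per line 6562, then clamped). A simplified model of the batch sizing; the ~1MB cap and final divide follow the kernel function, though the kernel additionally rounds toward a power of two, which this sketch omits:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long batch_model(unsigned long managed_pages)
{
	unsigned long batch = managed_pages / 1024;     /* ~0.1% of the zone */

	if (batch * PAGE_SIZE > 1024 * 1024)            /* no more than ~1MB */
		batch = (1024 * 1024) / PAGE_SIZE;
	batch /= 4;
	if (batch < 1)
		batch = 1;
	return batch;
}

int main(void)
{
	printf("32MB zone: batch %lu\n", batch_model(8UL * 1024));    /* small */
	printf("16GB zone: batch %lu\n", batch_model(4UL << 20));     /* capped */
	return 0;
}
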
6687 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
6692 zone->pageset = alloc_percpu(struct per_cpu_pageset); in setup_zone_pageset()
6694 p = per_cpu_ptr(zone->pageset, cpu); in setup_zone_pageset()
6698 zone_set_pageset_high_and_batch(zone); in setup_zone_pageset()
6708 struct zone *zone; in setup_per_cpu_pageset() local
6711 for_each_populated_zone(zone) in setup_per_cpu_pageset()
6712 setup_zone_pageset(zone); in setup_per_cpu_pageset()
6733 static __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
6740 zone->pageset = &boot_pageset; in zone_pcp_init()
6741 zone->pageset_high = BOOT_PAGESET_HIGH; in zone_pcp_init()
6742 zone->pageset_batch = BOOT_PAGESET_BATCH; in zone_pcp_init()
6744 if (populated_zone(zone)) in zone_pcp_init()
6746 zone->name, zone->present_pages, in zone_pcp_init()
6747 zone_batchsize(zone)); in zone_pcp_init()
6750 void __meminit init_currently_empty_zone(struct zone *zone, in init_currently_empty_zone() argument
6754 struct pglist_data *pgdat = zone->zone_pgdat; in init_currently_empty_zone()
6755 int zone_idx = zone_idx(zone) + 1; in init_currently_empty_zone()
6760 zone->zone_start_pfn = zone_start_pfn; in init_currently_empty_zone()
6765 (unsigned long)zone_idx(zone), in init_currently_empty_zone()
6768 zone_init_free_lists(zone); in init_currently_empty_zone()
6769 zone->initialized = 1; in init_currently_empty_zone()
6987 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages() local
7005 zone->zone_start_pfn = zone_start_pfn; in calculate_node_totalpages()
7007 zone->zone_start_pfn = 0; in calculate_node_totalpages()
7008 zone->spanned_pages = size; in calculate_node_totalpages()
7009 zone->present_pages = real_size; in calculate_node_totalpages()
7042 static void __ref setup_usemap(struct zone *zone) in setup_usemap() argument
7044 unsigned long usemapsize = usemap_size(zone->zone_start_pfn, in setup_usemap()
7045 zone->spanned_pages); in setup_usemap()
7046 zone->pageblock_flags = NULL; in setup_usemap()
7048 zone->pageblock_flags = in setup_usemap()
7050 zone_to_nid(zone)); in setup_usemap()
7051 if (!zone->pageblock_flags) in setup_usemap()
7053 usemapsize, zone->name, zone_to_nid(zone)); in setup_usemap()
7057 static inline void setup_usemap(struct zone *zone) {} in setup_usemap() argument
7153 static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid, in zone_init_internals() argument
7156 atomic_long_set(&zone->managed_pages, remaining_pages); in zone_init_internals()
7157 zone_set_nid(zone, nid); in zone_init_internals()
7158 zone->name = zone_names[idx]; in zone_init_internals()
7159 zone->zone_pgdat = NODE_DATA(nid); in zone_init_internals()
7160 spin_lock_init(&zone->lock); in zone_init_internals()
7161 zone_seqlock_init(zone); in zone_init_internals()
7162 zone_pcp_init(zone); in zone_init_internals()
7202 struct zone *zone = pgdat->node_zones + j; in free_area_init_core() local
7205 size = zone->spanned_pages; in free_area_init_core()
7206 freesize = zone->present_pages; in free_area_init_core()
7245 zone_init_internals(zone, j, nid, freesize); in free_area_init_core()
7251 setup_usemap(zone); in free_area_init_core()
7252 init_currently_empty_zone(zone, zone->zone_start_pfn, size); in free_area_init_core()
7253 memmap_init_zone(zone); in free_area_init_core()
7661 struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory() local
7662 if (populated_zone(zone)) { in check_for_memory()
7697 int i, nid, zone; in free_area_init() local
7711 zone = MAX_NR_ZONES - i - 1; in free_area_init()
7713 zone = i; in free_area_init()
7715 if (zone == ZONE_MOVABLE) in free_area_init()
7718 end_pfn = max(max_zone_pfn[zone], start_pfn); in free_area_init()
7719 arch_zone_lowest_possible_pfn[zone] = start_pfn; in free_area_init()
7720 arch_zone_highest_possible_pfn[zone] = end_pfn; in free_area_init()
8021 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
8023 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
8027 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
8028 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
8032 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
8058 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve() local
8060 bool clear = !ratio || !zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
8065 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
8067 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
8070 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
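
setup_per_zone_lowmem_reserve() (8058-8070) makes each lower zone defend itself against allocations that could have been satisfied higher up: zone i's reserve against zone j is the cumulative managed pages of zones (i, j], divided by the per-zone ratio, exactly the managed_pages / ratio at 8070. Worked model with illustrative sizes:

#include <stdio.h>

int main(void)
{
	/* e.g. DMA / NORMAL / MOVABLE, in pages; sizes are made up */
	unsigned long managed[3] = { 4096, 1UL << 20, 2UL << 20 };
	unsigned long reserve[3][3] = { { 0 } };
	unsigned long ratio = 256;

	for (int i = 0; i < 2; i++) {
		unsigned long upper = 0;

		for (int j = i + 1; j < 3; j++) {
			upper += managed[j];
			reserve[i][j] = upper / ratio;
		}
	}
	/* (1M + 2M) / 256 = 12288 pages held back in zone 0 vs zone-2 pressure */
	printf("zone0 reserve against zone2: %lu pages\n", reserve[0][2]);
	return 0;
}
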
8084 struct zone *zone; in __setup_per_zone_wmarks() local
8088 for_each_zone(zone) { in __setup_per_zone_wmarks()
8089 if (!is_highmem(zone)) in __setup_per_zone_wmarks()
8090 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
8093 for_each_zone(zone) { in __setup_per_zone_wmarks()
8096 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
8097 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
8099 if (is_highmem(zone)) { in __setup_per_zone_wmarks()
8111 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
8113 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
8119 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
8128 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
8131 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
8132 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
8133 zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2; in __setup_per_zone_wmarks()
8135 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
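
__setup_per_zone_wmarks() (8084-8135) gives each zone a min watermark proportional to its share of managed lowmem (the pages_min * managed product at 8097, divided by the lowmem total), then spaces low and high above min by a gap that, per the mult_frac() fragment at 8128, scales with zone size; taking the larger of min/4 and the scaled term matches the kernel's watermark_scale_factor logic, stated here as an assumption. Arithmetic model:

#include <stdio.h>

int main(void)
{
	unsigned long managed = 1UL << 20;      /* this zone: 1M pages       */
	unsigned long lowmem_total = 1UL << 21; /* all lowmem: 2M pages      */
	unsigned long pages_min = 16384;        /* from min_free_kbytes      */
	unsigned long scale = 10;               /* watermark_scale_factor    */

	unsigned long min = (unsigned long)((unsigned long long)pages_min *
					    managed / lowmem_total);
	unsigned long gap = min / 4;
	unsigned long scaled = managed * scale / 10000;

	if (scaled > gap)
		gap = scaled;                   /* max(min/4, scaled gap)    */
	printf("min=%lu low=%lu high=%lu\n", min, min + gap, min + 2 * gap);
	return 0;
}

Highmem zones take the separate branch at 8099-8113: their min is derived from managed/1024 and clamped small, since highmem needs no large min reserve.
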
8255 struct zone *zone; in setup_min_unmapped_ratio() local
8260 for_each_zone(zone) in setup_min_unmapped_ratio()
8261 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
8283 struct zone *zone; in setup_min_slab_ratio() local
8288 for_each_zone(zone) in setup_min_slab_ratio()
8289 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
8341 struct zone *zone; in percpu_pagelist_fraction_sysctl_handler() local
8364 for_each_populated_zone(zone) in percpu_pagelist_fraction_sysctl_handler()
8365 zone_set_pageset_high_and_batch(zone); in percpu_pagelist_fraction_sysctl_handler()
8526 struct page *has_unmovable_pages(struct zone *zone, struct page *page, in has_unmovable_pages() argument
8565 if (zone_idx(zone) == ZONE_MOVABLE) in has_unmovable_pages()
8679 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
8703 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
8758 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range()
8796 drain_all_pages(cc.zone); in alloc_contig_range()
8888 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, in pfn_range_valid_contig()
8908 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
8913 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
8942 struct zone *zone; in alloc_contig_pages() local
8946 for_each_zone_zonelist_nodemask(zone, z, zonelist, in alloc_contig_pages()
8948 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8950 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
8951 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_contig_pages()
8952 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { in alloc_contig_pages()
8960 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
8965 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
8969 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
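
alloc_contig_pages() (8942-8969) scans each candidate zone under zone->lock for a naturally aligned run of nr_pages, advancing by nr_pages per step (the ALIGN at 8950) and stopping once the run would cross the zone end (zone_spans_last_pfn at 8951). The scan stepping in isolation (ALIGN_UP assumes a power-of-two nr_pages, as the kernel's ALIGN does):

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long zone_start = 1000, zone_end = 5000, nr_pages = 1024;
	unsigned long pfn = ALIGN_UP(zone_start, nr_pages);

	while (pfn + nr_pages - 1 < zone_end) {         /* zone_spans_last_pfn */
		printf("candidate [%lu, %lu)\n", pfn, pfn + nr_pages);
		/* real code: pfn_range_valid_contig(), then try the allocation */
		pfn += nr_pages;
	}
	return 0;
}
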
8993 void __meminit zone_pcp_update(struct zone *zone) in zone_pcp_update() argument
8996 zone_set_pageset_high_and_batch(zone); in zone_pcp_update()
9008 void zone_pcp_disable(struct zone *zone) in zone_pcp_disable() argument
9011 __zone_set_pageset_high_and_batch(zone, 0, 1); in zone_pcp_disable()
9012 __drain_all_pages(zone, true); in zone_pcp_disable()
9015 void zone_pcp_enable(struct zone *zone) in zone_pcp_enable() argument
9017 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); in zone_pcp_enable()
9021 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
9026 if (zone->pageset != &boot_pageset) { in zone_pcp_reset()
9028 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
9029 drain_zonestat(zone, pset); in zone_pcp_reset()
9031 free_percpu(zone->pageset); in zone_pcp_reset()
9032 zone->pageset = &boot_pageset; in zone_pcp_reset()
9045 struct zone *zone; in __offline_isolated_pages() local
9050 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
9051 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
9076 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
9079 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
9085 struct zone *zone = page_zone(page); in is_free_buddy_page() local
9090 spin_lock_irqsave(&zone->lock, flags); in is_free_buddy_page()
9097 spin_unlock_irqrestore(&zone->lock, flags); in is_free_buddy_page()
9107 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
9126 if (set_page_guard(zone, current_buddy, high, migratetype)) in break_down_buddy_pages()
9130 add_to_free_list(current_buddy, zone, high, migratetype); in break_down_buddy_pages()
9142 struct zone *zone = page_zone(page); in take_page_off_buddy() local
9148 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
9158 del_page_from_free_list(page_head, zone, page_order); in take_page_off_buddy()
9159 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
9167 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()