mm: page_alloc: refactor setup_per_zone_lowmem_reserve()
[ Upstream commit 470c61d702 ]
setup_per_zone_lowmem_reserve() iterates through each zone, setting
zone->lowmem_reserve[j] = 0 (where j is the zone's index), then iterates
backwards through all preceding zones, setting
lower_zone->lowmem_reserve[j] = sum(managed pages of higher zones) /
lowmem_reserve_ratio[idx] for each (where idx is the lower zone's index).
If the lower zone has no managed pages or its ratio is 0, then all of its
lowmem_reserve[] entries are effectively zeroed.
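To make the old scheme concrete, the following is a minimal userspace
sketch of that descending walk; the three-zone setup, zone sizes, and
ratios are made-up illustration values, not kernel defaults:

/*
 * Userspace model of the old descending-order computation.
 * managed[] and ratio[] hold made-up example values.
 */
#include <stdio.h>

#define MAX_NR_ZONES 3

static unsigned long managed[MAX_NR_ZONES] = { 4000, 128000, 900000 };
static int ratio[MAX_NR_ZONES] = { 256, 32, 0 };
static unsigned long reserve[MAX_NR_ZONES][MAX_NR_ZONES];

int main(void)
{
	int j, idx;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		unsigned long managed_pages = managed[j];

		reserve[j][j] = 0;	/* the zeroing this patch removes */

		idx = j;
		while (idx) {
			idx--;
			if (!ratio[idx] || !managed[idx]) {
				reserve[idx][j] = 0;
				continue;	/* skips the accumulation below */
			}
			reserve[idx][j] = managed_pages / ratio[idx];
			managed_pages += managed[idx];
		}
	}

	/* e.g. reserve[0][1] = 128000 / 256 = 500 */
	for (idx = 0; idx < MAX_NR_ZONES; idx++)
		for (j = idx + 1; j < MAX_NR_ZONES; j++)
			printf("reserve[%d][%d] = %lu\n", idx, j, reserve[idx][j]);
	return 0;
}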
As these arrays are only assigned here, and all lowmem_reserve[] entries
for index < this zone's index are implicitly assumed to be 0 (these are
specifically output in show_free_areas() and zoneinfo_show_print(), for
example), there is no need to additionally zero the entry at index ==
this zone's index as well. This patch avoids that unnecessary zeroing.
Rather than iterating through zones and setting lowmem_reserve[j] for each
lower zone, this patch reverses the process and populates each zone's
lowmem_reserve[] values in ascending order.
This clarifies what is going on, especially in the case of zero managed
pages or ratio, which is now explicitly shown to clear these values.
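Continuing the toy model from the sketch above (the same managed[],
ratio[], and reserve[] arrays), the new ascending-order walk can be
modelled by replacing the body of main() with the loop below; for those
example values it fills in the same reserve[][] entries, and the clearing
of a zone's row is now explicit:

	int i, j;

	for (i = 0; i < MAX_NR_ZONES - 1; i++) {
		int r = ratio[i];
		int clear = !r || !managed[i];	/* models the bool */
		unsigned long managed_pages = 0;

		for (j = i + 1; j < MAX_NR_ZONES; j++) {
			if (clear) {
				reserve[i][j] = 0;	/* zeroing now explicit */
			} else {
				managed_pages += managed[j];
				reserve[i][j] = managed_pages / r;
			}
		}
	}

Each zone's reserves are filled in left to right in a single pass, with
managed_pages accumulating the pages of ever-higher zones as j grows.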
Link: https://lkml.kernel.org/r/20201129162758.115907-1-lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Roman Gushchin <guro@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent 5458985533
commit d7deea31ed
1 changed file with 13 additions and 20 deletions
@@ -7788,31 +7788,24 @@ static void calculate_totalreserve_pages(void)
 static void setup_per_zone_lowmem_reserve(void)
 {
 	struct pglist_data *pgdat;
-	enum zone_type j, idx;
+	enum zone_type i, j;
 
 	for_each_online_pgdat(pgdat) {
-		for (j = 0; j < MAX_NR_ZONES; j++) {
-			struct zone *zone = pgdat->node_zones + j;
-			unsigned long managed_pages = zone_managed_pages(zone);
+		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
+			struct zone *zone = &pgdat->node_zones[i];
+			int ratio = sysctl_lowmem_reserve_ratio[i];
+			bool clear = !ratio || !zone_managed_pages(zone);
+			unsigned long managed_pages = 0;
 
-			zone->lowmem_reserve[j] = 0;
-
-			idx = j;
-			while (idx) {
-				struct zone *lower_zone;
-
-				idx--;
-				lower_zone = pgdat->node_zones + idx;
-
-				if (!sysctl_lowmem_reserve_ratio[idx] ||
-				    !zone_managed_pages(lower_zone)) {
-					lower_zone->lowmem_reserve[j] = 0;
-					continue;
+			for (j = i + 1; j < MAX_NR_ZONES; j++) {
+				if (clear) {
+					zone->lowmem_reserve[j] = 0;
 				} else {
-					lower_zone->lowmem_reserve[j] =
-						managed_pages / sysctl_lowmem_reserve_ratio[idx];
+					struct zone *upper_zone = &pgdat->node_zones[j];
+
+					managed_pages += zone_managed_pages(upper_zone);
+					zone->lowmem_reserve[j] = managed_pages / ratio;
 				}
-				managed_pages += zone_managed_pages(lower_zone);
 			}
 		}
 	}