author     Christian Marangi <ansuelsmth@gmail.com>   2022-10-22 00:24:49 +0200
committer  Christian Marangi <ansuelsmth@gmail.com>   2022-10-24 21:10:48 +0200
commit     2a7bdde29bebcaeb18c8595d31ca0d89b5719a1a (patch)
tree       133c8eccb3ddf4ee68a72fa693e9e0e5f3271c77 /target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch
parent     ccedd7175d19b03dadf4b2bcd85f5c762a41b363 (diff)
generic: 5.15: move MGLRU patches from pending to backport
Move the MGLRU patches from pending to backport, as they have been merged upstream. These are direct ports from one of the developers, so it is better to simply move them than to backport them again from upstream.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
Diffstat (limited to 'target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch')
-rw-r--r--  target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch  224
1 file changed, 224 insertions, 0 deletions
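
For context, the backported change is a pure code-motion refactor: the scan-balance setup block at the top of shrink_node() is lifted, unchanged, into a new helper named prepare_scan_count(), so that the later MGLRU patches can modify the entry point without wading through that block. A minimal stand-alone C sketch of the same extract-helper pattern follows; it is illustrative only and not part of the patch, and the *_stub names and the one-line cache_trim_mode heuristic are simplified stand-ins for the real kernel structures and logic.

#include <stdio.h>

/* Simplified stand-ins for the kernel's pg_data_t and struct scan_control. */
struct pgdat_stub {
	unsigned long nr_inactive_file;
};

struct scan_control_stub {
	int priority;
	int cache_trim_mode;
};

/*
 * Helper extracted from the main routine, mirroring the role of
 * prepare_scan_count(): compute per-scan state up front.
 */
static void prepare_scan_count_stub(struct pgdat_stub *pgdat,
				    struct scan_control_stub *sc)
{
	unsigned long file = pgdat->nr_inactive_file;

	/*
	 * Enable cache trimming only if plenty of inactive file pages
	 * remain at the current reclaim priority.
	 */
	sc->cache_trim_mode = (file >> sc->priority) ? 1 : 0;
}

/* The caller collapses from a long open-coded block to a single call. */
static void shrink_node_stub(struct pgdat_stub *pgdat,
			     struct scan_control_stub *sc)
{
	prepare_scan_count_stub(pgdat, sc);
	printf("cache_trim_mode = %d\n", sc->cache_trim_mode);
}

int main(void)
{
	struct pgdat_stub pgdat = { .nr_inactive_file = 4096 };
	struct scan_control_stub sc = { .priority = 2, .cache_trim_mode = 0 };

	shrink_node_stub(&pgdat, &sc);
	return 0;
}

Built with a plain cc invocation, the sketch prints cache_trim_mode = 1 for the example values; the point is only that the caller reduces to a single prepare-then-shrink sequence, which is exactly the shape the real patch gives shrink_node().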
diff --git a/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch b/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch
new file mode 100644
index 0000000000..f466f1105c
--- /dev/null
+++ b/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch
@@ -0,0 +1,224 @@
+From a810f8e2f1bdd0707eaf05c8b4ba84a3ff2801bd Mon Sep 17 00:00:00 2001
+From: Yu Zhao <yuzhao@google.com>
+Date: Sun, 27 Sep 2020 20:49:08 -0600
+Subject: [PATCH 03/10] mm/vmscan.c: refactor shrink_node()
+
+This patch refactors shrink_node(). This will make the upcoming
+changes to mm/vmscan.c more readable.
+
+Signed-off-by: Yu Zhao <yuzhao@google.com>
+Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
+Change-Id: Iae734b5b4030205b7db6e8c841f747b6f6ae1a04
+---
+ mm/vmscan.c | 186 +++++++++++++++++++++++++++-------------------------
+ 1 file changed, 98 insertions(+), 88 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2562,6 +2562,103 @@ enum scan_balance {
+ SCAN_FILE,
+ };
+
++static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
++{
++ unsigned long file;
++ struct lruvec *target_lruvec;
++
++ target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
++
++ /*
++ * Determine the scan balance between anon and file LRUs.
++ */
++ spin_lock_irq(&target_lruvec->lru_lock);
++ sc->anon_cost = target_lruvec->anon_cost;
++ sc->file_cost = target_lruvec->file_cost;
++ spin_unlock_irq(&target_lruvec->lru_lock);
++
++ /*
++ * Target desirable inactive:active list ratios for the anon
++ * and file LRU lists.
++ */
++ if (!sc->force_deactivate) {
++ unsigned long refaults;
++
++ refaults = lruvec_page_state(target_lruvec,
++ WORKINGSET_ACTIVATE_ANON);
++ if (refaults != target_lruvec->refaults[0] ||
++ inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
++ sc->may_deactivate |= DEACTIVATE_ANON;
++ else
++ sc->may_deactivate &= ~DEACTIVATE_ANON;
++
++ /*
++ * When refaults are being observed, it means a new
++ * workingset is being established. Deactivate to get
++ * rid of any stale active pages quickly.
++ */
++ refaults = lruvec_page_state(target_lruvec,
++ WORKINGSET_ACTIVATE_FILE);
++ if (refaults != target_lruvec->refaults[1] ||
++ inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
++ sc->may_deactivate |= DEACTIVATE_FILE;
++ else
++ sc->may_deactivate &= ~DEACTIVATE_FILE;
++ } else
++ sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
++
++ /*
++ * If we have plenty of inactive file pages that aren't
++ * thrashing, try to reclaim those first before touching
++ * anonymous pages.
++ */
++ file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
++ if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
++ sc->cache_trim_mode = 1;
++ else
++ sc->cache_trim_mode = 0;
++
++ /*
++ * Prevent the reclaimer from falling into the cache trap: as
++ * cache pages start out inactive, every cache fault will tip
++ * the scan balance towards the file LRU. And as the file LRU
++ * shrinks, so does the window for rotation from references.
++ * This means we have a runaway feedback loop where a tiny
++ * thrashing file LRU becomes infinitely more attractive than
++ * anon pages. Try to detect this based on file LRU size.
++ */
++ if (!cgroup_reclaim(sc)) {
++ unsigned long total_high_wmark = 0;
++ unsigned long free, anon;
++ int z;
++
++ free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
++ file = node_page_state(pgdat, NR_ACTIVE_FILE) +
++ node_page_state(pgdat, NR_INACTIVE_FILE);
++
++ for (z = 0; z < MAX_NR_ZONES; z++) {
++ struct zone *zone = &pgdat->node_zones[z];
++
++ if (!managed_zone(zone))
++ continue;
++
++ total_high_wmark += high_wmark_pages(zone);
++ }
++
++ /*
++ * Consider anon: if that's low too, this isn't a
++ * runaway file reclaim problem, but rather just
++ * extreme pressure. Reclaim as per usual then.
++ */
++ anon = node_page_state(pgdat, NR_INACTIVE_ANON);
++
++ sc->file_is_tiny =
++ file + free <= total_high_wmark &&
++ !(sc->may_deactivate & DEACTIVATE_ANON) &&
++ anon >> sc->priority;
++ }
++}
++
+ /*
+ * Determine how aggressively the anon and file LRU lists should be
+ * scanned. The relative value of each set of LRU lists is determined
+@@ -3032,7 +3129,6 @@ static void shrink_node(pg_data_t *pgdat
+ unsigned long nr_reclaimed, nr_scanned;
+ struct lruvec *target_lruvec;
+ bool reclaimable = false;
+- unsigned long file;
+
+ target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+
+@@ -3048,93 +3144,7 @@ again:
+ nr_reclaimed = sc->nr_reclaimed;
+ nr_scanned = sc->nr_scanned;
+
+- /*
+- * Determine the scan balance between anon and file LRUs.
+- */
+- spin_lock_irq(&target_lruvec->lru_lock);
+- sc->anon_cost = target_lruvec->anon_cost;
+- sc->file_cost = target_lruvec->file_cost;
+- spin_unlock_irq(&target_lruvec->lru_lock);
+-
+- /*
+- * Target desirable inactive:active list ratios for the anon
+- * and file LRU lists.
+- */
+- if (!sc->force_deactivate) {
+- unsigned long refaults;
+-
+- refaults = lruvec_page_state(target_lruvec,
+- WORKINGSET_ACTIVATE_ANON);
+- if (refaults != target_lruvec->refaults[0] ||
+- inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+- sc->may_deactivate |= DEACTIVATE_ANON;
+- else
+- sc->may_deactivate &= ~DEACTIVATE_ANON;
+-
+- /*
+- * When refaults are being observed, it means a new
+- * workingset is being established. Deactivate to get
+- * rid of any stale active pages quickly.
+- */
+- refaults = lruvec_page_state(target_lruvec,
+- WORKINGSET_ACTIVATE_FILE);
+- if (refaults != target_lruvec->refaults[1] ||
+- inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
+- sc->may_deactivate |= DEACTIVATE_FILE;
+- else
+- sc->may_deactivate &= ~DEACTIVATE_FILE;
+- } else
+- sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
+-
+- /*
+- * If we have plenty of inactive file pages that aren't
+- * thrashing, try to reclaim those first before touching
+- * anonymous pages.
+- */
+- file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
+- if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
+- sc->cache_trim_mode = 1;
+- else
+- sc->cache_trim_mode = 0;
+-
+- /*
+- * Prevent the reclaimer from falling into the cache trap: as
+- * cache pages start out inactive, every cache fault will tip
+- * the scan balance towards the file LRU. And as the file LRU
+- * shrinks, so does the window for rotation from references.
+- * This means we have a runaway feedback loop where a tiny
+- * thrashing file LRU becomes infinitely more attractive than
+- * anon pages. Try to detect this based on file LRU size.
+- */
+- if (!cgroup_reclaim(sc)) {
+- unsigned long total_high_wmark = 0;
+- unsigned long free, anon;
+- int z;
+-
+- free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
+- file = node_page_state(pgdat, NR_ACTIVE_FILE) +
+- node_page_state(pgdat, NR_INACTIVE_FILE);
+-
+- for (z = 0; z < MAX_NR_ZONES; z++) {
+- struct zone *zone = &pgdat->node_zones[z];
+- if (!managed_zone(zone))
+- continue;
+-
+- total_high_wmark += high_wmark_pages(zone);
+- }
+-
+- /*
+- * Consider anon: if that's low too, this isn't a
+- * runaway file reclaim problem, but rather just
+- * extreme pressure. Reclaim as per usual then.
+- */
+- anon = node_page_state(pgdat, NR_INACTIVE_ANON);
+-
+- sc->file_is_tiny =
+- file + free <= total_high_wmark &&
+- !(sc->may_deactivate & DEACTIVATE_ANON) &&
+- anon >> sc->priority;
+- }
++ prepare_scan_count(pgdat, sc);
+
+ shrink_node_memcgs(pgdat, sc);
+