From 0d0928f58795e336646ad31ea96d2919b5328f39 Mon Sep 17 00:00:00 2001
From: Kazuki H
Date: Tue, 21 Mar 2023 06:51:03 +0900
Subject: kernel: Update MGLRU patchset

The current patches are old, update them from mainline.

Backports taken from https://github.com/yuzhaogoogle/linux/commits/mglru-5.15

Tested-by: Kazuki H #mt7622/Linksys E8450 UBI
Signed-off-by: Kazuki H
---
 ...-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch | 78 +++++++++++++++++++---
 1 file changed, 67 insertions(+), 11 deletions(-)

(limited to 'target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch')

diff --git a/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch b/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch
index 31161e2a0f..1b9a70dbc1 100644
--- a/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch
+++ b/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch
@@ -1,21 +1,60 @@
-From a810f8e2f1bdd0707eaf05c8b4ba84a3ff2801bd Mon Sep 17 00:00:00 2001
+From 9e17efd11450d3d2069adaa3c58db9ac8ebd1c66 Mon Sep 17 00:00:00 2001
 From: Yu Zhao
-Date: Sun, 27 Sep 2020 20:49:08 -0600
-Subject: [PATCH 03/10] mm/vmscan.c: refactor shrink_node()
+Date: Sun, 18 Sep 2022 02:00:00 -0600
+Subject: [PATCH 03/29] mm/vmscan.c: refactor shrink_node()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
 
-This patch refactors shrink_node(). This will make the upcoming
-changes to mm/vmscan.c more readable.
+This patch refactors shrink_node() to improve readability for the upcoming
+changes to mm/vmscan.c.
 
+Link: https://lkml.kernel.org/r/20220918080010.2920238-4-yuzhao@google.com
 Signed-off-by: Yu Zhao
+Reviewed-by: Barry Song
+Reviewed-by: Miaohe Lin
+Acked-by: Brian Geffon
+Acked-by: Jan Alexander Steffens (heftig)
+Acked-by: Oleksandr Natalenko
+Acked-by: Steven Barrett
+Acked-by: Suleiman Souhlal
+Tested-by: Daniel Byrne
+Tested-by: Donald Carr
+Tested-by: Holger Hoffstätte
 Tested-by: Konstantin Kharlamov
-Change-Id: Iae734b5b4030205b7db6e8c841f747b6f6ae1a04
+Tested-by: Shuang Zhai
+Tested-by: Sofia Trinh
+Tested-by: Vaibhav Jain
+Cc: Andi Kleen
+Cc: Aneesh Kumar K.V
+Cc: Catalin Marinas
+Cc: Dave Hansen
+Cc: Hillf Danton
+Cc: Jens Axboe
+Cc: Johannes Weiner
+Cc: Jonathan Corbet
+Cc: Linus Torvalds
+Cc: Matthew Wilcox
+Cc: Mel Gorman
+Cc: Michael Larabel
+Cc: Michal Hocko
+Cc: Mike Rapoport
+Cc: Mike Rapoport
+Cc: Peter Zijlstra
+Cc: Qi Zheng
+Cc: Tejun Heo
+Cc: Vlastimil Babka
+Cc: Will Deacon
+Signed-off-by: Andrew Morton
 ---
- mm/vmscan.c | 186 +++++++++++++++++++++++++++-------------------------
- 1 file changed, 98 insertions(+), 88 deletions(-)
+ mm/vmscan.c | 198 +++++++++++++++++++++++++++------------------------
+ 1 file changed, 104 insertions(+), 94 deletions(-)
 
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 201acea81804..dc5f0381513f 100644
 --- a/mm/vmscan.c
 +++ b/mm/vmscan.c
-@@ -2497,6 +2497,103 @@ enum scan_balance {
+@@ -2497,6 +2497,109 @@ enum scan_balance {
  SCAN_FILE,
  };
 
@@ -27,6 +66,12 @@ Change-Id: Iae734b5b4030205b7db6e8c841f747b6f6ae1a04
 + target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
 +
 + /*
++ * Flush the memory cgroup stats, so that we read accurate per-memcg
++ * lruvec stats for heuristics.
++ */
++ mem_cgroup_flush_stats();
++
++ /*
 + * Determine the scan balance between anon and file LRUs.
 + */
 + spin_lock_irq(&target_lruvec->lru_lock);
@@ -119,7 +164,7 @@ Change-Id: Iae734b5b4030205b7db6e8c841f747b6f6ae1a04
  /*
   * Determine how aggressively the anon and file LRU lists should be
   * scanned. The relative value of each set of LRU lists is determined
-@@ -2965,7 +3062,6 @@ static void shrink_node(pg_data_t *pgdat
+@@ -2965,109 +3068,16 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
  unsigned long nr_reclaimed, nr_scanned;
  struct lruvec *target_lruvec;
  bool reclaimable = false;
@@ -127,7 +172,15 @@ Change-Id: Iae734b5b4030205b7db6e8c841f747b6f6ae1a04
 
  target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
 
-@@ -2981,93 +3077,7 @@ again:
+ again:
+- /*
+- * Flush the memory cgroup stats, so that we read accurate per-memcg
+- * lruvec stats for heuristics.
+- */
+- mem_cgroup_flush_stats();
+-
+ memset(&sc->nr, 0, sizeof(sc->nr));
+
  nr_reclaimed = sc->nr_reclaimed;
  nr_scanned = sc->nr_scanned;
 
@@ -222,3 +275,6 @@ Change-Id: Iae734b5b4030205b7db6e8c841f747b6f6ae1a04
 
  shrink_node_memcgs(pgdat, sc);
 
+--
+2.40.0
+
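
For review purposes, the updated backport can be read as an extract-function refactor: the scan-balance setup that shrink_node() used to run inline before shrink_node_memcgs(), including the mem_cgroup_flush_stats() call that this update relocates, now lives in a single helper (prepare_scan_count() in the mainline commit this file backports). The sketch below is a simplified, self-contained model of that split, not kernel code: pg_data_t and struct scan_control are stand-ins with invented fields, and the heuristic body is a placeholder.

/*
 * Simplified model of the refactor -- NOT kernel code. The types below are
 * stand-ins so the example compiles on its own; the real logic is in
 * mm/vmscan.c as shown in the patch above.
 */
#include <stdio.h>
#include <string.h>

struct scan_control {
        struct { unsigned long taken, reclaimed; } nr;  /* stand-in for sc->nr */
        int scan_balance;                               /* stand-in for the result */
};

typedef struct { int node_id; } pg_data_t;              /* stand-in for the real pg_data_t */

/* The block shrink_node() used to run inline now sits in one helper. */
static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
{
        /* In the kernel this flushes memcg stats and derives the anon/file
         * scan balance; here it is only a placeholder decision. */
        sc->scan_balance = (pgdat->node_id == 0) ? 0 : 1;
}

static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
{
        memset(&sc->nr, 0, sizeof(sc->nr));
        prepare_scan_count(pgdat, sc);   /* previously ~90 inline lines */
        /* shrink_node_memcgs(pgdat, sc); and the rest follow in the kernel */
}

int main(void)
{
        pg_data_t node = { .node_id = 0 };
        struct scan_control sc = { .scan_balance = -1 };

        shrink_node(&node, &sc);
        printf("scan_balance=%d reclaimed=%lu\n", sc.scan_balance, sc.nr.reclaimed);
        return 0;
}

The point of the split is that the later patches in the MGLRU series can rework page aging without having to edit this block inside shrink_node() again, which is what the upstream commit message means by making the upcoming changes more readable.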