author	Felix Fietkau <nbd@nbd.name>	2021-04-10 13:20:04 +0200
committer	Felix Fietkau <nbd@nbd.name>	2021-04-10 16:14:34 +0200
commit	f07fe36f22fcf3f3da4e0440dfc5c39516e2cb55 (patch)
tree	dae926ce58c604551a2e1ac09834cae4c222ef30 /target/linux/generic/backport-5.10/610-v5.13-01-netfilter-flowtable-separate-replace-destroy-and-sta.patch
parent	012a9aa00b3e193c93600ac707dfb5bfb1bd4609 (diff)
kernel: update flow offload patches to upstream version
Move patches to backport-5.10, since the series was accepted upstream.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'target/linux/generic/backport-5.10/610-v5.13-01-netfilter-flowtable-separate-replace-destroy-and-sta.patch')
-rw-r--r--	target/linux/generic/backport-5.10/610-v5.13-01-netfilter-flowtable-separate-replace-destroy-and-sta.patch	98
1 file changed, 98 insertions, 0 deletions
diff --git a/target/linux/generic/backport-5.10/610-v5.13-01-netfilter-flowtable-separate-replace-destroy-and-sta.patch b/target/linux/generic/backport-5.10/610-v5.13-01-netfilter-flowtable-separate-replace-destroy-and-sta.patch
new file mode 100644
index 0000000000..86184dd450
--- /dev/null
+++ b/target/linux/generic/backport-5.10/610-v5.13-01-netfilter-flowtable-separate-replace-destroy-and-sta.patch
@@ -0,0 +1,98 @@
+From: Oz Shlomo <ozsh@nvidia.com>
+Date: Tue, 23 Mar 2021 00:56:19 +0100
+Subject: [PATCH] netfilter: flowtable: separate replace, destroy and
+ stats to different workqueues
+
+Currently the flow table offload replace, destroy and stats work items are
+executed on a single workqueue. As such, DESTROY and STATS commands may
+be backlogged after a burst of REPLACE work items. This scenario can bloat
+memory usage and may cause active connections to age out.
+
+Instantiate add, del and stats workqueues to avoid backlogs of non-dependent
+actions. Provide sysfs control over the workqueue attributes, allowing
+userspace applications to control the workqueue cpumask.
+
+Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
+Reviewed-by: Paul Blakey <paulb@nvidia.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -13,7 +13,9 @@
+ #include <net/netfilter/nf_conntrack_core.h>
+ #include <net/netfilter/nf_conntrack_tuple.h>
+
+-static struct workqueue_struct *nf_flow_offload_wq;
++static struct workqueue_struct *nf_flow_offload_add_wq;
++static struct workqueue_struct *nf_flow_offload_del_wq;
++static struct workqueue_struct *nf_flow_offload_stats_wq;
+
+ struct flow_offload_work {
+ struct list_head list;
+@@ -826,7 +828,12 @@ static void flow_offload_work_handler(st
+
+ static void flow_offload_queue_work(struct flow_offload_work *offload)
+ {
+- queue_work(nf_flow_offload_wq, &offload->work);
++ if (offload->cmd == FLOW_CLS_REPLACE)
++ queue_work(nf_flow_offload_add_wq, &offload->work);
++ else if (offload->cmd == FLOW_CLS_DESTROY)
++ queue_work(nf_flow_offload_del_wq, &offload->work);
++ else
++ queue_work(nf_flow_offload_stats_wq, &offload->work);
+ }
+
+ static struct flow_offload_work *
+@@ -898,8 +905,11 @@ void nf_flow_offload_stats(struct nf_flo
+
+ void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
+ {
+- if (nf_flowtable_hw_offload(flowtable))
+- flush_workqueue(nf_flow_offload_wq);
++ if (nf_flowtable_hw_offload(flowtable)) {
++ flush_workqueue(nf_flow_offload_add_wq);
++ flush_workqueue(nf_flow_offload_del_wq);
++ flush_workqueue(nf_flow_offload_stats_wq);
++ }
+ }
+
+ static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
+@@ -1011,15 +1021,33 @@ EXPORT_SYMBOL_GPL(nf_flow_table_offload_
+
+ int nf_flow_table_offload_init(void)
+ {
+- nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload",
+- WQ_UNBOUND, 0);
+- if (!nf_flow_offload_wq)
++ nf_flow_offload_add_wq = alloc_workqueue("nf_ft_offload_add",
++ WQ_UNBOUND | WQ_SYSFS, 0);
++ if (!nf_flow_offload_add_wq)
+ return -ENOMEM;
+
++ nf_flow_offload_del_wq = alloc_workqueue("nf_ft_offload_del",
++ WQ_UNBOUND | WQ_SYSFS, 0);
++ if (!nf_flow_offload_del_wq)
++ goto err_del_wq;
++
++ nf_flow_offload_stats_wq = alloc_workqueue("nf_ft_offload_stats",
++ WQ_UNBOUND | WQ_SYSFS, 0);
++ if (!nf_flow_offload_stats_wq)
++ goto err_stats_wq;
++
+ return 0;
++
++err_stats_wq:
++ destroy_workqueue(nf_flow_offload_del_wq);
++err_del_wq:
++ destroy_workqueue(nf_flow_offload_add_wq);
++ return -ENOMEM;
+ }
+
+ void nf_flow_table_offload_exit(void)
+ {
+- destroy_workqueue(nf_flow_offload_wq);
++ destroy_workqueue(nf_flow_offload_add_wq);
++ destroy_workqueue(nf_flow_offload_del_wq);
++ destroy_workqueue(nf_flow_offload_stats_wq);
+ }