author:    Felix Fietkau <nbd@nbd.name>  2021-04-10 13:20:04 +0200
committer: Felix Fietkau <nbd@nbd.name>  2021-04-10 16:14:34 +0200
commit:    f07fe36f22fcf3f3da4e0440dfc5c39516e2cb55 (patch)
tree:      dae926ce58c604551a2e1ac09834cae4c222ef30 /target/linux/generic/backport-5.10/610-v5.13-25-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
parent:    012a9aa00b3e193c93600ac707dfb5bfb1bd4609 (diff)
kernel: update flow offload patches to upstream version
Move patches to backport-5.10, since the series was accepted upstream.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'target/linux/generic/backport-5.10/610-v5.13-25-netfilter-flowtable-add-offload-support-for-xmit-pat.patch')
-rw-r--r--  target/linux/generic/backport-5.10/610-v5.13-25-netfilter-flowtable-add-offload-support-for-xmit-pat.patch | 310
1 file changed, 310 insertions, 0 deletions
diff --git a/target/linux/generic/backport-5.10/610-v5.13-25-netfilter-flowtable-add-offload-support-for-xmit-pat.patch b/target/linux/generic/backport-5.10/610-v5.13-25-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
new file mode 100644
index 0000000000..7b6ec68d55
--- /dev/null
+++ b/target/linux/generic/backport-5.10/610-v5.13-25-netfilter-flowtable-add-offload-support-for-xmit-pat.patch
@@ -0,0 +1,310 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Wed, 24 Mar 2021 02:30:46 +0100
+Subject: [PATCH] netfilter: flowtable: add offload support for xmit path
+ types
+
+When the flow tuple xmit_type is set to FLOW_OFFLOAD_XMIT_DIRECT, the
+dst_cache pointer is not valid, and the h_source/h_dest/ifidx out fields
+need to be used.
+
+This patch also adds the FLOW_ACTION_VLAN_PUSH action to pass the VLAN
+tag to the driver.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
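[Editor's note: the core idea of this patch is that each flow tuple now carries an xmit_type, and the direct type bypasses the route (dst_cache) entirely in favour of L2 fields cached in the tuple itself. Below is a minimal userspace sketch of that dispatch, assuming simplified stand-in types; struct tuple here is a hypothetical reduction of the kernel's flow_offload_tuple, not kernel code.

/*
 * Minimal userspace sketch (not kernel code) of the xmit_type dispatch
 * this patch introduces. Only the fields the patch touches are modeled.
 */
#include <stdio.h>

enum xmit_type {
	XMIT_NEIGH,   /* L2 info resolved via dst_cache/neighbour */
	XMIT_DIRECT,  /* L2 info precomputed in the tuple itself  */
};

struct tuple {
	enum xmit_type xmit_type;
	unsigned char h_source[6];  /* valid only for XMIT_DIRECT */
	unsigned char h_dest[6];    /* valid only for XMIT_DIRECT */
	int out_ifidx;              /* valid only for XMIT_DIRECT */
	void *dst_cache;            /* valid only for XMIT_NEIGH  */
};

/* Return the bytes to write as the Ethernet source address, or NULL
 * for an unsupported type (mirrors the -EOPNOTSUPP branch below). */
static const unsigned char *src_mac(const struct tuple *t,
				    const unsigned char *neigh_mac)
{
	switch (t->xmit_type) {
	case XMIT_DIRECT:
		return t->h_source;  /* no dst lookup needed */
	case XMIT_NEIGH:
		return neigh_mac;    /* came from device/neighbour lookup */
	default:
		return NULL;
	}
}

int main(void)
{
	struct tuple t = {
		.xmit_type = XMIT_DIRECT,
		.h_source  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	const unsigned char *m = src_mac(&t, NULL);

	printf("src mac starts %02x:%02x\n", m[0], m[1]);
	return 0;
}

The diff below applies exactly this pattern in flow_offload_eth_src() and flow_offload_eth_dst().]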
+--- a/net/netfilter/nf_flow_table_offload.c
++++ b/net/netfilter/nf_flow_table_offload.c
+@@ -177,28 +177,45 @@ static int flow_offload_eth_src(struct n
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
+- const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
+ struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+ struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+- struct net_device *dev;
++ const struct flow_offload_tuple *other_tuple, *this_tuple;
++ struct net_device *dev = NULL;
++ const unsigned char *addr;
+ u32 mask, val;
+ u16 val16;
+
+- dev = dev_get_by_index(net, tuple->iifidx);
+- if (!dev)
+- return -ENOENT;
++ this_tuple = &flow->tuplehash[dir].tuple;
++
++ switch (this_tuple->xmit_type) {
++ case FLOW_OFFLOAD_XMIT_DIRECT:
++ addr = this_tuple->out.h_source;
++ break;
++ case FLOW_OFFLOAD_XMIT_NEIGH:
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ dev = dev_get_by_index(net, other_tuple->iifidx);
++ if (!dev)
++ return -ENOENT;
++
++ addr = dev->dev_addr;
++ break;
++ default:
++ return -EOPNOTSUPP;
++ }
+
+ mask = ~0xffff0000;
+- memcpy(&val16, dev->dev_addr, 2);
++ memcpy(&val16, addr, 2);
+ val = val16 << 16;
+ flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
+ &val, &mask);
+
+ mask = ~0xffffffff;
+- memcpy(&val, dev->dev_addr + 2, 4);
++ memcpy(&val, addr + 2, 4);
+ flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
+ &val, &mask);
+- dev_put(dev);
++
++ if (dev)
++ dev_put(dev);
+
+ return 0;
+ }
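[Editor's note: the hunk above splits the 6-byte source MAC across two 32-bit mangle actions because flow_offload_mangle() writes one masked 32-bit word at a time: the first two bytes share the word at Ethernet-header offset 4 with the tail of the destination MAC (hence the ~0xffff0000 mask), and the remaining four bytes fill the word at offset 8. A standalone sketch of that packing, with struct mangle as a stand-in for the flow action entry and byte-order details glossed over:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct mangle {
	unsigned int offset;  /* byte offset into the Ethernet header   */
	uint32_t mask;        /* bits NOT set in mask are the ones written */
	uint32_t val;
};

static void mac_to_mangles(const unsigned char mac[6], struct mangle m[2])
{
	uint16_t val16;
	uint32_t val;

	/* first 2 MAC bytes share a word with the end of the dest MAC */
	memcpy(&val16, mac, 2);
	m[0] = (struct mangle){ .offset = 4, .mask = ~0xffff0000u,
				.val = (uint32_t)val16 << 16 };

	/* remaining 4 MAC bytes fill the next word entirely */
	memcpy(&val, mac + 2, 4);
	m[1] = (struct mangle){ .offset = 8, .mask = ~0xffffffffu, .val = val };
}

int main(void)
{
	const unsigned char mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
	struct mangle m[2];

	mac_to_mangles(mac, m);
	for (int i = 0; i < 2; i++)
		printf("off %u mask %08x val %08x\n",
		       m[i].offset, m[i].mask, m[i].val);
	return 0;
}
]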
+@@ -210,27 +227,40 @@ static int flow_offload_eth_dst(struct n
+ {
+ struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
+ struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
+- const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
++ const struct flow_offload_tuple *other_tuple, *this_tuple;
+ const struct dst_entry *dst_cache;
+ unsigned char ha[ETH_ALEN];
+ struct neighbour *n;
++ const void *daddr;
+ u32 mask, val;
+ u8 nud_state;
+ u16 val16;
+
+- dst_cache = flow->tuplehash[dir].tuple.dst_cache;
+- n = dst_neigh_lookup(dst_cache, daddr);
+- if (!n)
+- return -ENOENT;
+-
+- read_lock_bh(&n->lock);
+- nud_state = n->nud_state;
+- ether_addr_copy(ha, n->ha);
+- read_unlock_bh(&n->lock);
++ this_tuple = &flow->tuplehash[dir].tuple;
+
+- if (!(nud_state & NUD_VALID)) {
++ switch (this_tuple->xmit_type) {
++ case FLOW_OFFLOAD_XMIT_DIRECT:
++ ether_addr_copy(ha, this_tuple->out.h_dest);
++ break;
++ case FLOW_OFFLOAD_XMIT_NEIGH:
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ daddr = &other_tuple->src_v4;
++ dst_cache = this_tuple->dst_cache;
++ n = dst_neigh_lookup(dst_cache, daddr);
++ if (!n)
++ return -ENOENT;
++
++ read_lock_bh(&n->lock);
++ nud_state = n->nud_state;
++ ether_addr_copy(ha, n->ha);
++ read_unlock_bh(&n->lock);
+ neigh_release(n);
+- return -ENOENT;
++
++ if (!(nud_state & NUD_VALID))
++ return -ENOENT;
++ break;
++ default:
++ return -EOPNOTSUPP;
+ }
+
+ mask = ~0xffffffff;
+@@ -243,7 +273,6 @@ static int flow_offload_eth_dst(struct n
+ val = val16;
+ flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
+ &val, &mask);
+- neigh_release(n);
+
+ return 0;
+ }
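[Editor's note: in the NEIGH branch above, nud_state and the hardware address are snapshotted together under the neighbour's read lock, the reference is released, and only then is the state validated; the DIRECT branch simply copies the tuple's cached h_dest. A userspace analogue of that snapshot-then-validate pattern, using a hypothetical struct neigh and pthreads (build with -lpthread), not the kernel's struct neighbour:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NUD_VALID 0x1

struct neigh {
	pthread_rwlock_t lock;
	unsigned int nud_state;
	unsigned char ha[6];  /* hardware address */
};

static int snapshot_ha(struct neigh *n, unsigned char out[6])
{
	unsigned int state;

	pthread_rwlock_rdlock(&n->lock);
	state = n->nud_state;       /* snapshot both fields together */
	memcpy(out, n->ha, 6);
	pthread_rwlock_unlock(&n->lock);

	/* validate only after dropping the lock, as the patch does
	 * after neigh_release() */
	return (state & NUD_VALID) ? 0 : -1;
}

int main(void)
{
	struct neigh n = { .nud_state = NUD_VALID,
			   .ha = { 2, 0, 0, 0, 0, 1 } };
	unsigned char ha[6];

	pthread_rwlock_init(&n.lock, NULL);
	printf("valid=%d first=%02x\n", snapshot_ha(&n, ha) == 0, ha[0]);
	pthread_rwlock_destroy(&n.lock);
	return 0;
}
]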
+@@ -465,27 +494,52 @@ static void flow_offload_ipv4_checksum(s
+ }
+ }
+
+-static void flow_offload_redirect(const struct flow_offload *flow,
++static void flow_offload_redirect(struct net *net,
++ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
+- struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+- struct rtable *rt;
++ const struct flow_offload_tuple *this_tuple, *other_tuple;
++ struct flow_action_entry *entry;
++ struct net_device *dev;
++ int ifindex;
++
++ this_tuple = &flow->tuplehash[dir].tuple;
++ switch (this_tuple->xmit_type) {
++ case FLOW_OFFLOAD_XMIT_DIRECT:
++ this_tuple = &flow->tuplehash[dir].tuple;
++ ifindex = this_tuple->out.ifidx;
++ break;
++ case FLOW_OFFLOAD_XMIT_NEIGH:
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ ifindex = other_tuple->iifidx;
++ break;
++ default:
++ return;
++ }
+
+- rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
++ dev = dev_get_by_index(net, ifindex);
++ if (!dev)
++ return;
++
++ entry = flow_action_entry_next(flow_rule);
+ entry->id = FLOW_ACTION_REDIRECT;
+- entry->dev = rt->dst.dev;
+- dev_hold(rt->dst.dev);
++ entry->dev = dev;
+ }
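[Editor's note: flow_offload_redirect() now derives the egress ifindex from the tuple rather than from the route: a direct-xmit tuple carries its own out.ifidx, while a neighbour-based flow redirects to the interface the reverse direction arrived on, and dev_get_by_index() supplies the device reference the redirect action keeps (previously the code held rt->dst.dev). A sketch of just the index selection, using the same simplified stand-in types as the earlier sketches:

#include <stdio.h>

enum xmit_type { XMIT_NEIGH, XMIT_DIRECT };

struct tuple {
	enum xmit_type xmit_type;
	int out_ifidx;  /* egress ifindex, valid for XMIT_DIRECT */
	int iifidx;     /* ingress ifindex of this direction     */
};

/* Mirrors the switch above: DIRECT uses its own cached egress
 * ifindex; NEIGH redirects to the interface the reply direction came
 * in on. Returns -1 for unsupported types (the kernel function simply
 * returns without adding an action). */
static int redirect_ifindex(const struct tuple *this_t,
			    const struct tuple *other_t)
{
	switch (this_t->xmit_type) {
	case XMIT_DIRECT:
		return this_t->out_ifidx;
	case XMIT_NEIGH:
		return other_t->iifidx;
	default:
		return -1;
	}
}

int main(void)
{
	struct tuple orig = { .xmit_type = XMIT_NEIGH, .iifidx = 3 };
	struct tuple repl = { .xmit_type = XMIT_NEIGH, .iifidx = 7 };

	/* original direction redirects to the reply's ingress device */
	printf("ifindex=%d\n", redirect_ifindex(&orig, &repl));
	return 0;
}
]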
+
+ static void flow_offload_encap_tunnel(const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
++ const struct flow_offload_tuple *this_tuple;
+ struct flow_action_entry *entry;
+ struct dst_entry *dst;
+
+- dst = flow->tuplehash[dir].tuple.dst_cache;
++ this_tuple = &flow->tuplehash[dir].tuple;
++ if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
++ return;
++
++ dst = this_tuple->dst_cache;
+ if (dst && dst->lwtstate) {
+ struct ip_tunnel_info *tun_info;
+
+@@ -502,10 +556,15 @@ static void flow_offload_decap_tunnel(co
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
++ const struct flow_offload_tuple *other_tuple;
+ struct flow_action_entry *entry;
+ struct dst_entry *dst;
+
+- dst = flow->tuplehash[!dir].tuple.dst_cache;
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
++ return;
++
++ dst = other_tuple->dst_cache;
+ if (dst && dst->lwtstate) {
+ struct ip_tunnel_info *tun_info;
+
+@@ -517,10 +576,14 @@ static void flow_offload_decap_tunnel(co
+ }
+ }
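[Editor's note: both tunnel helpers gain an early return for direct-xmit tuples, since tunnel metadata lives on the route (dst->lwtstate) and a FLOW_OFFLOAD_XMIT_DIRECT tuple has no valid dst_cache to read it from. A sketch of the guard, again with hypothetical stand-in types:

#include <stdio.h>
#include <stddef.h>

enum xmit_type { XMIT_NEIGH, XMIT_DIRECT };

struct lwtstate { int tun_id; };
struct dst { struct lwtstate *lwtstate; };
struct tuple {
	enum xmit_type xmit_type;
	struct dst *dst_cache;  /* not valid for XMIT_DIRECT */
};

/* Returns the tunnel state to encode, or NULL when there is none. */
static struct lwtstate *tunnel_state(const struct tuple *t)
{
	if (t->xmit_type == XMIT_DIRECT)
		return NULL;            /* no route, no tunnel metadata */
	if (t->dst_cache && t->dst_cache->lwtstate)
		return t->dst_cache->lwtstate;
	return NULL;
}

int main(void)
{
	struct lwtstate lwt = { .tun_id = 42 };
	struct dst d = { .lwtstate = &lwt };
	struct tuple t = { .xmit_type = XMIT_NEIGH, .dst_cache = &d };

	printf("tun_id=%d\n", tunnel_state(&t)->tun_id);
	return 0;
}
]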
+
+-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+- enum flow_offload_tuple_dir dir,
+- struct nf_flow_rule *flow_rule)
++static int
++nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
++ enum flow_offload_tuple_dir dir,
++ struct nf_flow_rule *flow_rule)
+ {
++ const struct flow_offload_tuple *other_tuple;
++ int i;
++
+ flow_offload_decap_tunnel(flow, dir, flow_rule);
+ flow_offload_encap_tunnel(flow, dir, flow_rule);
+
+@@ -528,6 +591,26 @@ int nf_flow_rule_route_ipv4(struct net *
+ flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
+ return -1;
+
++ other_tuple = &flow->tuplehash[!dir].tuple;
++
++ for (i = 0; i < other_tuple->encap_num; i++) {
++ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
++
++ entry->id = FLOW_ACTION_VLAN_PUSH;
++ entry->vlan.vid = other_tuple->encap[i].id;
++ entry->vlan.proto = other_tuple->encap[i].proto;
++ }
++
++ return 0;
++}
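[Editor's note: the new common helper above also emits one FLOW_ACTION_VLAN_PUSH action per encap record on the reverse tuple, which is how the VLAN tag reaches the driver. A sketch of that loop over a hypothetical encap array:

#include <stdio.h>

struct encap { unsigned short id, proto; };
struct vlan_action { unsigned short vid, proto; };

/* One push action per encap record, in array order, as in the loop
 * in nf_flow_rule_route_common(). Returns the number of actions. */
static int build_vlan_pushes(const struct encap *encap, int encap_num,
			     struct vlan_action *out)
{
	for (int i = 0; i < encap_num; i++) {
		out[i].vid = encap[i].id;
		out[i].proto = encap[i].proto;
	}
	return encap_num;
}

int main(void)
{
	const struct encap encap[2] = {
		{ .id = 100, .proto = 0x8100 },  /* 802.1Q  */
		{ .id = 200, .proto = 0x88a8 },  /* 802.1ad */
	};
	struct vlan_action acts[2];
	int n = build_vlan_pushes(encap, 2, acts);

	for (int i = 0; i < n; i++)
		printf("push vid %u proto 0x%04x\n",
		       acts[i].vid, acts[i].proto);
	return 0;
}
]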
++
++int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
++ enum flow_offload_tuple_dir dir,
++ struct nf_flow_rule *flow_rule)
++{
++ if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
++ return -1;
++
+ if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
+ flow_offload_ipv4_snat(net, flow, dir, flow_rule);
+ flow_offload_port_snat(net, flow, dir, flow_rule);
+@@ -540,7 +623,7 @@ int nf_flow_rule_route_ipv4(struct net *
+ test_bit(NF_FLOW_DNAT, &flow->flags))
+ flow_offload_ipv4_checksum(net, flow, flow_rule);
+
+- flow_offload_redirect(flow, dir, flow_rule);
++ flow_offload_redirect(net, flow, dir, flow_rule);
+
+ return 0;
+ }
+@@ -550,11 +633,7 @@ int nf_flow_rule_route_ipv6(struct net *
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule)
+ {
+- flow_offload_decap_tunnel(flow, dir, flow_rule);
+- flow_offload_encap_tunnel(flow, dir, flow_rule);
+-
+- if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
+- flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
++ if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
+ return -1;
+
+ if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
+@@ -566,7 +645,7 @@ int nf_flow_rule_route_ipv6(struct net *
+ flow_offload_port_dnat(net, flow, dir, flow_rule);
+ }
+
+- flow_offload_redirect(flow, dir, flow_rule);
++ flow_offload_redirect(net, flow, dir, flow_rule);
+
+ return 0;
+ }
+@@ -580,10 +659,10 @@ nf_flow_offload_rule_alloc(struct net *n
+ enum flow_offload_tuple_dir dir)
+ {
+ const struct nf_flowtable *flowtable = offload->flowtable;
++ const struct flow_offload_tuple *tuple, *other_tuple;
+ const struct flow_offload *flow = offload->flow;
+- const struct flow_offload_tuple *tuple;
++ struct dst_entry *other_dst = NULL;
+ struct nf_flow_rule *flow_rule;
+- struct dst_entry *other_dst;
+ int err = -ENOMEM;
+
+ flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
+@@ -599,7 +678,10 @@ nf_flow_offload_rule_alloc(struct net *n
+ flow_rule->rule->match.key = &flow_rule->match.key;
+
+ tuple = &flow->tuplehash[dir].tuple;
+- other_dst = flow->tuplehash[!dir].tuple.dst_cache;
++ other_tuple = &flow->tuplehash[!dir].tuple;
++ if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
++ other_dst = other_tuple->dst_cache;
++
+ err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
+ if (err < 0)
+ goto err_flow_match;
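[Editor's note: the final hunk makes nf_flow_offload_rule_alloc() pass a dst into nf_flow_rule_match() only for neighbour-based flows; for direct xmit there is no route to derive match keys from, so other_dst stays NULL. A last sketch of that guard, with the same stand-in types as before:

#include <stdio.h>
#include <stddef.h>

enum xmit_type { XMIT_NEIGH, XMIT_DIRECT };
struct dst { int ifindex; };
struct tuple { enum xmit_type xmit_type; struct dst *dst_cache; };

/* NULL means "build the match without route-derived keys". */
static struct dst *match_dst(const struct tuple *other)
{
	return other->xmit_type == XMIT_NEIGH ? other->dst_cache : NULL;
}

int main(void)
{
	struct dst d = { .ifindex = 4 };
	struct tuple neigh = { XMIT_NEIGH, &d }, direct = { XMIT_DIRECT, &d };

	printf("neigh:%p direct:%p\n",
	       (void *)match_dst(&neigh), (void *)match_dst(&direct));
	return 0;
}
]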