kernel: update 5.10 flow offload patches

Includes PPPoE support and VLAN-related fixes

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Felix Fietkau 2021-03-08 17:20:20 +01:00
parent e90e75b12c
commit 0d5bf53197
25 changed files with 845 additions and 473 deletions

View File

@ -1,6 +1,6 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 20 Feb 2018 15:56:02 +0100
Subject: [PATCH] netfilter: add xt_OFFLOAD target
Subject: [PATCH] netfilter: add xt_FLOWOFFLOAD target
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
@ -98,7 +98,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
--- /dev/null
+++ b/net/netfilter/xt_FLOWOFFLOAD.c
@@ -0,0 +1,660 @@
@@ -0,0 +1,658 @@
+/*
+ * Copyright (C) 2018-2021 Felix Fietkau <nbd@nbd.name>
+ *
@ -265,20 +265,14 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+xt_flowoffload_check_hook(struct flow_offload *flow, void *data)
+{
+ struct xt_flowoffload_table *table = data;
+ struct flow_offload_tuple *tuple = &flow->tuplehash[0].tuple;
+ struct flow_offload_tuple *tuple0 = &flow->tuplehash[0].tuple;
+ struct flow_offload_tuple *tuple1 = &flow->tuplehash[1].tuple;
+ struct xt_flowoffload_hook *hook;
+
+ spin_lock_bh(&hooks_lock);
+ hlist_for_each_entry(hook, &table->hooks, list) {
+ int ifindex;
+
+ if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
+ ifindex = tuple->out.ifidx;
+ else
+ ifindex = tuple->dst_cache->dev->ifindex;
+
+ if (hook->ops.dev->ifindex != tuple->iifidx &&
+ hook->ops.dev->ifindex != ifindex)
+ if (hook->ops.dev->ifindex != tuple0->iifidx &&
+ hook->ops.dev->ifindex != tuple1->iifidx)
+ continue;
+
+ hook->used = true;
@ -357,6 +351,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ int i;
+
+ route->tuple[!dir].in.ifindex = dev->ifindex;
+ route->tuple[dir].out.ifindex = dev->ifindex;
+
+ if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_XFRM)
+ return;
@ -386,52 +381,54 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ prev_type = DEV_PATH_ETHERNET;
+ for (i = 0; i <= stack.num_paths; i++) {
+ const struct net_device_path *path = &stack.path[i];
+ int n_vlans = route->tuple[!dir].in.num_vlans;
+ int n_encaps = route->tuple[!dir].in.num_encaps;
+
+ dev = (struct net_device *)path->dev;
+ if (flow_is_valid_ether_device(dev)) {
+ if (route->tuple[dir].xmit_type != FLOW_OFFLOAD_XMIT_DIRECT)
+ if (route->tuple[dir].xmit_type != FLOW_OFFLOAD_XMIT_DIRECT) {
+ memcpy(route->tuple[dir].out.h_source,
+ dev->dev_addr, ETH_ALEN);
+ route->tuple[dir].out.ifindex = dev->ifindex;
+ }
+ route->tuple[dir].xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
+ route->tuple[dir].out.ifindex = dev->ifindex;
+ }
+
+ switch (path->type) {
+ case DEV_PATH_PPPOE:
+ case DEV_PATH_VLAN:
+ if (n_vlans >= NF_FLOW_TABLE_VLAN_MAX ||
+ if (n_encaps >= NF_FLOW_TABLE_ENCAP_MAX ||
+ i == stack.num_paths) {
+ last = true;
+ break;
+ }
+
+ route->tuple[!dir].in.num_vlans++;
+ route->tuple[!dir].in.vid[n_vlans] = path->vlan.id;
+ route->tuple[!dir].in.vproto[n_vlans] = path->vlan.proto;
+ route->tuple[!dir].in.num_encaps++;
+ route->tuple[!dir].in.encap[n_encaps].id = path->encap.id;
+ route->tuple[!dir].in.encap[n_encaps].proto = path->encap.proto;
+ if (path->type == DEV_PATH_PPPOE)
+ memcpy(route->tuple[dir].out.h_dest,
+ path->encap.h_dest, ETH_ALEN);
+ break;
+ case DEV_PATH_BRIDGE:
+ switch (path->bridge.vlan_mode) {
+ case DEV_PATH_BR_VLAN_TAG:
+ if (n_vlans >= NF_FLOW_TABLE_VLAN_MAX ||
+ if (n_encaps >= NF_FLOW_TABLE_ENCAP_MAX ||
+ i == stack.num_paths) {
+ last = true;
+ break;
+ }
+
+ route->tuple[!dir].in.num_vlans++;
+ route->tuple[!dir].in.vid[n_vlans] =
+ route->tuple[!dir].in.num_encaps++;
+ route->tuple[!dir].in.encap[n_encaps].id =
+ path->bridge.vlan_id;
+ route->tuple[!dir].in.vproto[n_vlans] =
+ route->tuple[!dir].in.encap[n_encaps].proto =
+ path->bridge.vlan_proto;
+ break;
+ case DEV_PATH_BR_VLAN_UNTAG_HW:
+ route->tuple[!dir].in.pvid.id =
+ route->tuple[!dir].in.vid[n_vlans - 1];
+ route->tuple[!dir].in.pvid.proto =
+ route->tuple[!dir].in.vproto[n_vlans - 1];
+ fallthrough;
+ case DEV_PATH_BR_VLAN_UNTAG:
+ route->tuple[!dir].in.num_vlans--;
+ route->tuple[!dir].in.num_encaps--;
+ break;
+ case DEV_PATH_BR_VLAN_UNTAG_HW:
+ route->tuple[!dir].in.ingress_vlans |= BIT(n_encaps - 1);
+ break;
+ case DEV_PATH_BR_VLAN_KEEP:
+ break;
@ -447,6 +444,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+ }
+
+ *out_dev = dev;
+ route->tuple[dir].out.hw_ifindex = dev->ifindex;
+ route->tuple[!dir].in.ifindex = dev->ifindex;
+}
+
@ -769,7 +767,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -355,8 +354,7 @@ flow_offload_lookup(struct nf_flowtable
@@ -356,8 +355,7 @@ flow_offload_lookup(struct nf_flowtable
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
@ -779,7 +777,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
void (*iter)(struct flow_offload *flow, void *data),
void *data)
{
@@ -388,6 +386,7 @@ nf_flow_table_iterate(struct nf_flowtabl
@@ -389,6 +387,7 @@ nf_flow_table_iterate(struct nf_flowtabl
return err;
}
@ -809,7 +807,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
+#endif /* _XT_FLOWOFFLOAD_H */
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -265,6 +265,10 @@ void nf_flow_table_free(struct nf_flowta
@@ -266,6 +266,10 @@ void nf_flow_table_free(struct nf_flowta
void flow_offload_teardown(struct flow_offload *flow);
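
For orientation, the target above plugs into x_tables through the standard registration API; a minimal sketch of that boilerplate is shown below (the callback and info-struct names are illustrative, not copied from the patch, and the hook/table management that makes up most of xt_FLOWOFFLOAD.c is omitted):

    static struct xt_target flowoffload_tg_reg __read_mostly = {
            .family     = NFPROTO_UNSPEC,
            .name       = "FLOWOFFLOAD",
            .revision   = 0,
            .target     = flowoffload_tg,         /* per packet: create the flowtable entry */
            .checkentry = flowoffload_chk,        /* on rule insertion: attach the hooks */
            .destroy    = flowoffload_tg_destroy, /* on rule removal: release them */
            .targetsize = sizeof(struct xt_flowoffload_target_info),
            .me         = THIS_MODULE,
    };

    static int __init xt_flowoffload_tg_init(void)
    {
            return xt_register_target(&flowoffload_tg_reg);
    }
    module_init(xt_flowoffload_tg_init);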

View File

@ -1,5 +1,5 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Sun, 10 Jan 2021 15:53:58 +0100
Date: Thu, 4 Mar 2021 23:18:11 +0100
Subject: [PATCH] net: resolve forwarding path from virtual netdevice and
HW destination address
@ -91,7 +91,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
* If a device is paired with a peer device, return the peer instance.
* The caller must be under RCU read context.
+ * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
+ * Get the forwarding path to reach the real device from the HW destination address
+ * Get the forwarding path to reach the real device from the HW destination address
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@ -99,8 +99,8 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
int (*ndo_tunnel_ctl)(struct net_device *dev,
struct ip_tunnel_parm *p, int cmd);
struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
+ int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
+ struct net_device_path *path);
+ int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
+ struct net_device_path *path);
};
/**
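
The hunk above only documents the new callback; its consumer, dev_fill_forward_path(), walks the chain from the route's top-level device down to the physical one. A simplified sketch of that walk follows (bounds checks and the destination-MAC bookkeeping are omitted; the exact num_paths accounting may differ in the backported helper):

    static int fill_forward_path_sketch(const struct net_device *dev,
                                        struct net_device_path_stack *stack)
    {
            struct net_device_path_ctx ctx = { .dev = dev };
            struct net_device_path *path;

            stack->num_paths = 0;
            while (ctx.dev && ctx.dev->netdev_ops->ndo_fill_forward_path) {
                    path = &stack->path[stack->num_paths++];
                    memset(path, 0, sizeof(*path));
                    /* each hop records itself in *path and moves ctx.dev one layer down */
                    if (ctx.dev->netdev_ops->ndo_fill_forward_path(&ctx, path) < 0)
                            return -1;
            }

            /* the final entry describes the real (lowest) device */
            path = &stack->path[stack->num_paths];
            path->type = DEV_PATH_ETHERNET;
            path->dev = ctx.dev;

            return 0;
    }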

View File

@ -42,7 +42,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ struct {
+ u16 id;
+ __be16 proto;
+ } vlan;
+ } encap;
+ };
};
@ -59,8 +59,8 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ struct vlan_dev_priv *vlan = vlan_dev_priv(ctx->dev);
+
+ path->type = DEV_PATH_VLAN;
+ path->vlan.id = vlan->vlan_id;
+ path->vlan.proto = vlan->vlan_proto;
+ path->encap.id = vlan->vlan_id;
+ path->encap.proto = vlan->vlan_proto;
+ path->dev = ctx->dev;
+ ctx->dev = vlan->real_dev;
+
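
Concretely (hypothetical devices): for a flow whose route points at an 802.1Q interface vlan100 stacked on eth0, the hop above plus the terminating ethernet entry filled in by the core yield a stack roughly like this:

    /* vlan100 and eth0 are hypothetical struct net_device pointers */
    stack.path[0].type        = DEV_PATH_VLAN;      /* recorded by vlan_dev_fill_forward_path() */
    stack.path[0].dev         = vlan100;            /* the vlan netdevice itself */
    stack.path[0].encap.id    = 100;                /* vlan->vlan_id */
    stack.path[0].encap.proto = htons(ETH_P_8021Q); /* vlan->vlan_proto */
    stack.path[1].type        = DEV_PATH_ETHERNET;  /* terminating entry */
    stack.path[1].dev         = eth0;               /* vlan->real_dev */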

View File

@ -89,7 +89,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+}
+
+struct nft_forward_info {
+ const struct net_device *dev;
+ const struct net_device *indev;
+};
+
+static void nft_dev_path_info(const struct net_device_path_stack *stack,
@ -102,12 +102,12 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ path = &stack->path[i];
+ switch (path->type) {
+ case DEV_PATH_ETHERNET:
+ info->dev = path->dev;
+ info->indev = path->dev;
+ break;
+ case DEV_PATH_VLAN:
+ case DEV_PATH_BRIDGE:
+ default:
+ info->dev = NULL;
+ info->indev = NULL;
+ break;
+ }
+ }
@ -142,10 +142,10 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ if (nft_dev_fill_forward_path(route, dst, ct, dir, &stack) >= 0)
+ nft_dev_path_info(&stack, &info);
+
+ if (!info.dev || !nft_flowtable_find_dev(info.dev, ft))
+ if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
+ return;
+
+ route->tuple[!dir].in.ifindex = info.dev->ifindex;
+ route->tuple[!dir].in.ifindex = info.indev->ifindex;
+}
+
static int nft_flow_route(const struct nft_pktinfo *pkt,

View File

@ -1,5 +1,5 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Fri, 20 Nov 2020 13:49:19 +0100
Date: Thu, 4 Mar 2021 03:26:35 +0100
Subject: [PATCH] netfilter: flowtable: use dev_fill_forward_path() to
obtain egress device
@ -271,10 +271,11 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
struct neighbour *n;
u8 nud_state;
@@ -66,22 +65,35 @@ static int nft_dev_fill_forward_path(con
@@ -66,27 +65,43 @@ static int nft_dev_fill_forward_path(con
struct nft_forward_info {
const struct net_device *dev;
const struct net_device *indev;
+ const struct net_device *outdev;
+ u8 h_source[ETH_ALEN];
+ u8 h_dest[ETH_ALEN];
+ enum flow_offload_xmit_type xmit_type;
@ -294,7 +295,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
path = &stack->path[i];
switch (path->type) {
case DEV_PATH_ETHERNET:
info->dev = path->dev;
info->indev = path->dev;
+ if (is_zero_ether_addr(info->h_source))
+ memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
break;
@ -307,9 +308,16 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ break;
+ case DEV_PATH_VLAN:
default:
info->dev = NULL;
info->indev = NULL;
break;
@@ -114,14 +126,22 @@ static void nft_dev_forward_path(struct
}
}
+ if (!info->outdev)
+ info->outdev = info->indev;
}
static bool nft_flowtable_find_dev(const struct net_device *dev,
@@ -114,14 +129,22 @@ static void nft_dev_forward_path(struct
const struct dst_entry *dst = route->tuple[dir].dst;
struct net_device_path_stack stack;
struct nft_forward_info info = {};
@ -320,15 +328,15 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
+ nft_dev_path_info(&stack, &info, ha);
if (!info.dev || !nft_flowtable_find_dev(info.dev, ft))
if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
return;
route->tuple[!dir].in.ifindex = info.dev->ifindex;
route->tuple[!dir].in.ifindex = info.indev->ifindex;
+
+ if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
+ memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
+ memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
+ route->tuple[dir].out.ifindex = info.dev->ifindex;
+ route->tuple[dir].out.ifindex = info.outdev->ifindex;
+ route->tuple[dir].xmit_type = info.xmit_type;
+ }
}

View File

@ -16,7 +16,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
FLOW_OFFLOAD_XMIT_DIRECT,
};
+#define NF_FLOW_TABLE_VLAN_MAX 2
+#define NF_FLOW_TABLE_ENCAP_MAX 2
+
struct flow_offload_tuple {
union {
@ -28,7 +28,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ struct {
+ u16 id;
+ __be16 proto;
+ } in_vlan[NF_FLOW_TABLE_VLAN_MAX];
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
/* All members above are keys for lookups, see flow_offload_hash(). */
struct { } __hash;
@ -38,17 +38,19 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
-
+ u8 dir:4,
+ xmit_type:2,
+ in_vlan_num:2;
+ encap_num:2;
u16 mtu;
union {
struct dst_entry *dst_cache;
@@ -174,6 +180,9 @@ struct nf_flow_route {
@@ -174,6 +180,11 @@ struct nf_flow_route {
struct dst_entry *dst;
struct {
u32 ifindex;
+ u16 vid[NF_FLOW_TABLE_VLAN_MAX];
+ __be16 vproto[NF_FLOW_TABLE_VLAN_MAX];
+ u8 num_vlans;
+ struct {
+ u16 id;
+ __be16 proto;
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
+ u8 num_encaps;
} in;
struct {
u32 ifindex;
@ -66,33 +68,36 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
}
flow_tuple->iifidx = route->tuple[dir].in.ifindex;
+ for (i = route->tuple[dir].in.num_vlans - 1; i >= 0; i--) {
+ flow_tuple->in_vlan[j].id = route->tuple[dir].in.vid[i];
+ flow_tuple->in_vlan[j].proto = route->tuple[dir].in.vproto[i];
+ for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
+ flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
+ flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
+ j++;
+ }
+ flow_tuple->in_vlan_num = route->tuple[dir].in.num_vlans;
+ flow_tuple->encap_num = route->tuple[dir].in.num_encaps;
switch (route->tuple[dir].xmit_type) {
case FLOW_OFFLOAD_XMIT_DIRECT:
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -159,17 +159,35 @@ static bool ip_has_options(unsigned int
@@ -159,17 +159,38 @@ static bool ip_has_options(unsigned int
return thoff != sizeof(struct iphdr);
}
+static void nf_flow_tuple_vlan(struct sk_buff *skb,
+ struct flow_offload_tuple *tuple)
+static void nf_flow_tuple_encap(struct sk_buff *skb,
+ struct flow_offload_tuple *tuple)
+{
+ int i = 0;
+
+ if (skb_vlan_tag_present(skb)) {
+ tuple->in_vlan[0].id = skb_vlan_tag_get(skb);
+ tuple->in_vlan[0].proto = skb->vlan_proto;
+ tuple->encap[i].id = skb_vlan_tag_get(skb);
+ tuple->encap[i].proto = skb->vlan_proto;
+ i++;
+ }
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
+
+ tuple->in_vlan[1].id = ntohs(veth->h_vlan_TCI);
+ tuple->in_vlan[1].proto = skb->protocol;
+ tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
+ tuple->encap[i].proto = skb->protocol;
+ }
+}
+
@ -116,7 +121,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
thoff = iph->ihl * 4;
if (ip_is_fragment(iph) ||
@@ -191,11 +209,11 @@ static int nf_flow_tuple_ip(struct sk_bu
@@ -191,11 +212,11 @@ static int nf_flow_tuple_ip(struct sk_bu
return -1;
thoff = iph->ihl * 4;
@ -131,19 +136,19 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
tuple->src_v4.s_addr = iph->saddr;
tuple->dst_v4.s_addr = iph->daddr;
@@ -204,6 +222,7 @@ static int nf_flow_tuple_ip(struct sk_bu
@@ -204,6 +225,7 @@ static int nf_flow_tuple_ip(struct sk_bu
tuple->l3proto = AF_INET;
tuple->l4proto = iph->protocol;
tuple->iifidx = dev->ifindex;
+ nf_flow_tuple_vlan(skb, tuple);
+ nf_flow_tuple_encap(skb, tuple);
return 0;
}
@@ -248,6 +267,37 @@ static unsigned int nf_flow_xmit_xfrm(st
@@ -248,6 +270,40 @@ static unsigned int nf_flow_xmit_xfrm(st
return NF_STOLEN;
}
+static bool nf_flow_skb_vlan_protocol(const struct sk_buff *skb, __be16 proto)
+static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto)
+{
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *veth;
@ -156,29 +161,37 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ return false;
+}
+
+static void nf_flow_vlan_pop(struct sk_buff *skb,
+ struct flow_offload_tuple_rhash *tuplehash)
+static void nf_flow_encap_pop(struct sk_buff *skb,
+ struct flow_offload_tuple_rhash *tuplehash)
+{
+ struct vlan_hdr *vlan_hdr;
+ int i;
+
+ for (i = 0; i < tuplehash->tuple.in_vlan_num; i++) {
+ for (i = 0; i < tuplehash->tuple.encap_num; i++) {
+ if (skb_vlan_tag_present(skb)) {
+ __vlan_hwaccel_clear_tag(skb);
+ continue;
+ }
+ vlan_hdr = (struct vlan_hdr *)skb->data;
+ __skb_pull(skb, VLAN_HLEN);
+ vlan_set_encap_proto(skb, vlan_hdr);
+ skb_reset_network_header(skb);
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ vlan_hdr = (struct vlan_hdr *)skb->data;
+ __skb_pull(skb, VLAN_HLEN);
+ vlan_set_encap_proto(skb, vlan_hdr);
+ skb_reset_network_header(skb);
+ break;
+ }
+ }
+}
+
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
const struct flow_offload_tuple_rhash *tuplehash,
unsigned short type)
@@ -280,9 +330,11 @@ nf_flow_offload_ip_hook(void *priv, stru
unsigned int thoff;
@@ -276,13 +332,15 @@ nf_flow_offload_ip_hook(void *priv, stru
enum flow_offload_tuple_dir dir;
struct flow_offload *flow;
struct net_device *outdev;
+ unsigned int thoff, mtu;
struct rtable *rt;
- unsigned int thoff;
struct iphdr *iph;
__be32 nexthop;
+ u32 offset = 0;
@ -186,12 +199,17 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
- if (skb->protocol != htons(ETH_P_IP))
+ if (skb->protocol != htons(ETH_P_IP) &&
+ !nf_flow_skb_vlan_protocol(skb, htons(ETH_P_IP)))
+ !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP)))
return NF_ACCEPT;
if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
@@ -298,11 +350,15 @@ nf_flow_offload_ip_hook(void *priv, stru
if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
@@ -295,14 +353,19 @@ nf_flow_offload_ip_hook(void *priv, stru
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
- if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
+ mtu = flow->tuplehash[dir].tuple.mtu + offset;
+ if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
return NF_ACCEPT;
- if (skb_try_make_writable(skb, sizeof(*iph)))
@ -209,17 +227,17 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
return NF_ACCEPT;
flow_offload_refresh(flow_table, flow);
@@ -312,6 +368,9 @@ nf_flow_offload_ip_hook(void *priv, stru
@@ -312,6 +375,9 @@ nf_flow_offload_ip_hook(void *priv, stru
return NF_ACCEPT;
}
+ nf_flow_vlan_pop(skb, tuplehash);
+ nf_flow_encap_pop(skb, tuplehash);
+ thoff -= offset;
+
if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
return NF_DROP;
@@ -479,14 +538,17 @@ static int nf_flow_nat_ipv6(const struct
@@ -479,14 +545,17 @@ static int nf_flow_nat_ipv6(const struct
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
struct flow_offload_tuple *tuple)
{
@ -240,7 +258,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
switch (ip6h->nexthdr) {
case IPPROTO_TCP:
@@ -503,11 +565,11 @@ static int nf_flow_tuple_ipv6(struct sk_
@@ -503,11 +572,11 @@ static int nf_flow_tuple_ipv6(struct sk_
return -1;
thoff = sizeof(*ip6h);
@ -255,29 +273,35 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
tuple->src_v6 = ip6h->saddr;
tuple->dst_v6 = ip6h->daddr;
@@ -516,6 +578,7 @@ static int nf_flow_tuple_ipv6(struct sk_
@@ -516,6 +585,7 @@ static int nf_flow_tuple_ipv6(struct sk_
tuple->l3proto = AF_INET6;
tuple->l4proto = ip6h->nexthdr;
tuple->iifidx = dev->ifindex;
+ nf_flow_tuple_vlan(skb, tuple);
+ nf_flow_tuple_encap(skb, tuple);
return 0;
}
@@ -533,9 +596,11 @@ nf_flow_offload_ipv6_hook(void *priv, st
@@ -533,9 +603,12 @@ nf_flow_offload_ipv6_hook(void *priv, st
struct net_device *outdev;
struct ipv6hdr *ip6h;
struct rt6_info *rt;
+ unsigned int mtu;
+ u32 offset = 0;
int ret;
- if (skb->protocol != htons(ETH_P_IPV6))
+ if (skb->protocol != htons(ETH_P_IPV6) &&
+ !nf_flow_skb_vlan_protocol(skb, htons(ETH_P_IPV6)))
+ !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6)))
return NF_ACCEPT;
if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
@@ -551,8 +616,11 @@ nf_flow_offload_ipv6_hook(void *priv, st
if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
@@ -548,11 +621,15 @@ nf_flow_offload_ipv6_hook(void *priv, st
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
- if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
+ mtu = flow->tuplehash[dir].tuple.mtu + offset;
+ if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
return NF_ACCEPT;
- if (nf_flow_state_check(flow, ipv6_hdr(skb)->nexthdr, skb,
@ -290,33 +314,35 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
return NF_ACCEPT;
flow_offload_refresh(flow_table, flow);
@@ -562,6 +630,8 @@ nf_flow_offload_ipv6_hook(void *priv, st
@@ -562,6 +639,8 @@ nf_flow_offload_ipv6_hook(void *priv, st
return NF_ACCEPT;
}
+ nf_flow_vlan_pop(skb, tuplehash);
+ nf_flow_encap_pop(skb, tuplehash);
+
if (skb_try_make_writable(skb, sizeof(*ip6h)))
return NF_DROP;
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -65,6 +65,9 @@ static int nft_dev_fill_forward_path(con
@@ -66,6 +66,11 @@ static int nft_dev_fill_forward_path(con
struct nft_forward_info {
const struct net_device *dev;
+ __u16 vid[NF_FLOW_TABLE_VLAN_MAX];
+ __be16 vproto[NF_FLOW_TABLE_VLAN_MAX];
+ u8 num_vlans;
const struct net_device *indev;
const struct net_device *outdev;
+ struct id {
+ __u16 id;
+ __be16 proto;
+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
+ u8 num_encaps;
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
enum flow_offload_xmit_type xmit_type;
@@ -83,9 +86,22 @@ static void nft_dev_path_info(const stru
@@ -84,9 +89,23 @@ static void nft_dev_path_info(const stru
path = &stack->path[i];
switch (path->type) {
case DEV_PATH_ETHERNET:
+ case DEV_PATH_VLAN:
info->dev = path->dev;
info->indev = path->dev;
if (is_zero_ether_addr(info->h_source))
memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
+
@ -324,25 +350,26 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ break;
+
+ /* DEV_PATH_VLAN */
+ if (info->num_vlans >= NF_FLOW_TABLE_VLAN_MAX) {
+ info->dev = NULL;
+ if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
+ info->indev = NULL;
+ break;
+ }
+ info->vid[info->num_vlans] = path->vlan.id;
+ info->vproto[info->num_vlans] = path->vlan.proto;
+ info->num_vlans++;
+ info->outdev = path->dev;
+ info->encap[info->num_encaps].id = path->encap.id;
+ info->encap[info->num_encaps].proto = path->encap.proto;
+ info->num_encaps++;
break;
case DEV_PATH_BRIDGE:
if (is_zero_ether_addr(info->h_source))
@@ -93,7 +109,6 @@ static void nft_dev_path_info(const stru
@@ -94,7 +113,6 @@ static void nft_dev_path_info(const stru
info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
break;
- case DEV_PATH_VLAN:
default:
info->dev = NULL;
info->indev = NULL;
break;
@@ -127,6 +142,7 @@ static void nft_dev_forward_path(struct
@@ -130,6 +148,7 @@ static void nft_dev_forward_path(struct
struct net_device_path_stack stack;
struct nft_forward_info info = {};
unsigned char ha[ETH_ALEN];
@ -350,15 +377,15 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
nft_dev_path_info(&stack, &info, ha);
@@ -135,6 +151,11 @@ static void nft_dev_forward_path(struct
@@ -138,6 +157,11 @@ static void nft_dev_forward_path(struct
return;
route->tuple[!dir].in.ifindex = info.dev->ifindex;
+ for (i = 0; i < info.num_vlans; i++) {
+ route->tuple[!dir].in.vid[i] = info.vid[i];
+ route->tuple[!dir].in.vproto[i] = info.vproto[i];
route->tuple[!dir].in.ifindex = info.indev->ifindex;
+ for (i = 0; i < info.num_encaps; i++) {
+ route->tuple[!dir].in.encap[i].id = info.encap[i].id;
+ route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
+ }
+ route->tuple[!dir].in.num_vlans = info.num_vlans;
+ route->tuple[!dir].in.num_encaps = info.num_encaps;
if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
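
To make the lookup-key change concrete (hypothetical VLAN IDs): a packet arriving with one hw-accelerated tag and a second 802.1Q header still in front of the IP header is keyed with two encap entries by nf_flow_tuple_encap(), and nf_flow_encap_pop() strips exactly those two layers once the flow matches:

    /* outer VID 100 delivered via the hw-accel tag, inner VID 200 still in the
     * payload (priority bits assumed zero) */
    tuple.encap[0].id    = 100;                /* skb_vlan_tag_get(skb) */
    tuple.encap[0].proto = htons(ETH_P_8021Q); /* skb->vlan_proto */
    tuple.encap[1].id    = 200;                /* ntohs(veth->h_vlan_TCI) */
    tuple.encap[1].proto = htons(ETH_P_8021Q); /* skb->protocol */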

View File

@ -18,7 +18,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
@@ -847,10 +847,20 @@ struct net_device_path {
u16 id;
__be16 proto;
} vlan;
} encap;
+ struct {
+ enum {
+ DEV_PATH_BR_VLAN_KEEP,
@ -52,7 +52,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -777,6 +777,12 @@ static int vlan_dev_fill_forward_path(st
path->vlan.proto = vlan->vlan_proto;
path->encap.proto = vlan->vlan_proto;
path->dev = ctx->dev;
ctx->dev = vlan->real_dev;
+ if (ctx->num_vlans >= ARRAY_SIZE(ctx->vlan))

View File

@ -10,18 +10,18 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -112,6 +112,18 @@ static void nft_dev_path_info(const stru
@@ -111,6 +111,18 @@ static void nft_dev_path_info(const stru
if (is_zero_ether_addr(info->h_source))
memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
+ switch (path->bridge.vlan_mode) {
+ case DEV_PATH_BR_VLAN_TAG:
+ info->vid[info->num_vlans] = path->vlan.id;
+ info->vproto[info->num_vlans] = path->vlan.proto;
+ info->num_vlans++;
+ info->encap[info->num_encaps].id = path->bridge.vlan_id;
+ info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
+ info->num_encaps++;
+ break;
+ case DEV_PATH_BR_VLAN_UNTAG:
+ info->num_vlans--;
+ info->num_encaps--;
+ break;
+ case DEV_PATH_BR_VLAN_KEEP:
+ break;

View File

@ -0,0 +1,100 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Tue, 2 Mar 2021 21:45:16 +0100
Subject: [PATCH] net: ppp: resolve forwarding path for bridge pppoe
devices
Pass on the PPPoE session ID and the real device.
---
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1450,12 +1450,34 @@ static void ppp_dev_priv_destructor(stru
ppp_destroy_interface(ppp);
}
+static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ struct ppp *ppp = netdev_priv(path->dev);
+ struct ppp_channel *chan;
+ struct channel *pch;
+
+ if (ppp->flags & SC_MULTILINK)
+ return -EOPNOTSUPP;
+
+ if (list_empty(&ppp->channels))
+ return -ENODEV;
+
+ pch = list_first_entry(&ppp->channels, struct channel, clist);
+ chan = pch->chan;
+ if (!chan->ops->fill_forward_path)
+ return -EOPNOTSUPP;
+
+ return chan->ops->fill_forward_path(ctx, path, chan);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
.ndo_start_xmit = ppp_start_xmit,
.ndo_do_ioctl = ppp_net_ioctl,
.ndo_get_stats64 = ppp_get_stats64,
+ .ndo_fill_forward_path = ppp_fill_forward_path,
};
static struct device_type ppp_type = {
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -972,8 +972,30 @@ static int pppoe_xmit(struct ppp_channel
return __pppoe_xmit(sk, skb);
}
+static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
+ struct net_device_path *path,
+ const struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ struct net_device *dev = po->pppoe_dev;
+
+ if (sock_flag(sk, SOCK_DEAD) ||
+ !(sk->sk_state & PPPOX_CONNECTED) || !dev)
+ return -1;
+
+ path->type = DEV_PATH_PPPOE;
+ path->encap.proto = htons(ETH_P_PPP_SES);
+ path->encap.id = be16_to_cpu(po->num);
+ path->dev = ctx->dev;
+ ctx->dev = dev;
+
+ return 0;
+}
+
static const struct ppp_channel_ops pppoe_chan_ops = {
.start_xmit = pppoe_xmit,
+ .fill_forward_path = pppoe_fill_forward_path,
};
static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -837,6 +837,7 @@ enum net_device_path_type {
DEV_PATH_ETHERNET = 0,
DEV_PATH_VLAN,
DEV_PATH_BRIDGE,
+ DEV_PATH_PPPOE,
};
struct net_device_path {
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -28,6 +28,9 @@ struct ppp_channel_ops {
int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
/* Handle an ioctl call that has come in via /dev/ppp. */
int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
+ int (*fill_forward_path)(struct net_device_path_ctx *,
+ struct net_device_path *,
+ const struct ppp_channel *);
};
struct ppp_channel {

View File

@ -1,23 +1,21 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Mon, 7 Dec 2020 20:31:48 +0100
Date: Thu, 4 Mar 2021 23:19:06 +0100
Subject: [PATCH] net: dsa: resolve forwarding path for dsa slave ports
Add .ndo_fill_forward_path for dsa slave port devices
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -837,6 +837,7 @@ enum net_device_path_type {
DEV_PATH_ETHERNET = 0,
@@ -838,6 +838,7 @@ enum net_device_path_type {
DEV_PATH_VLAN,
DEV_PATH_BRIDGE,
DEV_PATH_PPPOE,
+ DEV_PATH_DSA,
};
struct net_device_path {
@@ -856,6 +857,10 @@ struct net_device_path {
@@ -857,6 +858,10 @@ struct net_device_path {
u16 vlan_id;
__be16 vlan_proto;
} bridge;
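
For context, the slave-port callback this patch adds reports the switch port and tagging protocol and then continues the walk on the CPU port's master device; treat the following as a rough sketch based on the upstream counterpart (helper and field names may differ in this 5.10 backport):

    static int dsa_slave_fill_forward_path(struct net_device_path_ctx *ctx,
                                           struct net_device_path *path)
    {
            struct dsa_port *dp = dsa_slave_to_port(ctx->dev);
            struct dsa_port *cpu_dp = dp->cpu_dp;

            path->dev = ctx->dev;
            path->type = DEV_PATH_DSA;
            path->dsa.port = dp->index;
            path->dsa.proto = cpu_dp->tag_ops->proto;

            /* keep resolving on the CPU port's master ethernet device */
            ctx->dev = cpu_dp->master;

            return 0;
    }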

View File

@ -0,0 +1,263 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Mon, 1 Mar 2021 23:52:49 +0100
Subject: [PATCH] netfilter: flowtable: add pppoe support
---
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1453,7 +1453,7 @@ static void ppp_dev_priv_destructor(stru
static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
struct net_device_path *path)
{
- struct ppp *ppp = netdev_priv(path->dev);
+ struct ppp *ppp = netdev_priv(ctx->dev);
struct ppp_channel *chan;
struct channel *pch;
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -987,6 +987,7 @@ static int pppoe_fill_forward_path(struc
path->type = DEV_PATH_PPPOE;
path->encap.proto = htons(ETH_P_PPP_SES);
path->encap.id = be16_to_cpu(po->num);
+ memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
path->dev = ctx->dev;
ctx->dev = dev;
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -848,6 +848,7 @@ struct net_device_path {
struct {
u16 id;
__be16 proto;
+ u8 h_dest[ETH_ALEN];
} encap;
struct {
enum {
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -7,6 +7,9 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
@@ -162,6 +165,8 @@ static bool ip_has_options(unsigned int
static void nf_flow_tuple_encap(struct sk_buff *skb,
struct flow_offload_tuple *tuple)
{
+ struct vlan_ethhdr *veth;
+ struct pppoe_hdr *phdr;
int i = 0;
if (skb_vlan_tag_present(skb)) {
@@ -169,23 +174,35 @@ static void nf_flow_tuple_encap(struct s
tuple->encap[i].proto = skb->vlan_proto;
i++;
}
- if (skb->protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb);
-
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
+ veth = (struct vlan_ethhdr *)skb_mac_header(skb);
tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
tuple->encap[i].proto = skb->protocol;
+ break;
+ case htons(ETH_P_PPP_SES):
+ phdr = (struct pppoe_hdr *)skb_mac_header(skb);
+ tuple->encap[i].id = ntohs(phdr->sid);
+ tuple->encap[i].proto = skb->protocol;
+ break;
}
}
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
- struct flow_offload_tuple *tuple)
+ struct flow_offload_tuple *tuple, u32 *nhoff)
{
unsigned int thoff, hdrsize, offset = 0;
struct flow_ports *ports;
struct iphdr *iph;
- if (skb->protocol == htons(ETH_P_8021Q))
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
offset += VLAN_HLEN;
+ break;
+ case htons(ETH_P_PPP_SES):
+ offset += PPPOE_SES_HLEN;
+ break;
+ }
if (!pskb_may_pull(skb, sizeof(*iph) + offset))
return -1;
@@ -226,6 +243,7 @@ static int nf_flow_tuple_ip(struct sk_bu
tuple->l4proto = iph->protocol;
tuple->iifidx = dev->ifindex;
nf_flow_tuple_encap(skb, tuple);
+ *nhoff = offset;
return 0;
}
@@ -270,14 +288,36 @@ static unsigned int nf_flow_xmit_xfrm(st
return NF_STOLEN;
}
+static inline __be16 nf_flow_pppoe_proto(const struct sk_buff *skb)
+{
+ __be16 proto;
+
+ proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
+ sizeof(struct pppoe_hdr)));
+ switch (proto) {
+ case htons(PPP_IP):
+ return htons(ETH_P_IP);
+ case htons(PPP_IPV6):
+ return htons(ETH_P_IPV6);
+ }
+
+ return 0;
+}
+
static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto)
{
- if (skb->protocol == htons(ETH_P_8021Q)) {
- struct vlan_ethhdr *veth;
+ struct vlan_ethhdr *veth;
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
veth = (struct vlan_ethhdr *)skb_mac_header(skb);
if (veth->h_vlan_encapsulated_proto == proto)
return true;
+ break;
+ case htons(ETH_P_PPP_SES):
+ if (nf_flow_pppoe_proto(skb) == proto)
+ return true;
+ break;
}
return false;
@@ -294,12 +334,18 @@ static void nf_flow_encap_pop(struct sk_
__vlan_hwaccel_clear_tag(skb);
continue;
}
- if (skb->protocol == htons(ETH_P_8021Q)) {
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
vlan_hdr = (struct vlan_hdr *)skb->data;
__skb_pull(skb, VLAN_HLEN);
vlan_set_encap_proto(skb, vlan_hdr);
skb_reset_network_header(skb);
break;
+ case htons(ETH_P_PPP_SES):
+ skb->protocol = nf_flow_pppoe_proto(skb);
+ skb_pull(skb, PPPOE_SES_HLEN);
+ skb_reset_network_header(skb);
+ break;
}
}
}
@@ -343,7 +389,7 @@ nf_flow_offload_ip_hook(void *priv, stru
!nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP)))
return NF_ACCEPT;
- if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
+ if (nf_flow_tuple_ip(skb, state->in, &tuple, &offset) < 0)
return NF_ACCEPT;
tuplehash = flow_offload_lookup(flow_table, &tuple);
@@ -357,9 +403,6 @@ nf_flow_offload_ip_hook(void *priv, stru
if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
return NF_ACCEPT;
- if (skb->protocol == htons(ETH_P_8021Q))
- offset += VLAN_HLEN;
-
if (skb_try_make_writable(skb, sizeof(*iph) + offset))
return NF_DROP;
@@ -543,14 +586,20 @@ static int nf_flow_nat_ipv6(const struct
}
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
- struct flow_offload_tuple *tuple)
+ struct flow_offload_tuple *tuple, u32 *nhoff)
{
unsigned int thoff, hdrsize, offset = 0;
struct flow_ports *ports;
struct ipv6hdr *ip6h;
- if (skb->protocol == htons(ETH_P_8021Q))
+ switch (skb->protocol) {
+ case htons(ETH_P_8021Q):
offset += VLAN_HLEN;
+ break;
+ case htons(ETH_P_PPP_SES):
+ offset += PPPOE_SES_HLEN;
+ break;
+ }
if (!pskb_may_pull(skb, sizeof(*ip6h) + offset))
return -1;
@@ -586,6 +635,7 @@ static int nf_flow_tuple_ipv6(struct sk_
tuple->l4proto = ip6h->nexthdr;
tuple->iifidx = dev->ifindex;
nf_flow_tuple_encap(skb, tuple);
+ *nhoff = offset;
return 0;
}
@@ -611,7 +661,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
!nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6)))
return NF_ACCEPT;
- if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
+ if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &offset) < 0)
return NF_ACCEPT;
tuplehash = flow_offload_lookup(flow_table, &tuple);
@@ -625,9 +675,6 @@ nf_flow_offload_ipv6_hook(void *priv, st
if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
return NF_ACCEPT;
- if (skb->protocol == htons(ETH_P_8021Q))
- offset += VLAN_HLEN;
-
ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
if (nf_flow_state_check(flow, ip6h->nexthdr, skb, sizeof(*ip6h)))
return NF_ACCEPT;
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -90,6 +90,7 @@ static void nft_dev_path_info(const stru
switch (path->type) {
case DEV_PATH_ETHERNET:
case DEV_PATH_VLAN:
+ case DEV_PATH_PPPOE:
info->indev = path->dev;
if (is_zero_ether_addr(info->h_source))
memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
@@ -97,7 +98,7 @@ static void nft_dev_path_info(const stru
if (path->type == DEV_PATH_ETHERNET)
break;
- /* DEV_PATH_VLAN */
+ /* DEV_PATH_VLAN and DEV_PATH_PPPOE */
if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
info->indev = NULL;
break;
@@ -106,6 +107,8 @@ static void nft_dev_path_info(const stru
info->encap[info->num_encaps].id = path->encap.id;
info->encap[info->num_encaps].proto = path->encap.proto;
info->num_encaps++;
+ if (path->type == DEV_PATH_PPPOE)
+ memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
break;
case DEV_PATH_BRIDGE:
if (is_zero_ether_addr(info->h_source))
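
For reference, the PPPoE session framing that nf_flow_tuple_encap() and nf_flow_pppoe_proto() peek at sits directly behind the Ethernet header; the struct below is purely illustrative (pppoe_ses_frame is not a kernel type) and shows what PPPOE_SES_HLEN covers and where the PPP protocol word that gets mapped to ETH_P_IP/ETH_P_IPV6 lives:

    /* layout behind an Ethernet header with h_proto == htons(ETH_P_PPP_SES) */
    struct pppoe_ses_frame {
            struct pppoe_hdr hdr;   /* 6 bytes: ver/type, code, session id, length */
            __be16 ppp_proto;       /* PPP_IP (0x21) or PPP_IPV6 (0x57) */
            /* IPv4/IPv6 header follows; PPPOE_SES_HLEN == 8 covers both fields above */
    } __attribute__((packed));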

View File

@ -1,5 +1,5 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Mon, 18 Jan 2021 22:27:45 +0100
Date: Thu, 4 Mar 2021 19:22:55 +0100
Subject: [PATCH] netfilter: nft_flow_offload: add dsa support
Replace the master ethernet device by the dsa slave port.
@ -9,15 +9,15 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -86,6 +86,7 @@ static void nft_dev_path_info(const stru
@@ -89,6 +89,7 @@ static void nft_dev_path_info(const stru
path = &stack->path[i];
switch (path->type) {
case DEV_PATH_ETHERNET:
+ case DEV_PATH_DSA:
case DEV_PATH_VLAN:
info->dev = path->dev;
if (is_zero_ether_addr(info->h_source))
@@ -93,6 +94,10 @@ static void nft_dev_path_info(const stru
case DEV_PATH_PPPOE:
info->indev = path->dev;
@@ -97,6 +98,10 @@ static void nft_dev_path_info(const stru
if (path->type == DEV_PATH_ETHERNET)
break;
@ -26,5 +26,5 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ break;
+ }
/* DEV_PATH_VLAN */
if (info->num_vlans >= NF_FLOW_TABLE_VLAN_MAX) {
/* DEV_PATH_VLAN and DEV_PATH_PPPOE */
if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {

View File

@ -229,12 +229,12 @@ tag to the driver.
+ other_tuple = &flow->tuplehash[!dir].tuple;
+
+ for (i = 0; i < other_tuple->in_vlan_num; i++) {
+ for (i = 0; i < other_tuple->encap_num; i++) {
+ struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+
+ entry->id = FLOW_ACTION_VLAN_PUSH;
+ entry->vlan.vid = other_tuple->in_vlan[i].id;
+ entry->vlan.proto = other_tuple->in_vlan[i].proto;
+ entry->vlan.vid = other_tuple->encap[i].id;
+ entry->vlan.proto = other_tuple->encap[i].proto;
+ }
+
+ return 0;

View File

@ -1,51 +0,0 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Tue, 2 Feb 2021 17:10:07 +0100
Subject: [PATCH] netfilter: nft_flow_offload: use direct xmit if
hardware offload is enabled
If there is a forward path to reach an ethernet device and hardware
offload is enabled, then use the direct xmit path.
---
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -73,9 +73,18 @@ struct nft_forward_info {
enum flow_offload_xmit_type xmit_type;
};
+static bool nft_is_valid_ether_device(const struct net_device *dev)
+{
+ if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+ dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
+ return false;
+
+ return true;
+}
+
static void nft_dev_path_info(const struct net_device_path_stack *stack,
struct nft_forward_info *info,
- unsigned char *ha)
+ unsigned char *ha, struct nf_flowtable *flowtable)
{
const struct net_device_path *path;
int i;
@@ -131,6 +140,10 @@ static void nft_dev_path_info(const stru
break;
}
}
+
+ if (nf_flowtable_hw_offload(flowtable) &&
+ nft_is_valid_ether_device(info->dev))
+ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
}
static bool nft_flowtable_find_dev(const struct net_device *dev,
@@ -162,7 +175,7 @@ static void nft_dev_forward_path(struct
int i;
if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
- nft_dev_path_info(&stack, &info, ha);
+ nft_dev_path_info(&stack, &info, ha, &ft->data);
if (!info.dev || !nft_flowtable_find_dev(info.dev, ft))
return;

View File

@ -1,22 +0,0 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 10 Feb 2021 10:23:38 +0100
Subject: [PATCH] netfilter: nft_flow_offload: fix bridge vlan tag handling
The bridge type uses the path->bridge.vlan_* fields.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -123,8 +123,8 @@ static void nft_dev_path_info(const stru
switch (path->bridge.vlan_mode) {
case DEV_PATH_BR_VLAN_TAG:
- info->vid[info->num_vlans] = path->vlan.id;
- info->vproto[info->num_vlans] = path->vlan.proto;
+ info->vid[info->num_vlans] = path->bridge.vlan_id;
+ info->vproto[info->num_vlans] = path->bridge.vlan_proto;
info->num_vlans++;
break;
case DEV_PATH_BR_VLAN_UNTAG:

View File

@ -1,143 +0,0 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 10 Feb 2021 19:39:23 +0100
Subject: [PATCH] netfilter: flowtable: rework ingress vlan matching
When dealing with bridges with VLAN filtering and DSA/switchdev offload,
the hardware could offload adding a VLAN tag configured in the bridge.
Since there doesn't seem to be an easy way to detect that, this patch
reworks the code to optionally match the last VLAN tag that would otherwise
be inserted by the bridge.
This matters when bypassing the bridge and attaching an ingress hook on
a DSA port below it.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -115,14 +115,15 @@ struct flow_offload_tuple {
u8 l3proto;
u8 l4proto;
- struct {
- u16 id;
- __be16 proto;
- } in_vlan[NF_FLOW_TABLE_VLAN_MAX];
/* All members above are keys for lookups, see flow_offload_hash(). */
struct { } __hash;
+ struct {
+ u16 id;
+ __be16 proto;
+ } in_vlan[NF_FLOW_TABLE_VLAN_MAX], in_pvid;
+
u8 dir:4,
xmit_type:2,
in_vlan_num:2;
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -281,12 +281,13 @@ static bool nf_flow_skb_vlan_protocol(co
}
static void nf_flow_vlan_pop(struct sk_buff *skb,
- struct flow_offload_tuple_rhash *tuplehash)
+ struct flow_offload_tuple_rhash *tuplehash,
+ bool strip_pvid)
{
struct vlan_hdr *vlan_hdr;
int i;
- for (i = 0; i < tuplehash->tuple.in_vlan_num; i++) {
+ for (i = 0; i < tuplehash->tuple.in_vlan_num + strip_pvid; i++) {
if (skb_vlan_tag_present(skb)) {
__vlan_hwaccel_clear_tag(skb);
continue;
@@ -316,6 +317,31 @@ static unsigned int nf_flow_queue_xmit(s
return NF_STOLEN;
}
+static bool
+nf_flow_offload_check_vlan(struct flow_offload_tuple *tuple,
+ struct flow_offload_tuple *flow_tuple,
+ bool *strip_pvid)
+{
+ int i, cur = 0;
+
+ if (flow_tuple->in_pvid.proto &&
+ !memcmp(&tuple->in_vlan[0], &flow_tuple->in_pvid,
+ sizeof(tuple->in_vlan[0])))
+ cur++;
+
+ *strip_pvid = cur;
+
+ for (i = 0; i < flow_tuple->in_vlan_num; i++, cur++) {
+ if (!memcmp(&tuple->in_vlan[cur], &flow_tuple->in_vlan[i],
+ sizeof(tuple->in_vlan[0])))
+ continue;
+
+ return false;
+ }
+
+ return true;
+}
+
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -329,6 +355,7 @@ nf_flow_offload_ip_hook(void *priv, stru
struct rtable *rt;
unsigned int thoff;
struct iphdr *iph;
+ bool strip_pvid;
__be32 nexthop;
u32 offset = 0;
int ret;
@@ -344,6 +371,10 @@ nf_flow_offload_ip_hook(void *priv, stru
if (tuplehash == NULL)
return NF_ACCEPT;
+ if (!nf_flow_offload_check_vlan(&tuple, &tuplehash->tuple,
+ &strip_pvid))
+ return NF_ACCEPT;
+
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
@@ -368,7 +399,7 @@ nf_flow_offload_ip_hook(void *priv, stru
return NF_ACCEPT;
}
- nf_flow_vlan_pop(skb, tuplehash);
+ nf_flow_vlan_pop(skb, tuplehash, strip_pvid);
thoff -= offset;
if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
@@ -596,6 +627,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
struct net_device *outdev;
struct ipv6hdr *ip6h;
struct rt6_info *rt;
+ bool strip_pvid;
u32 offset = 0;
int ret;
@@ -610,6 +642,10 @@ nf_flow_offload_ipv6_hook(void *priv, st
if (tuplehash == NULL)
return NF_ACCEPT;
+ if (!nf_flow_offload_check_vlan(&tuple, &tuplehash->tuple,
+ &strip_pvid))
+ return NF_ACCEPT;
+
dir = tuplehash->tuple.dir;
flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
@@ -630,7 +666,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
return NF_ACCEPT;
}
- nf_flow_vlan_pop(skb, tuplehash);
+ nf_flow_vlan_pop(skb, tuplehash, strip_pvid);
if (skb_try_make_writable(skb, sizeof(*ip6h)))
return NF_DROP;

View File

@ -0,0 +1,108 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Thu, 4 Mar 2021 19:24:11 +0100
Subject: [PATCH] netfilter: nft_flow_offload: use direct xmit if
hardware offload is enabled
If there is a forward path to reach an ethernet device and hardware
offload is enabled, then use the direct xmit path.
---
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -131,6 +131,7 @@ struct flow_offload_tuple {
struct dst_entry *dst_cache;
struct {
u32 ifidx;
+ u32 hw_ifidx;
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
} out;
@@ -188,6 +189,7 @@ struct nf_flow_route {
} in;
struct {
u32 ifindex;
+ u32 hw_ifindex;
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
} out;
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -106,6 +106,7 @@ static int flow_offload_fill_route(struc
memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
ETH_ALEN);
flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
+ flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
break;
case FLOW_OFFLOAD_XMIT_XFRM:
case FLOW_OFFLOAD_XMIT_NEIGH:
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -506,7 +506,7 @@ static void flow_offload_redirect(struct
switch (this_tuple->xmit_type) {
case FLOW_OFFLOAD_XMIT_DIRECT:
this_tuple = &flow->tuplehash[dir].tuple;
- ifindex = this_tuple->out.ifidx;
+ ifindex = this_tuple->out.hw_ifidx;
break;
case FLOW_OFFLOAD_XMIT_NEIGH:
other_tuple = &flow->tuplehash[!dir].tuple;
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -66,6 +66,7 @@ static int nft_dev_fill_forward_path(con
struct nft_forward_info {
const struct net_device *indev;
const struct net_device *outdev;
+ const struct net_device *hw_outdev;
struct id {
__u16 id;
__be16 proto;
@@ -76,9 +77,18 @@ struct nft_forward_info {
enum flow_offload_xmit_type xmit_type;
};
+static bool nft_is_valid_ether_device(const struct net_device *dev)
+{
+ if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
+ dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
+ return false;
+
+ return true;
+}
+
static void nft_dev_path_info(const struct net_device_path_stack *stack,
struct nft_forward_info *info,
- unsigned char *ha)
+ unsigned char *ha, struct nf_flowtable *flowtable)
{
const struct net_device_path *path;
int i;
@@ -140,6 +150,12 @@ static void nft_dev_path_info(const stru
}
if (!info->outdev)
info->outdev = info->indev;
+
+ info->hw_outdev = info->indev;
+
+ if (nf_flowtable_hw_offload(flowtable) &&
+ nft_is_valid_ether_device(info->indev))
+ info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
}
static bool nft_flowtable_find_dev(const struct net_device *dev,
@@ -171,7 +187,7 @@ static void nft_dev_forward_path(struct
int i;
if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
- nft_dev_path_info(&stack, &info, ha);
+ nft_dev_path_info(&stack, &info, ha, &ft->data);
if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
return;
@@ -187,6 +203,7 @@ static void nft_dev_forward_path(struct
memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
route->tuple[dir].out.ifindex = info.outdev->ifindex;
+ route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
route->tuple[dir].xmit_type = info.xmit_type;
}
}

View File

@ -1,106 +0,0 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 10 Feb 2021 19:44:33 +0100
Subject: [PATCH] netfilter: flowtable: handle bridge vlan filter offload
tags from DSA/switchdev
When a switchdev/DSA port is an untagged member of a bridge vlan, ingress
packets could potentially be tagged with the id of the VLAN.
When the VLAN port group has been uploaded to switchdev, report the bridge
tag mode as DEV_PATH_BR_VLAN_UNTAG_HW instead of DEV_PATH_BR_VLAN_UNTAG
and handle it in netfilter flow offloading by storing the tag in the tuple
in_pvid field.
This allows the ingress hook to detect the optional tag and remove it for
software offload. This tag processing is for ingress only; egress needs to be
fully untagged.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -853,6 +853,7 @@ struct net_device_path {
DEV_PATH_BR_VLAN_KEEP,
DEV_PATH_BR_VLAN_TAG,
DEV_PATH_BR_VLAN_UNTAG,
+ DEV_PATH_BR_VLAN_UNTAG_HW,
} vlan_mode;
u16 vlan_id;
__be16 vlan_proto;
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -183,6 +183,10 @@ struct nf_flow_route {
u32 ifindex;
u16 vid[NF_FLOW_TABLE_VLAN_MAX];
__be16 vproto[NF_FLOW_TABLE_VLAN_MAX];
+ struct {
+ u16 id;
+ __be16 proto;
+ } pvid;
u8 num_vlans;
} in;
struct {
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -435,6 +435,7 @@ static int br_fill_forward_path(struct n
ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
ctx->num_vlans++;
break;
+ case DEV_PATH_BR_VLAN_UNTAG_HW:
case DEV_PATH_BR_VLAN_UNTAG:
ctx->num_vlans--;
break;
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1374,6 +1374,8 @@ int br_vlan_fill_forward_path_mode(struc
if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
+ else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
else
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -98,6 +98,8 @@ static int flow_offload_fill_route(struc
j++;
}
flow_tuple->in_vlan_num = route->tuple[dir].in.num_vlans;
+ flow_tuple->in_pvid.id = route->tuple[dir].in.pvid.id;
+ flow_tuple->in_pvid.proto = route->tuple[dir].in.pvid.proto;
switch (route->tuple[dir].xmit_type) {
case FLOW_OFFLOAD_XMIT_DIRECT:
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -67,6 +67,10 @@ struct nft_forward_info {
const struct net_device *dev;
__u16 vid[NF_FLOW_TABLE_VLAN_MAX];
__be16 vproto[NF_FLOW_TABLE_VLAN_MAX];
+ struct {
+ __u16 id;
+ __be16 proto;
+ } pvid;
u8 num_vlans;
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
@@ -127,6 +131,10 @@ static void nft_dev_path_info(const stru
info->vproto[info->num_vlans] = path->bridge.vlan_proto;
info->num_vlans++;
break;
+ case DEV_PATH_BR_VLAN_UNTAG_HW:
+ info->pvid.id = info->vid[info->num_vlans - 1];
+ info->pvid.proto = info->vproto[info->num_vlans - 1];
+ fallthrough;
case DEV_PATH_BR_VLAN_UNTAG:
info->num_vlans--;
break;
@@ -185,6 +193,8 @@ static void nft_dev_forward_path(struct
route->tuple[!dir].in.vid[i] = info.vid[i];
route->tuple[!dir].in.vproto[i] = info.vproto[i];
}
+ route->tuple[!dir].in.pvid.id = info.pvid.id;
+ route->tuple[!dir].in.pvid.proto = info.pvid.proto;
route->tuple[!dir].in.num_vlans = info.num_vlans;
if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {

View File

@ -0,0 +1,122 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Mon, 8 Mar 2021 12:06:44 +0100
Subject: [PATCH] netfilter: nf_flow_table: fix untagging with
hardware-offloaded bridge vlan_filtering
When switchdev offloading is enabled, treat an untagged VLAN as tagged for
ingress only.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -855,6 +855,7 @@ struct net_device_path {
DEV_PATH_BR_VLAN_KEEP,
DEV_PATH_BR_VLAN_TAG,
DEV_PATH_BR_VLAN_UNTAG,
+ DEV_PATH_BR_VLAN_UNTAG_HW,
} vlan_mode;
u16 vlan_id;
__be16 vlan_proto;
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -123,9 +123,10 @@ struct flow_offload_tuple {
/* All members above are keys for lookups, see flow_offload_hash(). */
struct { } __hash;
- u8 dir:4,
+ u8 dir:2,
xmit_type:2,
- encap_num:2;
+ encap_num:2,
+ in_vlan_ingress:2;
u16 mtu;
union {
struct dst_entry *dst_cache;
@@ -185,7 +186,8 @@ struct nf_flow_route {
u16 id;
__be16 proto;
} encap[NF_FLOW_TABLE_ENCAP_MAX];
- u8 num_encaps;
+ u8 num_encaps:2,
+ ingress_vlans:2;
} in;
struct {
u32 ifindex;
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -435,6 +435,7 @@ static int br_fill_forward_path(struct n
ctx->vlan[ctx->num_vlans].proto = path->bridge.vlan_proto;
ctx->num_vlans++;
break;
+ case DEV_PATH_BR_VLAN_UNTAG_HW:
case DEV_PATH_BR_VLAN_UNTAG:
ctx->num_vlans--;
break;
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1374,6 +1374,8 @@ int br_vlan_fill_forward_path_mode(struc
if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
+ else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
+ path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
else
path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -95,6 +95,8 @@ static int flow_offload_fill_route(struc
for (i = route->tuple[dir].in.num_encaps - 1; i >= 0; i--) {
flow_tuple->encap[j].id = route->tuple[dir].in.encap[i].id;
flow_tuple->encap[j].proto = route->tuple[dir].in.encap[i].proto;
+ if (route->tuple[dir].in.ingress_vlans & BIT(i))
+ flow_tuple->in_vlan_ingress |= BIT(j);
j++;
}
flow_tuple->encap_num = route->tuple[dir].in.num_encaps;
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -592,8 +592,12 @@ nf_flow_rule_route_common(struct net *ne
other_tuple = &flow->tuplehash[!dir].tuple;
for (i = 0; i < other_tuple->encap_num; i++) {
- struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+ struct flow_action_entry *entry;
+ if (other_tuple->in_vlan_ingress & BIT(i))
+ continue;
+
+ entry = flow_action_entry_next(flow_rule);
entry->id = FLOW_ACTION_VLAN_PUSH;
entry->vlan.vid = other_tuple->encap[i].id;
entry->vlan.proto = other_tuple->encap[i].proto;
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -72,6 +72,7 @@ struct nft_forward_info {
__be16 proto;
} encap[NF_FLOW_TABLE_ENCAP_MAX];
u8 num_encaps;
+ u8 ingress_vlans;
u8 h_source[ETH_ALEN];
u8 h_dest[ETH_ALEN];
enum flow_offload_xmit_type xmit_type;
@@ -130,6 +131,9 @@ static void nft_dev_path_info(const stru
memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);
switch (path->bridge.vlan_mode) {
+ case DEV_PATH_BR_VLAN_UNTAG_HW:
+ info->ingress_vlans |= BIT(info->num_encaps - 1);
+ break;
case DEV_PATH_BR_VLAN_TAG:
info->encap[info->num_encaps].id = path->bridge.vlan_id;
info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
@@ -198,6 +202,7 @@ static void nft_dev_forward_path(struct
route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
}
route->tuple[!dir].in.num_encaps = info.num_encaps;
+ route->tuple[!dir].in.ingress_vlans = info.ingress_vlans;
if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
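
To make the new bits concrete (illustrative values): a single bridge-added 802.1Q tag that the switch hardware already handles on this path would be recorded as follows, so it still participates in the software lookup key while nf_flow_rule_route_common() skips the corresponding FLOW_ACTION_VLAN_PUSH when the rule is pushed to hardware:

    /* one encap entry, flagged as handled by the hardware on ingress */
    route->tuple[!dir].in.num_encaps     = 1;
    route->tuple[!dir].in.encap[0].id    = 100;                /* hypothetical VID */
    route->tuple[!dir].in.encap[0].proto = htons(ETH_P_8021Q);
    route->tuple[!dir].in.ingress_vlans  = BIT(0);
    /* flow_offload_fill_route() then sets in_vlan_ingress = BIT(0) on the tuple */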

View File

@ -0,0 +1,26 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Tue, 2 Mar 2021 00:51:31 +0100
Subject: [PATCH] net: flow_offload: add FLOW_ACTION_PPPOE_PUSH
---
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -147,6 +147,7 @@ enum flow_action_id {
FLOW_ACTION_MPLS_POP,
FLOW_ACTION_MPLS_MANGLE,
FLOW_ACTION_GATE,
+ FLOW_ACTION_PPPOE_PUSH,
NUM_FLOW_ACTIONS,
};
@@ -271,6 +272,9 @@ struct flow_action_entry {
u32 num_entries;
struct action_gate_entry *entries;
} gate;
+ struct { /* FLOW_ACTION_PPPOE_PUSH */
+ u16 sid;
+ } pppoe;
};
struct flow_action_cookie *cookie; /* user defined action cookie */
};

View File

@ -0,0 +1,31 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Tue, 2 Mar 2021 01:01:50 +0100
Subject: [PATCH] netfilter: flowtable: support for
FLOW_ACTION_PPPOE_PUSH
---
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -598,9 +598,18 @@ nf_flow_rule_route_common(struct net *ne
continue;
entry = flow_action_entry_next(flow_rule);
- entry->id = FLOW_ACTION_VLAN_PUSH;
- entry->vlan.vid = other_tuple->encap[i].id;
- entry->vlan.proto = other_tuple->encap[i].proto;
+
+ switch (other_tuple->encap[i].proto) {
+ case htons(ETH_P_PPP_SES):
+ entry->id = FLOW_ACTION_PPPOE_PUSH;
+ entry->pppoe.sid = other_tuple->encap[i].id;
+ break;
+ case htons(ETH_P_8021Q):
+ entry->id = FLOW_ACTION_VLAN_PUSH;
+ entry->vlan.vid = other_tuple->encap[i].id;
+ entry->vlan.proto = other_tuple->encap[i].proto;
+ break;
+ }
}
return 0;

View File

@ -11,7 +11,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2031,6 +2031,8 @@ struct net_device {
@@ -2033,6 +2033,8 @@ struct net_device {
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;

View File

@ -50,7 +50,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
};
enum gro_result {
@@ -2411,6 +2414,26 @@ void netif_napi_add(struct net_device *d
@@ -2413,6 +2416,26 @@ void netif_napi_add(struct net_device *d
int (*poll)(struct napi_struct *, int), int weight);
/**

View File

@ -80,7 +80,7 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
#endif /* MTK_ETH_H */
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -0,0 +1,478 @@
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
@ -113,6 +113,10 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ __be16 proto;
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
+struct mtk_flow_entry {
@ -311,13 +315,20 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 1 ||
+ data.vlan.proto != ETH_P_8021Q)
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.id = act->vlan.vid;
+ data.vlan.proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ if (data.pppoe.num == 1)
+ return -EOPNOTSUPP;
+
+ data.pppoe.sid = act->pppoe.sid;
+ data.pppoe.num++;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
@ -392,11 +403,13 @@ Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+ }
+
+ if (data.vlan.num == 1) {
+ if (data.vlan.proto != ETH_P_8021Q)
+ if (data.vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ mtk_foe_entry_set_vlan(&foe, data.vlan.id);
+ }
+ if (data.pppoe.num == 1)
+ mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
+
+ err = mtk_flow_set_output_device(eth, &foe, odev);
+ if (err)