/*
 * sfe_ipv6_pppoe_br.c
 *	Shortcut forwarding engine - IPv6 PPPoE bridge implementation
 *
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <linux/etherdevice.h>
#include <linux/version.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_ipv6.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"

/*
 * sfe_ipv6_recv_pppoe_bridge()
 *	Process PPPoE bridge packets using 3-tuple acceleration
 *
 */
int sfe_ipv6_recv_pppoe_bridge(struct sfe_ipv6 *si, struct sk_buff *skb, struct net_device *dev,
			       unsigned int len, struct ipv6hdr *iph, unsigned int ihl, struct sfe_l2_info *l2_info)
{
	struct sfe_ipv6_connection_match *cm;
	u32 service_class_id;
	struct net_device *xmit_dev;
	int ret;
	bool fast_xmit;
	netdev_features_t features;

	rcu_read_lock();

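	/*
	 * Perform the 3-tuple lookup: IPPROTO_RAW plus the IPv6 source and
	 * destination addresses, with the source port set to zero and the
	 * PPPoE session ID supplied in place of the destination port.
	 */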
	cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_RAW,
						(struct sfe_ipv6_addr *)iph->saddr.s6_addr32, 0,
						(struct sfe_ipv6_addr *)iph->daddr.s6_addr32,
						htons(sfe_l2_pppoe_session_id_get(l2_info)));
	if (unlikely(!cm)) {
		rcu_read_unlock();
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_PPPOE_BR_NOT_IN_CME);
		DEBUG_TRACE("%px: no connection found in 3-tuple lookup for PPPoE bridge flow\n", skb);
		return 0;
	}

	/*
	 * Validate the source interface.
	 */
	if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
		if (!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
			struct sfe_ipv6_connection *c = cm->connection;
			DEBUG_TRACE("flush on source interface check failure\n");
			spin_lock_bh(&si->lock);
			ret = sfe_ipv6_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			if (ret) {
				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
		}
		rcu_read_unlock();
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
		DEBUG_TRACE("exception the packet on source interface check failure\n");
		return 0;
	}

	/*
	 * Do we expect an ingress VLAN tag for this flow?
	 */
	if (unlikely(!sfe_vlan_validate_ingress_tag(skb, cm->ingress_vlan_hdr_cnt, cm->ingress_vlan_hdr, l2_info))) {
		rcu_read_unlock();
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INGRESS_VLAN_TAG_MISMATCH);
		DEBUG_TRACE("VLAN tag mismatch. skb=%px\n", skb);
		return 0;
	}

	/*
	 * Check if skb has enough headroom to write L2 headers
	 */
	if (unlikely(skb_headroom(skb) < cm->l2_hdr_size)) {
		rcu_read_unlock();
		DEBUG_WARN("%px: Not enough headroom: %u\n", skb, skb_headroom(skb));
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_NO_HEADROOM);
		return 0;
	}

	/*
	 * Restore the PPPoE session header that was stripped during receive
	 * processing, so the bridged flow is forwarded with its PPPoE
	 * encapsulation intact.
	 */
	__skb_push(skb, PPPOE_SES_HLEN);

	/*
	 * Update traffic stats.
	 */
	atomic_inc(&cm->rx_packet_count);
	atomic_add(len, &cm->rx_byte_count);

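	/*
	 * Hand the skb over to the egress device selected for this connection.
	 */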
	xmit_dev = cm->xmit_dev;
	skb->dev = xmit_dev;

	/*
	 * Check to see if we need to add VLAN tags
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG)) {
		sfe_vlan_add_tag(skb, cm->egress_vlan_hdr_cnt, cm->egress_vlan_hdr);
	}

	/*
	 * Check to see if we need to write an Ethernet header.
	 */
	if (likely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
		if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
			dev_hard_header(skb, xmit_dev, ntohs(skb->protocol),
					cm->xmit_dest_mac, cm->xmit_src_mac, len);
		} else {
			/*
			 * For the simple case we write this really fast.
			 */
			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
			eth->h_proto = skb->protocol;
			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
		}
	}

	/*
	 * Update priority of skb.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
		skb->priority = cm->priority;
	}

	/*
	 * Mark outgoing packet.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
		skb->mark = cm->mark;
		/*
		 * Update service class stats if SAWF is valid.
		 */
		if (likely(cm->sawf_valid)) {
			service_class_id = SFE_GET_SAWF_SERVICE_CLASS(cm->mark);
			sfe_ipv6_service_class_stats_inc(si, service_class_id, len);
		}
	}

	/*
	 * For the first packet of the flow, check whether it qualifies for
	 * fast transmit and cache the result in the connection match entry.
	 */
	if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
			&& (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION))) {
		cm->features = netif_skb_features(skb);
		if (likely(sfe_fast_xmit_check(skb, cm->features))) {
			cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
		}
		cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED;
	}
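
	/*
	 * Take a snapshot of the cached device features and the fast transmit
	 * decision while we still hold the RCU read lock.
	 */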
	features = cm->features;

	fast_xmit = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT);

	rcu_read_unlock();

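	/*
	 * Account the packet as forwarded on the PPPoE bridge 3-tuple path.
	 */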
	this_cpu_inc(si->stats_pcpu->pppoe_bridge_packets_3tuple_forwarded64);
	this_cpu_inc(si->stats_pcpu->packets_forwarded64);

	/*
	 * We're going to check for GSO flags when we transmit the packet so
	 * start fetching the necessary cache line now.
	 */
	prefetch(skb_shinfo(skb));

	/*
	 * Do a final per-packet check before attempting to fast transmit the
	 * packet.
	 */
	if (likely(fast_xmit && dev_fast_xmit(skb, xmit_dev, features))) {
		this_cpu_inc(si->stats_pcpu->packets_fast_xmited64);
		return 1;
	}
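
	/*
	 * Fast transmit was not possible, so fall back to the normal transmit
	 * path via dev_queue_xmit() below.
	 */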

	/*
	 * Mark that this packet has been fast forwarded.
	 */
	skb->fast_forwarded = 1;

	/*
	 * Send the packet on its way.
	 */
	dev_queue_xmit(skb);

	return 1;
}