/*
 * sfe_ipv6_tcp.c
 *	Shortcut forwarding engine file for IPv6 TCP
 *
 * Copyright (c) 2015-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
20
21#include <linux/skbuff.h>
22#include <net/tcp.h>
23#include <linux/etherdevice.h>
24#include <linux/version.h>
25
26#include "sfe_debug.h"
27#include "sfe_api.h"
28#include "sfe.h"
29#include "sfe_flow_cookie.h"
30#include "sfe_ipv6.h"
Guduri Prathyusha79a5fee2021-11-11 17:59:10 +053031#include "sfe_pppoe.h"
Wayne Tanbb7f1782021-12-13 11:16:04 -080032#include "sfe_vlan.h"
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +053033
/*
 * sfe_ipv6_process_tcp_option_sack()
 *	Parse the TCP SACK option and update the ack accordingly.
 */
static bool sfe_ipv6_process_tcp_option_sack(const struct tcphdr *th, const u32 data_offs,
					     u32 *ack)
{
	u32 length = sizeof(struct tcphdr);
	u8 *ptr = (u8 *)th + length;

	/*
	 * Ignore processing if the TCP packet carries only the TIMESTAMP option.
	 */
	if (likely(data_offs == length + TCPOLEN_TIMESTAMP + 1 + 1)
	    && likely(ptr[0] == TCPOPT_NOP)
	    && likely(ptr[1] == TCPOPT_NOP)
	    && likely(ptr[2] == TCPOPT_TIMESTAMP)
	    && likely(ptr[3] == TCPOLEN_TIMESTAMP)) {
		return true;
	}
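
	/*
	 * The fast path above matches the canonical 12-byte option block:
	 * two TCPOPT_NOP pad bytes followed by the 10-byte timestamp option,
	 * i.e. data_offs == sizeof(struct tcphdr) + 1 + 1 + TCPOLEN_TIMESTAMP
	 * == 20 + 2 + 10 == 32 bytes.
	 */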

	/*
	 * TCP options. Parse the SACK option.
	 */
	while (length < data_offs) {
		u8 size;
		u8 kind;

		ptr = (u8 *)th + length;
		kind = *ptr;

		/*
		 * NOP, used for padding.
		 * Handled outside of a switch so we can escape quickly and
		 * avoid reading a size byte that NOP does not have.
		 */
		if (kind == TCPOPT_NOP) {
			length++;
			continue;
		}

		if (kind == TCPOPT_SACK) {
			u32 sack = 0;
			u8 re = 1 + 1;

			size = *(ptr + 1);
			if ((size < (1 + 1 + TCPOLEN_SACK_PERBLOCK))
			    || ((size - (1 + 1)) % (TCPOLEN_SACK_PERBLOCK))
			    || (size > (data_offs - length))) {
				return false;
			}

			re += 4;
			while (re < size) {
				u32 sack_re;
				u8 *sptr = ptr + re;

				sack_re = (sptr[0] << 24) | (sptr[1] << 16) | (sptr[2] << 8) | sptr[3];
				if (sack_re > sack) {
					sack = sack_re;
				}
				re += TCPOLEN_SACK_PERBLOCK;
			}

			if (sack > *ack) {
				*ack = sack;
			}
			length += size;
			continue;
		}

		if (kind == TCPOPT_EOL) {
			return true;
		}

		size = *(ptr + 1);
		if (size < 2) {
			return false;
		}
		length += size;
	}

	return true;
}
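
/*
 * For reference, the SACK option layout walked above (RFC 2018) is:
 *
 *	kind = TCPOPT_SACK (5), length = 2 + 8 * n, followed by n blocks
 *	of {left edge (32 bits), right edge (32 bits)}, in network order.
 *
 * Only the right edges matter here: the loop keeps the highest right edge
 * seen and folds it into *ack, so the window checks in sfe_ipv6_recv_tcp()
 * account for data acknowledged via SACK as well.
 */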

/*
 * sfe_ipv6_recv_tcp()
 *	Handle TCP packet receives and forwarding.
 */
int sfe_ipv6_recv_tcp(struct sfe_ipv6 *si, struct sk_buff *skb, struct net_device *dev,
		      unsigned int len, struct ipv6hdr *iph, unsigned int ihl, bool sync_on_find, struct sfe_l2_info *l2_info)
{
	struct tcphdr *tcph;
	struct sfe_ipv6_addr *src_ip;
	struct sfe_ipv6_addr *dest_ip;
	__be16 src_port;
	__be16 dest_port;
	struct sfe_ipv6_connection_match *cm;
	struct sfe_ipv6_connection_match *counter_cm;
	u32 flags;
	struct net_device *xmit_dev;
	bool ret;
	bool hw_csum;
	bool bridge_flow;
	bool fast_xmit;
	netdev_features_t features;

	/*
	 * Is our packet too short to contain a valid TCP header?
	 */
	if (!pskb_may_pull(skb, (sizeof(struct tcphdr) + ihl))) {
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_HEADER_INCOMPLETE);
		DEBUG_TRACE("packet too short for TCP header\n");
		return 0;
	}

	/*
	 * Read the IP address and port information. Read the IP header data first
	 * because we've almost certainly got that in the cache. We may not yet have
	 * the TCP header cached though so allow more time for any prefetching.
	 */
	src_ip = (struct sfe_ipv6_addr *)iph->saddr.s6_addr32;
	dest_ip = (struct sfe_ipv6_addr *)iph->daddr.s6_addr32;

	tcph = (struct tcphdr *)(skb->data + ihl);
	src_port = tcph->source;
	dest_port = tcph->dest;
	flags = tcp_flag_word(tcph);

	rcu_read_lock();

	/*
	 * Look for a connection match.
	 */
#ifdef CONFIG_NF_FLOW_COOKIE
	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
	if (unlikely(!cm)) {
		cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_TCP, src_ip, src_port, dest_ip, dest_port);
	}
#else
	cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_TCP, src_ip, src_port, dest_ip, dest_port);
#endif
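
	/*
	 * When flow cookies are enabled, the driver can stamp the skb with a
	 * cookie that indexes directly into the match table, skipping the
	 * 5-tuple hash lookup above; packets without a valid cookie fall
	 * back to the normal lookup, and either way a NULL match means the
	 * flow isn't accelerated.
	 */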
	if (unlikely(!cm)) {
		/*
		 * We didn't find a connection, but since TCP is connection-oriented
		 * that may simply mean this is a non-fast connection (one that is
		 * not running established). For diagnostic purposes we differentiate
		 * the two cases here.
		 */
		if (likely((flags & (TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_FIN | TCP_FLAG_ACK)) == TCP_FLAG_ACK)) {
			rcu_read_unlock();

			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_NO_CONNECTION_FAST_FLAGS);

			DEBUG_TRACE("no connection found - fast flags\n");
			return 0;
		}

		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_NO_CONNECTION_SLOW_FLAGS);
		DEBUG_TRACE("no connection found - slow flags: 0x%x\n",
			    flags & (TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_FIN | TCP_FLAG_ACK));
		return 0;
	}

	/*
	 * Validate the source interface.
	 */
	if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
		struct sfe_ipv6_connection *c = cm->connection;

		spin_lock_bh(&si->lock);
		ret = sfe_ipv6_remove_connection(si, c);
		spin_unlock_bh(&si->lock);

		if (ret) {
			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
		}
		rcu_read_unlock();
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
		DEBUG_TRACE("flush on wrong source interface check failure\n");
		return 0;
	}

	/*
	 * If our packet has been marked as "sync on find" we can't actually
	 * forward it in the fast path, but now that we've found an associated
	 * connection we need to sync its status before throwing it to the slow path.
	 */
	if (unlikely(sync_on_find)) {
		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
		DEBUG_TRACE("Sync on find\n");
		return 0;
	}

#ifdef CONFIG_XFRM
	/*
	 * We can't accelerate the flow in this direction, just let it go
	 * through the slow path.
	 */
	if (unlikely(!cm->flow_accel)) {
		rcu_read_unlock();
		this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
		return 0;
	}
#endif

	/*
	 * Do we expect an ingress VLAN tag for this flow?
	 */
	if (unlikely(!sfe_vlan_validate_ingress_tag(skb, cm->ingress_vlan_hdr_cnt, cm->ingress_vlan_hdr, l2_info))) {
		rcu_read_unlock();
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INGRESS_VLAN_TAG_MISMATCH);
		DEBUG_TRACE("VLAN tag mismatch. skb=%px\n", skb);
		return 0;
	}

	bridge_flow = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);

	/*
	 * Does our hop_limit allow forwarding?
	 */
	if (likely(!bridge_flow)) {
		if (unlikely(iph->hop_limit < 2)) {
			sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
			rcu_read_unlock();

			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_SMALL_TTL);
			DEBUG_TRACE("hop_limit too low\n");
			return 0;
		}
	}
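
	/*
	 * A hop_limit of at least 2 is required for routed flows so that the
	 * decrement performed later in this function still leaves a value of
	 * 1 or more on the wire; bridged flows don't decrement and are exempt.
	 */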

	/*
	 * If our packet is larger than the MTU of the transmit interface then
	 * we can't forward it easily.
	 */
	if (unlikely((len > cm->xmit_dev_mtu) && !skb_is_gso(skb))) {
		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_NEEDS_FRAGMENTATION);
		DEBUG_TRACE("Larger than MTU\n");
		return 0;
	}

	/*
	 * Look at our TCP flags. Anything missing an ACK or that has RST, SYN or FIN
	 * set is not a fast path packet.
	 */
	if (unlikely((flags & (TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_FIN | TCP_FLAG_ACK)) != TCP_FLAG_ACK)) {
		struct sfe_ipv6_connection *c = cm->connection;

		spin_lock_bh(&si->lock);
		ret = sfe_ipv6_remove_connection(si, c);
		spin_unlock_bh(&si->lock);

		DEBUG_TRACE("TCP flags: 0x%x are not fast\n",
			    flags & (TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_FIN | TCP_FLAG_ACK));
		if (ret) {
			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
		}
		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_FLAGS);
		return 0;
	}

	counter_cm = cm->counter_match;

	/*
	 * Are we doing sequence number checking?
	 */
	if (likely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK))) {
		u32 seq;
		u32 ack;
		u32 sack;
		u32 data_offs;
		u32 end;
		u32 left_edge;
		u32 scaled_win;
		u32 max_end;

		/*
		 * Is our sequence fully past the right hand edge of the window?
		 */
		seq = ntohl(tcph->seq);
		if (unlikely((s32)(seq - (cm->protocol_state.tcp.max_end + 1)) > 0)) {
			struct sfe_ipv6_connection *c = cm->connection;

			spin_lock_bh(&si->lock);
			ret = sfe_ipv6_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("seq: %u exceeds right edge: %u\n",
				    seq, cm->protocol_state.tcp.max_end + 1);
			if (ret) {
				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();

			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_SEQ_EXCEEDS_RIGHT_EDGE);
			return 0;
		}
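
		/*
		 * Note: the (s32) casts in these window checks implement
		 * serial number arithmetic, so the comparisons stay correct
		 * when sequence numbers wrap around 2^32. For example, with
		 * max_end = 0xfffffff0 and seq = 0x00000010 the subtraction
		 * yields (s32)0x1f > 0, i.e. seq is correctly treated as
		 * lying just past the right edge despite the wrap.
		 */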

		/*
		 * Check that our TCP data offset isn't too short.
		 */
		data_offs = tcph->doff << 2;
		if (unlikely(data_offs < sizeof(struct tcphdr))) {
			struct sfe_ipv6_connection *c = cm->connection;

			spin_lock_bh(&si->lock);
			ret = sfe_ipv6_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("TCP data offset: %u, too small\n", data_offs);
			if (ret) {
				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();

			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_SMALL_DATA_OFFS);
			return 0;
		}

		/*
		 * Update the ack according to any SACK option.
		 */
		ack = ntohl(tcph->ack_seq);
		sack = ack;
		if (unlikely(!sfe_ipv6_process_tcp_option_sack(tcph, data_offs, &sack))) {
			struct sfe_ipv6_connection *c = cm->connection;

			spin_lock_bh(&si->lock);
			ret = sfe_ipv6_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("TCP option SACK size is wrong\n");
			if (ret) {
				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();

			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_BAD_SACK);
			return 0;
		}

		/*
		 * Check that our TCP data offset isn't past the end of the packet.
		 */
		data_offs += sizeof(struct ipv6hdr);
		if (unlikely(len < data_offs)) {
			struct sfe_ipv6_connection *c = cm->connection;

			spin_lock_bh(&si->lock);
			ret = sfe_ipv6_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("TCP data offset: %u, past end of packet: %u\n",
				    data_offs, len);
			if (ret) {
				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();

			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_BIG_DATA_OFFS);
			return 0;
		}

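		/*
		 * end is the sequence number one past the last payload byte:
		 * len covers the IPv6 header onwards, and data_offs was just
		 * widened above to include the IPv6 header as well.
		 */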
		end = seq + len - data_offs;

		/*
		 * Is our sequence fully before the left hand edge of the window?
		 */
		if (unlikely((s32)(end - (cm->protocol_state.tcp.end
					  - counter_cm->protocol_state.tcp.max_win - 1)) < 0)) {
			struct sfe_ipv6_connection *c = cm->connection;

			spin_lock_bh(&si->lock);
			ret = sfe_ipv6_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("seq: %u before left edge: %u\n",
				    end, cm->protocol_state.tcp.end - counter_cm->protocol_state.tcp.max_win - 1);
			if (ret) {
				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();

			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_SEQ_BEFORE_LEFT_EDGE);
			return 0;
		}

		/*
		 * Are we acking data that is to the right of what has been sent?
		 */
		if (unlikely((s32)(sack - (counter_cm->protocol_state.tcp.end + 1)) > 0)) {
			struct sfe_ipv6_connection *c = cm->connection;

			spin_lock_bh(&si->lock);
			ret = sfe_ipv6_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("ack: %u exceeds right edge: %u\n",
				    sack, counter_cm->protocol_state.tcp.end + 1);
			if (ret) {
				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();

			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_ACK_EXCEEDS_RIGHT_EDGE);
			return 0;
		}

		/*
		 * Is our ack too far before the left hand edge of the window?
		 */
		left_edge = counter_cm->protocol_state.tcp.end
			    - cm->protocol_state.tcp.max_win
			    - SFE_IPV6_TCP_MAX_ACK_WINDOW
			    - 1;
		if (unlikely((s32)(sack - left_edge) < 0)) {
			struct sfe_ipv6_connection *c = cm->connection;

			spin_lock_bh(&si->lock);
			ret = sfe_ipv6_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("ack: %u before left edge: %u\n", sack, left_edge);
			if (ret) {
				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();

			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_TCP_ACK_BEFORE_LEFT_EDGE);
			return 0;
		}

		/*
		 * Have we just seen the largest window size yet for this connection? If yes
		 * then we need to record the new value.
		 */
		scaled_win = ntohs(tcph->window) << cm->protocol_state.tcp.win_scale;
		scaled_win += (sack - ack);
		if (unlikely(cm->protocol_state.tcp.max_win < scaled_win)) {
			cm->protocol_state.tcp.max_win = scaled_win;
		}
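
		/*
		 * The (sack - ack) term widens the advertised window by the
		 * span that SACK acknowledged beyond the cumulative ack, so
		 * the window's right edge is measured from the highest
		 * acknowledged byte rather than from the ack field alone.
		 */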

		/*
		 * If our sequence and/or ack numbers have advanced then record the new state.
		 */
		if (likely((s32)(end - cm->protocol_state.tcp.end) >= 0)) {
			cm->protocol_state.tcp.end = end;
		}

		max_end = sack + scaled_win;
		if (likely((s32)(max_end - counter_cm->protocol_state.tcp.max_end) >= 0)) {
			counter_cm->protocol_state.tcp.max_end = max_end;
		}
	}
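
	/*
	 * To summarize the state kept per direction by the checks above:
	 *
	 *	tcp.end     - highest sequence number sent (seq + payload),
	 *	tcp.max_end - highest ack + scaled window seen from the peer,
	 *	tcp.max_win - largest scaled window advertised so far.
	 *
	 * Together these bound a packet's seq/ack against both window edges,
	 * in the style of classic stateful TCP window tracking.
	 */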

	/*
	 * Check if the skb was cloned. If it was, unshare it: the data area
	 * is going to be written in this path and we don't want to modify
	 * the cloned skb's data section.
	 */
	if (unlikely(skb_cloned(skb))) {
		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			DEBUG_WARN("Failed to unshare the cloned skb\n");
			rcu_read_unlock();
			return 0;
		}

		/*
		 * Update the iph and tcph pointers with the unshared skb's data area.
		 */
		iph = (struct ipv6hdr *)skb->data;
		tcph = (struct tcphdr *)(skb->data + ihl);
	}

	/*
	 * For PPPoE packets, match the server MAC and session id.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP)) {
		struct pppoe_hdr *ph;
		struct ethhdr *eth;

		if (unlikely(!sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: PPPoE header not present in packet for PPPoE rule\n", skb);
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_PPPOE_SESSION);
			return 0;
		}

		ph = (struct pppoe_hdr *)(skb->head + sfe_l2_pppoe_hdr_offset_get(l2_info));
		eth = (struct ethhdr *)(skb->head + sfe_l2_hdr_offset_get(l2_info));

		if (unlikely(cm->pppoe_session_id != ntohs(ph->sid)) || unlikely(!(ether_addr_equal((u8 *)cm->pppoe_remote_mac, (u8 *)eth->h_source)))) {
			DEBUG_TRACE("%px: PPPoE sessions with session IDs %d and %d or server MACs %pM and %pM did not match\n",
				    skb, cm->pppoe_session_id, htons(ph->sid), cm->pppoe_remote_mac, eth->h_source);
			rcu_read_unlock();
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_PPPOE_SESSION);
			return 0;
		}

		skb->protocol = htons(l2_info->protocol);
		this_cpu_inc(si->stats_pcpu->pppoe_decap_packets_forwarded64);
	} else if (unlikely(sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {
		/*
		 * The packet carries a PPPoE header but the connection match entry
		 * (CME) doesn't have the PPPoE flag set, so exception the packet
		 * to Linux unless this is a bridged flow.
		 */
		if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: CME doesn't contain PPPoE flag but packet has PPPoE header\n", skb);
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_PPPOE_NOT_SET_IN_CME);
			return 0;
		}

		/*
		 * For bridged flows whose packets contain a PPPoE header, restore
		 * the header and forward the packet to the xmit interface.
		 */
		__skb_push(skb, (sizeof(struct pppoe_hdr) + sizeof(struct sfe_ppp_hdr)));
		l2_info->l2_hdr_size -= (sizeof(struct pppoe_hdr) + sizeof(struct sfe_ppp_hdr));
		this_cpu_inc(si->stats_pcpu->pppoe_bridge_packets_forwarded64);
	}
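
	/*
	 * The push above undoes the earlier PPPoE parse: struct pppoe_hdr is
	 * the 6-byte PPPoE header and sfe_ppp_hdr is (presumably) the 2-byte
	 * PPP protocol field, so the bridged packet leaves with its original
	 * 8 bytes of PPPoE/PPP encapsulation intact.
	 */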

	/*
	 * Check if the skb has enough headroom to write the L2 headers.
	 */
	if (unlikely(skb_headroom(skb) < cm->l2_hdr_size)) {
		rcu_read_unlock();
		DEBUG_WARN("%px: Not enough headroom: %u\n", skb, skb_headroom(skb));
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_NO_HEADROOM);
		return 0;
	}

	/*
	 * From this point on we're good to modify the packet.
	 */

	/*
	 * For PPPoE flows, add the PPPoE header before the L2 header is added.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
		sfe_pppoe_add_header(skb, cm->pppoe_session_id, PPP_IPV6);
		this_cpu_inc(si->stats_pcpu->pppoe_encap_packets_forwarded64);
	}

	/*
	 * Update the DSCP value.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
		sfe_ipv6_change_dsfield(iph, cm->dscp);
	}

	/*
	 * Decrement our hop_limit.
	 */
	if (likely(!bridge_flow)) {
		iph->hop_limit -= 1;
	}

	/*
	 * Enable HW csum if the rx checksum is verified and the xmit interface is CSUM offload capable.
	 * Note: if the L4 csum at Rx was found to be incorrect, we (the router) should use an incremental
	 * L4 checksum here so that the HW does not re-calculate/replace the L4 csum.
	 */
	hw_csum = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD) && (skb->ip_summed == CHECKSUM_UNNECESSARY);

	/*
	 * Do we have to perform translations of the source address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC)) {
		u16 tcp_csum;
		u32 sum;

		iph->saddr.s6_addr32[0] = cm->xlate_src_ip[0].addr[0];
		iph->saddr.s6_addr32[1] = cm->xlate_src_ip[0].addr[1];
		iph->saddr.s6_addr32[2] = cm->xlate_src_ip[0].addr[2];
		iph->saddr.s6_addr32[3] = cm->xlate_src_ip[0].addr[3];
		tcph->source = cm->xlate_src_port;

		if (unlikely(!hw_csum)) {
			tcp_csum = tcph->check;
			sum = tcp_csum + cm->xlate_src_csum_adjustment;
			sum = (sum & 0xffff) + (sum >> 16);
			tcph->check = (u16)sum;
		}
	}
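
	/*
	 * The fold above is standard one's-complement arithmetic: the
	 * precomputed adjustment is added to the 16-bit checksum and any
	 * carry out of bit 15 is wrapped back into the low word. For
	 * example, 0xffff + 0x0003 = 0x10002, which folds to
	 * 0x0002 + 0x0001 = 0x0003.
	 */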

	/*
	 * Do we have to perform translations of the destination address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST)) {
		u16 tcp_csum;
		u32 sum;

		iph->daddr.s6_addr32[0] = cm->xlate_dest_ip[0].addr[0];
		iph->daddr.s6_addr32[1] = cm->xlate_dest_ip[0].addr[1];
		iph->daddr.s6_addr32[2] = cm->xlate_dest_ip[0].addr[2];
		iph->daddr.s6_addr32[3] = cm->xlate_dest_ip[0].addr[3];
		tcph->dest = cm->xlate_dest_port;

		if (unlikely(!hw_csum)) {
			tcp_csum = tcph->check;
			sum = tcp_csum + cm->xlate_dest_csum_adjustment;
			sum = (sum & 0xffff) + (sum >> 16);
			tcph->check = (u16)sum;
		}
	}

	/*
	 * If HW checksum offload is not possible, the incremental L4 checksum above has already
	 * updated the packet. Setting ip_summed to CHECKSUM_UNNECESSARY ensures the checksum is
	 * not recalculated further along the packet path.
	 */
	if (likely(hw_csum)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
	} else {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/*
	 * Update traffic stats.
	 */
	atomic_inc(&cm->rx_packet_count);
	atomic_add(len, &cm->rx_byte_count);

	xmit_dev = cm->xmit_dev;
	skb->dev = xmit_dev;

	/*
	 * Check to see if we need to add VLAN tags.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG)) {
		sfe_vlan_add_tag(skb, cm->egress_vlan_hdr_cnt, cm->egress_vlan_hdr);
	}

	/*
	 * Check to see if we need to write an Ethernet header.
	 */
	if (likely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
		if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
			dev_hard_header(skb, xmit_dev, ntohs(skb->protocol),
					cm->xmit_dest_mac, cm->xmit_src_mac, len);
		} else {
			/*
			 * For the simple case we write this really fast.
			 */
			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);

			eth->h_proto = skb->protocol;
			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
		}
	}

	/*
	 * Update the priority of the skb.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
		skb->priority = cm->priority;
	}

	/*
	 * Mark the outgoing packet.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
		skb->mark = cm->mark;
	}

	/*
	 * For the first packets of a flow, check whether they qualify for fast xmit.
	 */
	if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
		     && (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION))) {
		cm->features = netif_skb_features(skb);
		if (likely(sfe_fast_xmit_check(skb, cm->features))) {
			cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
		}
		cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED;
	}
	features = cm->features;

	fast_xmit = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT);

	rcu_read_unlock();

	this_cpu_inc(si->stats_pcpu->packets_forwarded64);

	/*
	 * We're going to check for GSO flags when we transmit the packet so
	 * start fetching the necessary cache line now.
	 */
	prefetch(skb_shinfo(skb));

	/*
	 * Perform a per-packet condition check before we fast xmit the packet.
	 */
	if (likely(fast_xmit && dev_fast_xmit(skb, xmit_dev, features))) {
		this_cpu_inc(si->stats_pcpu->packets_fast_xmited64);
		return 1;
	}

	/*
	 * Mark that this packet has been fast forwarded.
	 */
	skb->fast_forwarded = 1;

	/*
	 * Send the packet on its way.
	 */
	dev_queue_xmit(skb);

	return 1;
}