/*
 * sfe_ipv4_tcp.c
 *	Shortcut forwarding engine - IPv4 TCP implementation
 *
 * Copyright (c) 2013-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv4.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"

/*
 * sfe_ipv4_process_tcp_option_sack()
 *	Parse the TCP SACK option and update the ACK accordingly.
 */
static bool sfe_ipv4_process_tcp_option_sack(const struct tcphdr *th, const u32 data_offs,
					     u32 *ack)
{
	u32 length = sizeof(struct tcphdr);
	u8 *ptr = (u8 *)th + length;

	/*
	 * Skip further processing if the TCP packet has only the common
	 * TIMESTAMP option: two NOPs followed by the 10-byte timestamp
	 * option (20 + 10 + 1 + 1 == 32 header bytes).
	 */
	if (likely(data_offs == length + TCPOLEN_TIMESTAMP + 1 + 1)
	    && likely(ptr[0] == TCPOPT_NOP)
	    && likely(ptr[1] == TCPOPT_NOP)
	    && likely(ptr[2] == TCPOPT_TIMESTAMP)
	    && likely(ptr[3] == TCPOLEN_TIMESTAMP)) {
		return true;
	}

	/*
	 * TCP options present. Walk them looking for a SACK option.
	 */
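	/*
	 * Option wire format (RFC 793): EOL (kind 0) and NOP (kind 1) are a
	 * single byte; every other option is a kind byte, then a length byte
	 * that covers the kind and length bytes themselves, then length - 2
	 * bytes of data. The walk below relies on this layout.
	 */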
	while (length < data_offs) {
		u8 size;
		u8 kind;

		ptr = (u8 *)th + length;
		kind = *ptr;

		/*
		 * NOP is a one-byte padding option. Handle it before anything
		 * else so we escape quickly and never read a size byte for it.
		 */
		if (kind == TCPOPT_NOP) {
			length++;
			continue;
		}

		if (kind == TCPOPT_SACK) {
			u32 sack = 0;
			u8 re = 1 + 1;

			size = *(ptr + 1);
			if ((size < (1 + 1 + TCPOLEN_SACK_PERBLOCK))
			    || ((size - (1 + 1)) % (TCPOLEN_SACK_PERBLOCK))
			    || (size > (data_offs - length))) {
				return false;
			}

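			/*
			 * Each SACK block is a pair of 32-bit sequence numbers:
			 * a left edge and a right edge. "re" starts past the
			 * kind/length bytes; skipping another 4 bytes lands on
			 * the right edge of the first block, and stepping by
			 * TCPOLEN_SACK_PERBLOCK (8) visits each subsequent
			 * right edge. We keep the highest right edge seen.
			 */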
			re += 4;
			while (re < size) {
				u32 sack_re;
				u8 *sptr = ptr + re;
				sack_re = (sptr[0] << 24) | (sptr[1] << 16) | (sptr[2] << 8) | sptr[3];
				if (sack_re > sack) {
					sack = sack_re;
				}
				re += TCPOLEN_SACK_PERBLOCK;
			}
			if (sack > *ack) {
				*ack = sack;
			}
			length += size;
			continue;
		}
		if (kind == TCPOPT_EOL) {
			return true;
		}
		size = *(ptr + 1);
		if (size < 2) {
			return false;
		}
		length += size;
	}

	return true;
}

/*
 * sfe_ipv4_recv_tcp()
 *	Handle TCP packet receives and forwarding.
 */
int sfe_ipv4_recv_tcp(struct sfe_ipv4 *si, struct sk_buff *skb, struct net_device *dev,
		      unsigned int len, struct iphdr *iph, unsigned int ihl, bool sync_on_find, struct sfe_l2_info *l2_info)
{
	struct tcphdr *tcph;
	__be32 src_ip;
	__be32 dest_ip;
	__be16 src_port;
	__be16 dest_port;
	struct sfe_ipv4_connection_match *cm;
	struct sfe_ipv4_connection_match *counter_cm;
	u8 ttl;
	u32 flags;
	u32 service_class_id;
	struct net_device *xmit_dev;
	bool ret;
	bool hw_csum;
	bool bridge_flow;
	bool fast_xmit;
	netdev_features_t features;

	/*
	 * Is our packet too short to contain a valid TCP header?
	 */
	if (unlikely(!pskb_may_pull(skb, (sizeof(struct tcphdr) + ihl)))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_HEADER_INCOMPLETE);
		DEBUG_TRACE("packet too short for TCP header\n");
		return 0;
	}

	/*
	 * Read the IP address and port information. Read the IP header data first
	 * because we've almost certainly got that in the cache. We may not yet have
	 * the TCP header cached though, so allow more time for any prefetching.
	 */
	src_ip = iph->saddr;
	dest_ip = iph->daddr;

	tcph = (struct tcphdr *)(skb->data + ihl);
	src_port = tcph->source;
	dest_port = tcph->dest;
	flags = tcp_flag_word(tcph);

	rcu_read_lock();

	/*
	 * Look for a connection match.
	 */
#ifdef CONFIG_NF_FLOW_COOKIE
	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
	if (unlikely(!cm)) {
		cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_TCP, src_ip, src_port, dest_ip, dest_port);
	}
#else
	/*
	 * 5-tuple lookup for the TCP flow.
	 */
	cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_TCP, src_ip, src_port, dest_ip, dest_port);
#endif
	if (unlikely(!cm)) {
		/*
		 * We didn't find a connection, but since TCP is connection-oriented
		 * that may simply mean this flow isn't being accelerated (i.e. it is
		 * not in the established state). For diagnostic purposes we
		 * differentiate the two cases here.
		 */
		if (likely((flags & (TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_FIN | TCP_FLAG_ACK)) == TCP_FLAG_ACK)) {
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_NO_CONNECTION_FAST_FLAGS);
			DEBUG_TRACE("no connection found - fast flags\n");
			return 0;
		}

		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_NO_CONNECTION_SLOW_FLAGS);
		DEBUG_TRACE("no connection found - slow flags: 0x%x\n",
			    flags & (TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_FIN | TCP_FLAG_ACK));
		return 0;
	}

	/*
	 * Validate the source interface.
	 */
	if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
		if (!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
			struct sfe_ipv4_connection *c = cm->connection;
			DEBUG_TRACE("flush on source interface check failure\n");
			spin_lock_bh(&si->lock);
			ret = sfe_ipv4_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			if (ret) {
				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
		}
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
		DEBUG_TRACE("exception the packet on source interface check failure\n");
		return 0;
	}

	/*
	 * If our packet has been marked as "sync on find" we can't actually
	 * forward it in the fast path, but now that we've found an associated
	 * connection we need to sync its status before handing the packet to
	 * the slow path.
	 */
	if (unlikely(sync_on_find)) {
		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();

		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
		DEBUG_TRACE("Sync on find\n");
		return 0;
	}

#ifdef CONFIG_XFRM
	/*
	 * We can't accelerate the flow in this direction, so let it go
	 * through the slow path.
	 */
	if (unlikely(!cm->flow_accel)) {
		rcu_read_unlock();
		this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
		return 0;
	}
#endif

	/*
	 * Do we expect an ingress VLAN tag for this flow?
	 */
	if (unlikely(!sfe_vlan_validate_ingress_tag(skb, cm->ingress_vlan_hdr_cnt, cm->ingress_vlan_hdr, l2_info))) {
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INGRESS_VLAN_TAG_MISMATCH);
		DEBUG_TRACE("VLAN tag mismatch. skb=%px\n", skb);
		return 0;
	}

	bridge_flow = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);

Ratheesh Kannoth6307bec2021-11-25 08:26:39 +0530256 /*
257 * Does our TTL allow forwarding?
258 */
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +0530259 if (likely(!bridge_flow)) {
260 ttl = iph->ttl;
261 if (unlikely(ttl < 2)) {
Ken Zhu88c58152021-12-09 15:12:06 -0800262 sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +0530263 rcu_read_unlock();
Ken Zhu88c58152021-12-09 15:12:06 -0800264
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +0530265 sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_SMALL_TTL);
Ken Zhu88c58152021-12-09 15:12:06 -0800266 DEBUG_TRACE("TTL too low\n");
Ratheesh Kannoth71fc51e2022-01-05 10:02:47 +0530267 return 0;
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +0530268 }
Ratheesh Kannoth6307bec2021-11-25 08:26:39 +0530269 }
270
	/*
	 * If our packet is larger than the MTU of the transmit interface then
	 * we can't forward it easily.
	 */
	if (unlikely((len > cm->xmit_dev_mtu) && !skb_is_gso(skb))) {
		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();

		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
		DEBUG_TRACE("Larger than MTU\n");
		return 0;
	}

	/*
	 * Look at our TCP flags. Anything missing an ACK or that has RST, SYN or FIN
	 * set is not a fast path packet.
	 */
	if (unlikely((flags & (TCP_FLAG_SYN | TCP_FLAG_RST | TCP_FLAG_FIN | TCP_FLAG_ACK)) != TCP_FLAG_ACK)) {
		struct sfe_ipv4_connection *c = cm->connection;
		spin_lock_bh(&si->lock);
		ret = sfe_ipv4_remove_connection(si, c);
		spin_unlock_bh(&si->lock);

		DEBUG_TRACE("TCP flags: %#x are not fast. %u->%u\n",
			    htonl(flags), htons(src_port), htons(dest_port));
		if (ret) {
			sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
		}
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_FLAGS);
		return 0;
	}

	counter_cm = cm->counter_match;

	/*
	 * Are we doing sequence number checking?
	 */
	if (likely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_NO_SEQ_CHECK))) {
		u32 seq;
		u32 ack;
		u32 sack;
		u32 data_offs;
		u32 end;
		u32 left_edge;
		u32 scaled_win;
		u32 max_end;

		/*
		 * Is our sequence fully past the right hand edge of the window?
		 */
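		/*
		 * Note: all the edge tests below use serial (mod 2^32)
		 * arithmetic, i.e. (s32)(a - b) > 0 means "a is after b" even
		 * across a sequence number wrap. For example, seq 0x00000005
		 * is after 0xfffffff0 because
		 * (s32)(0x00000005 - 0xfffffff0) == 0x15 > 0.
		 */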
		seq = ntohl(tcph->seq);
		if (unlikely((s32)(seq - (cm->protocol_state.tcp.max_end + 1)) > 0)) {
			struct sfe_ipv4_connection *c = cm->connection;
			spin_lock_bh(&si->lock);
			ret = sfe_ipv4_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("seq: %u exceeds right edge: %u\n",
				    seq, cm->protocol_state.tcp.max_end + 1);
			if (ret) {
				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_SEQ_EXCEEDS_RIGHT_EDGE);
			return 0;
		}

		/*
		 * Check that our TCP data offset isn't too short. Note that
		 * doff counts 32-bit words, so << 2 converts it to bytes.
		 */
		data_offs = tcph->doff << 2;
		if (unlikely(data_offs < sizeof(struct tcphdr))) {
			struct sfe_ipv4_connection *c = cm->connection;
			spin_lock_bh(&si->lock);
			ret = sfe_ipv4_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("TCP data offset: %u, too small\n", data_offs);
			if (ret) {
				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_SMALL_DATA_OFFS);
			return 0;
		}

		/*
		 * Update ACK according to any SACK option.
		 */
		ack = ntohl(tcph->ack_seq);
		sack = ack;
		if (unlikely(!sfe_ipv4_process_tcp_option_sack(tcph, data_offs, &sack))) {
			struct sfe_ipv4_connection *c = cm->connection;
			spin_lock_bh(&si->lock);
			ret = sfe_ipv4_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("TCP option SACK size is wrong\n");
			if (ret) {
				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_BAD_SACK);
			return 0;
		}

		/*
		 * Check that our TCP data offset isn't past the end of the packet.
		 */
		data_offs += sizeof(struct iphdr);
		if (unlikely(len < data_offs)) {
			struct sfe_ipv4_connection *c = cm->connection;
			spin_lock_bh(&si->lock);
			ret = sfe_ipv4_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("TCP data offset: %u, past end of packet: %u\n",
				    data_offs, len);
			if (ret) {
				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_BIG_DATA_OFFS);
			return 0;
		}

		end = seq + len - data_offs;

		/*
		 * Is our sequence fully before the left hand edge of the window?
		 */
		if (unlikely((s32)(end - (cm->protocol_state.tcp.end
					  - counter_cm->protocol_state.tcp.max_win - 1)) < 0)) {
			struct sfe_ipv4_connection *c = cm->connection;
			spin_lock_bh(&si->lock);
			ret = sfe_ipv4_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("seq: %u before left edge: %u\n",
				    end, cm->protocol_state.tcp.end - counter_cm->protocol_state.tcp.max_win - 1);
			if (ret) {
				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_SEQ_BEFORE_LEFT_EDGE);
			return 0;
		}

		/*
		 * Are we acking data that is to the right of what has been sent?
		 */
		if (unlikely((s32)(sack - (counter_cm->protocol_state.tcp.end + 1)) > 0)) {
			struct sfe_ipv4_connection *c = cm->connection;
			spin_lock_bh(&si->lock);
			ret = sfe_ipv4_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("ack: %u exceeds right edge: %u\n",
				    sack, counter_cm->protocol_state.tcp.end + 1);
			if (ret) {
				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_ACK_EXCEEDS_RIGHT_EDGE);
			return 0;
		}

		/*
		 * Is our ack too far before the left hand edge of the window?
		 */
		left_edge = counter_cm->protocol_state.tcp.end
			    - cm->protocol_state.tcp.max_win
			    - SFE_IPV4_TCP_MAX_ACK_WINDOW
			    - 1;
		if (unlikely((s32)(sack - left_edge) < 0)) {
			struct sfe_ipv4_connection *c = cm->connection;
			spin_lock_bh(&si->lock);
			ret = sfe_ipv4_remove_connection(si, c);
			spin_unlock_bh(&si->lock);

			DEBUG_TRACE("ack: %u before left edge: %u\n", sack, left_edge);
			if (ret) {
				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
			}
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_TCP_ACK_BEFORE_LEFT_EDGE);
			return 0;
		}

		/*
		 * Have we just seen the largest window size yet for this connection? If yes
		 * then we need to record the new value.
		 */
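		/*
		 * The advertised window is scaled by the negotiated window
		 * scale factor; adding (sack - ack) credits any data the peer
		 * has selectively acknowledged beyond the cumulative ACK,
		 * since that effectively extends the usable window.
		 */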
		scaled_win = ntohs(tcph->window) << cm->protocol_state.tcp.win_scale;
		scaled_win += (sack - ack);
		if (unlikely(cm->protocol_state.tcp.max_win < scaled_win)) {
			cm->protocol_state.tcp.max_win = scaled_win;
		}

		/*
		 * If our sequence and/or ack numbers have advanced then record the new state.
		 */
		if (likely((s32)(end - cm->protocol_state.tcp.end) >= 0)) {
			cm->protocol_state.tcp.end = end;
		}

		max_end = sack + scaled_win;
		if (likely((s32)(max_end - counter_cm->protocol_state.tcp.max_end) >= 0)) {
			counter_cm->protocol_state.tcp.max_end = max_end;
		}
	}

	/*
	 * Check if the skb was cloned. If it was, unshare it, because the data
	 * area is going to be written in this path and we don't want to modify
	 * the cloned skb's data section.
	 */
	if (unlikely(skb_cloned(skb))) {
		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			DEBUG_WARN("Failed to unshare the cloned skb\n");
			rcu_read_unlock();
			return 0;
		}

		/*
		 * Update the iph and tcph pointers with the unshared skb's data area.
		 */
		iph = (struct iphdr *)skb->data;
		tcph = (struct tcphdr *)(skb->data + ihl);
	}

	/*
	 * Check if skb has enough headroom to write L2 headers.
	 */
	if (unlikely(skb_headroom(skb) < cm->l2_hdr_size)) {
		rcu_read_unlock();
		DEBUG_WARN("%px: Not enough headroom: %u\n", skb, skb_headroom(skb));
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_NO_HEADROOM);
		return 0;
	}

	/*
	 * For PPPoE packets, match the server MAC and session ID.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP)) {
		struct ethhdr *eth;
		bool pppoe_match;

		if (unlikely(!sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: PPPoE header not present in packet for PPPoE rule\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INCORRECT_PPPOE_PARSING);
			return 0;
		}

		eth = eth_hdr(skb);

		pppoe_match = (cm->pppoe_session_id == sfe_l2_pppoe_session_id_get(l2_info)) &&
			      ether_addr_equal((u8 *)cm->pppoe_remote_mac, (u8 *)eth->h_source);

		if (unlikely(!pppoe_match)) {
			DEBUG_TRACE("%px: PPPoE session ID %d and %d or MAC %pM and %pM did not match\n",
				    skb, cm->pppoe_session_id, sfe_l2_pppoe_session_id_get(l2_info),
				    cm->pppoe_remote_mac, eth->h_source);
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_PPPOE_SESSION);
			return 0;
		}

		skb->protocol = htons(l2_info->protocol);
		this_cpu_inc(si->stats_pcpu->pppoe_decap_packets_forwarded64);
	} else if (unlikely(sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {
		/*
		 * If the packet contains a PPPoE header but the CME doesn't carry
		 * the PPPoE flag, exception the packet to Linux.
		 */
		if (unlikely(!bridge_flow)) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: CME doesn't contain PPPoE flag but packet has PPPoE header\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_PPPOE_NOT_SET_IN_CME);
			return 0;
		}

		/*
		 * For bridged flows, when the packet contains a PPPoE header, restore
		 * the header and forward the packet to the xmit interface.
		 */
		__skb_push(skb, PPPOE_SES_HLEN);
		this_cpu_inc(si->stats_pcpu->pppoe_bridge_packets_forwarded64);
	}

	/*
	 * From this point on we're good to modify the packet.
	 */

	/*
	 * For PPPoE flows, add the PPPoE header before the L2 header is added.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
		sfe_pppoe_add_header(skb, cm->pppoe_session_id, PPP_IP);
		this_cpu_inc(si->stats_pcpu->pppoe_encap_packets_forwarded64);
	}

	/*
	 * Update DSCP.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
	}

	/*
	 * Decrement our TTL.
	 */
	if (likely(!bridge_flow)) {
		iph->ttl = ttl - 1;
	}

	/*
	 * Enable HW csum if the rx checksum is verified and the xmit interface is CSUM offload capable.
	 * Note: If the L4 csum at Rx was found to be incorrect, we (the router) should use an incremental
	 * L4 checksum here so that the HW does not re-calculate/replace the L4 csum.
	 */
	hw_csum = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD) && (skb->ip_summed == CHECKSUM_UNNECESSARY);

	/*
	 * Do we have to perform translations of the source address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC)) {
		u16 tcp_csum;
		u32 sum;

		iph->saddr = cm->xlate_src_ip;
		tcph->source = cm->xlate_src_port;

		if (unlikely(!hw_csum)) {
			tcp_csum = tcph->check;
			if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL)) {
				sum = tcp_csum + cm->xlate_src_partial_csum_adjustment;
			} else {
				sum = tcp_csum + cm->xlate_src_csum_adjustment;
			}

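			/*
			 * Fold the 17-bit intermediate back into 16 bits,
			 * one's complement style (RFC 1624 incremental
			 * checksum update). Illustrative example: tcp_csum
			 * 0xfffe + adjustment 0x0003 = 0x10001; folding gives
			 * 0x0001 + 0x0001 = 0x0002.
			 */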
			sum = (sum & 0xffff) + (sum >> 16);
			tcph->check = (u16)sum;
		}
	}

	/*
	 * Do we have to perform translations of the destination address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST)) {
		u16 tcp_csum;
		u32 sum;

		iph->daddr = cm->xlate_dest_ip;
		tcph->dest = cm->xlate_dest_port;

		if (unlikely(!hw_csum)) {
			tcp_csum = tcph->check;
			if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL)) {
				sum = tcp_csum + cm->xlate_dest_partial_csum_adjustment;
			} else {
				sum = tcp_csum + cm->xlate_dest_csum_adjustment;
			}

			sum = (sum & 0xffff) + (sum >> 16);
			tcph->check = (u16)sum;
		}
	}

	/*
	 * If HW checksum offload is not possible, a full L3 checksum and an incremental
	 * L4 checksum are used to update the packet. Setting ip_summed to CHECKSUM_UNNECESSARY
	 * ensures the checksum is not recalculated further along the packet path.
	 */
	if (likely(hw_csum)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
	} else {
		iph->check = sfe_ipv4_gen_ip_csum(iph);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/*
	 * Update traffic stats.
	 */
	atomic_inc(&cm->rx_packet_count);
	atomic_add(len, &cm->rx_byte_count);

	xmit_dev = cm->xmit_dev;
	skb->dev = xmit_dev;

	/*
	 * Check to see if we need to add VLAN tags.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG)) {
		sfe_vlan_add_tag(skb, cm->egress_vlan_hdr_cnt, cm->egress_vlan_hdr);
	}

	/*
	 * Check to see if we need to write an Ethernet header.
	 */
	if (likely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
		if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
			dev_hard_header(skb, xmit_dev, ntohs(skb->protocol),
					cm->xmit_dest_mac, cm->xmit_src_mac, len);
		} else {
			/*
			 * For the simple case we write this really fast.
			 */
			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);

			eth->h_proto = skb->protocol;

			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
		}
	}

	/*
	 * Update priority of skb.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
		skb->priority = cm->priority;
	}

	/*
	 * Mark outgoing packet.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
		skb->mark = cm->mark;
		/*
		 * Update service class stats if SAWF is valid.
		 */
		if (likely(cm->sawf_valid)) {
			service_class_id = SFE_GET_SAWF_SERVICE_CLASS(cm->mark);
			sfe_ipv4_service_class_stats_inc(si, service_class_id, len);
		}
	}

	/*
	 * For the first packet, check whether this flow qualifies for fast xmit.
	 */
	if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
		     && (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION))) {
		cm->features = netif_skb_features(skb);
		if (likely(sfe_fast_xmit_check(skb, cm->features))) {
			cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT;
		}
		cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED;
	}
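	/*
	 * Snapshot the cached device features and the fast-xmit verdict into
	 * locals while we still hold the RCU read lock that protects cm.
	 */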
	features = cm->features;
	fast_xmit = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT);

	rcu_read_unlock();

	this_cpu_inc(si->stats_pcpu->packets_forwarded64);

	/*
	 * We do a per-packet condition check before we can fast xmit the
	 * packet.
	 */
	if (likely(fast_xmit)) {
		if (likely(!skb_is_gso(skb))) {
			if (likely(dev_fast_xmit(skb, xmit_dev, features))) {
				this_cpu_inc(si->stats_pcpu->packets_fast_xmited64);
				return 1;
			}
		} else {
			cm->flags &= ~SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT;
			DEBUG_TRACE("%px: fast xmit disabled for xmit dev %s", skb, xmit_dev->name);
		}
	}

	/*
	 * We're going to check for GSO flags when we transmit the packet so
	 * start fetching the necessary cache line now.
	 */
	prefetch(skb_shinfo(skb));

	/*
	 * Mark that this packet has been fast forwarded.
	 */
	skb->fast_forwarded = 1;

	/*
	 * Send the packet on its way.
	 */
	dev_queue_xmit(skb);

	return 1;
}