/*
 * sfe_ipv4_udp.c
 *	Shortcut forwarding engine - IPv4 UDP implementation
 *
 * Copyright (c) 2013-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021,2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/version.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv4.h"
#include "sfe_pppoe.h"

/*
 * sfe_ipv4_udp_sk_deliver()
 *	Deliver the packet to the protocol handler registered with Linux.
 *	To be called under rcu_read_lock().
 *	Returns:
 *	 1 if the packet needs to be passed to Linux.
 *	 0 if the packet is processed successfully.
 *	-1 if the packet is dropped in SFE.
 */
static int sfe_ipv4_udp_sk_deliver(struct sk_buff *skb, struct sfe_ipv4_connection_match *cm, unsigned int ihl)
{
	struct udp_sock *up;
	struct sock *sk;
	int ret;
	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

	/*
	 * Call the decap handler only if a valid encap_rcv handler is registered.
	 */
	up = rcu_dereference(cm->up);
	encap_rcv = READ_ONCE(up->encap_rcv);
	if (!encap_rcv) {
		DEBUG_ERROR("%px: sfe: Error: up->encap_rcv is NULL\n", skb);
		return 1;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	nf_reset(skb);
#else
	nf_reset_ct(skb);
#endif

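	/*
	 * Strip the IP header so that skb->data (and the transport header)
	 * point at the UDP header, which is what the encap handler expects.
	 */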
	skb_pull(skb, ihl);
	skb_reset_transport_header(skb);

	/*
	 * Verify the checksum before handing the packet to the encap_rcv handler.
	 * TODO: The following approach ignores UDP-Lite for now.
	 * Instead, consider calling the Linux API to do the checksum validation.
	 */
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY) && unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		skb->csum = inet_compute_pseudo(skb, IPPROTO_UDP);
		if (unlikely(__skb_checksum_complete(skb))) {
			DEBUG_ERROR("%px: sfe: Invalid UDP checksum\n", skb);
			kfree_skb(skb);
			return -1;
		}
		DEBUG_TRACE("%px: sfe: UDP checksum verified correctly in software.\n", skb);
	}

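	/*
	 * struct sock is the first member of struct udp_sock (via the embedded
	 * struct inet_sock), so the pointer can simply be cast back.
	 */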
	sk = (struct sock *)up;

	/*
	 * At this point the L4 checksum has already been verified and the packet
	 * is going to Linux's tunnel decap handler. ip_summed should be set to
	 * CHECKSUM_NONE to ensure that the packet's inner header checksum is
	 * validated correctly later.
	 * TODO: Find the fix to set skb->ip_summed = CHECKSUM_NONE;
	 */

	/*
	 * encap_rcv() returns the following values:
	 *	=0 if skb was successfully passed to the encap
	 *	   handler or was discarded by it.
	 *	>0 if skb should be passed on to UDP.
	 *	<0 if skb should be resubmitted as proto -N.
	 */
	ret = encap_rcv(sk, skb);
	if (unlikely(ret)) {
		/*
		 * If encap_rcv fails, the vxlan driver drops the packet.
		 * No need to free the skb here.
		 */
		DEBUG_ERROR("%px: sfe: udp-decap API returned error: %d\n", skb, ret);
		return -1;
	}

	return 0;
}
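
/*
 * The two translation blocks in sfe_ipv4_recv_udp() below each carry a TODO
 * asking for a common API for the incremental (RFC 1624 style) L4 checksum
 * update. A minimal sketch of what such a helper could look like is given
 * here; the name sfe_ipv4_udp_csum_fold_update() is illustrative only and
 * not part of the original driver.
 */
static inline u16 sfe_ipv4_udp_csum_fold_update(u16 udp_csum, u32 adjustment)
{
	/*
	 * Add the precomputed per-connection adjustment and fold the carry
	 * back into the low 16 bits of the one's-complement sum.
	 */
	u32 sum = udp_csum + adjustment;

	sum = (sum & 0xffff) + (sum >> 16);
	return (u16)sum;
}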

/*
 * sfe_ipv4_recv_udp()
 *	Handle UDP packet receives and forwarding.
 */
int sfe_ipv4_recv_udp(struct sfe_ipv4 *si, struct sk_buff *skb, struct net_device *dev,
		      unsigned int len, struct iphdr *iph, unsigned int ihl,
		      bool sync_on_find, struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct udphdr *udph;
	__be32 src_ip;
	__be32 dest_ip;
	__be16 src_port;
	__be16 dest_port;
	struct sfe_ipv4_connection_match *cm;
	u8 ttl;
	struct net_device *xmit_dev;
	bool hw_csum;
	int err;
	bool bridge_flow;

	/*
	 * Is our packet too short to contain a valid UDP header?
	 */
	if (unlikely(!pskb_may_pull(skb, (sizeof(struct udphdr) + ihl)))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_HEADER_INCOMPLETE);
		DEBUG_TRACE("%px: packet too short for UDP header\n", skb);
		return 0;
	}

	/*
	 * Read the IP address and port information. Read the IP header data first
	 * because we've almost certainly got that in the cache. We may not yet have
	 * the UDP header cached though so allow more time for any prefetching.
	 */
	src_ip = iph->saddr;
	dest_ip = iph->daddr;

	udph = (struct udphdr *)(skb->data + ihl);
	src_port = udph->source;
	dest_port = udph->dest;

	rcu_read_lock();

	/*
	 * Look for a connection match.
	 */
#ifdef CONFIG_NF_FLOW_COOKIE
	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
	if (unlikely(!cm)) {
		cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, src_port, dest_ip, dest_port);
	}
#else
	/*
	 * 5-tuple lookup for the UDP flow.
	 */
	cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, src_port, dest_ip, dest_port);
#endif
	if (unlikely(!cm)) {

		/*
		 * Try a 4-tuple lookup (with a zeroed source port); required for
		 * tunnels like vxlan, where the source port is not fixed.
		 */
		cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, 0, dest_ip, dest_port);
		if (unlikely(!cm)) {
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_NO_CONNECTION);
			DEBUG_TRACE("%px: sfe: no connection found in 4-tuple lookup.\n", skb);
			return 0;
		}
	}

	/*
	 * If our packet has been marked as "sync on find" we can't actually
	 * forward it in the fast path, but now that we've found an associated
	 * connection we need to sync its status before exceptioning it to the
	 * slow path.
	 */
	if (unlikely(sync_on_find)) {
		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
		DEBUG_TRACE("%px: sfe: sync on find\n", cm);
		return 0;
	}

#ifdef CONFIG_XFRM
	/*
	 * We can't accelerate the flow in this direction, so just let it go
	 * through the slow path.
	 */
	if (unlikely(!cm->flow_accel)) {
		rcu_read_unlock();
		this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
		return 0;
	}
#endif

	bridge_flow = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);

	/*
	 * Does our TTL allow forwarding?
	 */
	if (likely(!bridge_flow)) {
		ttl = iph->ttl;
		if (unlikely(ttl < 2)) {
			sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
			rcu_read_unlock();

			DEBUG_TRACE("%px: sfe: TTL too low\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_SMALL_TTL);
			return 0;
		}
	}

	/*
	 * If our packet is larger than the MTU of the transmit interface then
	 * we can't forward it easily.
	 */
	if (unlikely(len > cm->xmit_dev_mtu)) {
		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_NEEDS_FRAGMENTATION);
		DEBUG_TRACE("%px: sfe: larger than MTU\n", cm);
		return 0;
	}

	/*
	 * For PPPoE packets, match the server MAC and session id.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP)) {
		struct pppoe_hdr *ph;
		struct ethhdr *eth;

		if (unlikely(!l2_info) || unlikely(!sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: PPPoE is not parsed\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INCORRECT_PPPOE_PARSING);
			return 0;
		}

		ph = (struct pppoe_hdr *)(skb->head + sfe_l2_pppoe_hdr_offset_get(l2_info));
		eth = (struct ethhdr *)(skb->head + sfe_l2_hdr_offset_get(l2_info));
		if (unlikely(cm->pppoe_session_id != htons(ph->sid)) || unlikely(!(ether_addr_equal((u8 *)cm->pppoe_remote_mac, (u8 *)eth->h_source)))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: PPPoE session did not match\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_PPPOE_SESSION);
			return 0;
		}
		this_cpu_inc(si->stats_pcpu->pppoe_decap_packets_forwarded64);

	} else if (unlikely(l2_info) && unlikely(sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {

		/*
		 * The packet contains a PPPoE header but the CME doesn't have the
		 * PPPoE flag set, so exception the packet to Linux.
		 */
		rcu_read_unlock();
		DEBUG_TRACE("%px: CME doesn't contain PPPoE flag but packet has PPPoE header\n", skb);
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_PPPOE_NOT_SET_IN_CME);
		return 0;
	}

	/*
	 * From this point on we're good to modify the packet.
	 */

	/*
	 * Check if the skb was cloned. If it was, unshare it, because the data
	 * area is going to be written in this path and we don't want to change
	 * the cloned skb's data section.
	 */
	if (unlikely(skb_cloned(skb))) {
		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			DEBUG_WARN("Failed to unshare the cloned skb\n");
			rcu_read_unlock();
			return 0;
		}

		/*
		 * Update the iph and udph pointers with the unshared skb's data area.
		 */
		iph = (struct iphdr *)skb->data;
		udph = (struct udphdr *)(skb->data + ihl);
	}

	/*
	 * For PPPoE flows, add the PPPoE header before the L2 header is added.
	 */
	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP) {
		if (unlikely(!sfe_pppoe_add_header(skb, cm->pppoe_session_id, PPP_IP))) {
			rcu_read_unlock();
			DEBUG_WARN("%px: PPPoE header addition failed\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_PPPOE_HEADER_ENCAP_FAILED);
			return 0;
		}
		this_cpu_inc(si->stats_pcpu->pppoe_encap_packets_forwarded64);
	}

	/*
	 * TODO: The VLAN header should be added here when VLAN is supported.
	 */

	/*
	 * Enable HW csum if the rx checksum is verified and the xmit interface is
	 * CSUM offload capable.
	 * Note: If the L4 csum at Rx was found to be incorrect, we (the router)
	 * should use an incremental L4 checksum here so that the HW does not
	 * re-calculate/replace the L4 csum.
	 */
	hw_csum = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD) && (skb->ip_summed == CHECKSUM_UNNECESSARY);

	/*
	 * Do we have to perform translations of the source address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC)) {
		u16 udp_csum;

		iph->saddr = cm->xlate_src_ip;
		udph->source = cm->xlate_src_port;

		/*
		 * Do we have a non-zero UDP checksum? If we do then we need
		 * to update it.
		 */
		if (unlikely(!hw_csum)) {
			udp_csum = udph->check;
			if (likely(udp_csum)) {
				u32 sum;

				if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL)) {
					sum = udp_csum + cm->xlate_src_partial_csum_adjustment;
				} else {
					sum = udp_csum + cm->xlate_src_csum_adjustment;
				}

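				/*
				 * Fold any carry back into the low 16 bits of
				 * the one's-complement sum.
				 */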
				sum = (sum & 0xffff) + (sum >> 16);
				udph->check = (u16)sum;
			}
		}
	}

	/*
	 * Do we have to perform translations of the destination address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST)) {
		u16 udp_csum;

		iph->daddr = cm->xlate_dest_ip;
		udph->dest = cm->xlate_dest_port;

		/*
		 * Do we have a non-zero UDP checksum? If we do then we need
		 * to update it.
		 */
		if (unlikely(!hw_csum)) {
			udp_csum = udph->check;
			if (likely(udp_csum)) {
				u32 sum;

				/*
				 * TODO: Use a common API for the incremental checksum
				 * calculation below for IPv4/IPv6 UDP/TCP (cf. the
				 * sfe_ipv4_udp_csum_fold_update() sketch above).
				 */
				if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL)) {
					sum = udp_csum + cm->xlate_dest_partial_csum_adjustment;
				} else {
					sum = udp_csum + cm->xlate_dest_csum_adjustment;
				}

				sum = (sum & 0xffff) + (sum >> 16);
				udph->check = (u16)sum;
			}
		}
	}

	/*
	 * The UDP sock is valid only in the decap path.
	 * Call the encap_rcv function associated with the udp_sock in cm.
	 */
	if (unlikely(cm->up)) {
		/*
		 * Call the decap handler associated with the sock; it also
		 * validates the UDP checksum before invoking the handler.
		 */
		err = sfe_ipv4_udp_sk_deliver(skb, cm, ihl);
		if (unlikely(err == -1)) {
			rcu_read_unlock();
			this_cpu_inc(si->stats_pcpu->packets_dropped64);
			return 1;
		} else if (unlikely(err == 1)) {
			rcu_read_unlock();
			this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
			return 0;
		}

		/*
		 * Update traffic stats.
		 */
		atomic_inc(&cm->rx_packet_count);
		atomic_add(len, &cm->rx_byte_count);

		rcu_read_unlock();
		this_cpu_inc(si->stats_pcpu->packets_forwarded64);
		DEBUG_TRACE("%px: sfe: sfe_ipv4_recv_udp -> encap_rcv done.\n", skb);
		return 1;
	}

	/*
	 * Decrement our TTL, except when called from the hook function
	 * post-decap.
	 */
	if (likely(!bridge_flow)) {
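		/*
		 * Branchless form of the exception above: when tun_outer is set
		 * (the tunnel's outer header), the TTL is left untouched.
		 */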
		iph->ttl -= (u8)(!tun_outer);
	}

	/*
	 * Update the DSCP.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
	}

	/*
	 * If HW checksum offload is not possible, a full L3 checksum and an
	 * incremental L4 checksum are used to update the packet. Setting
	 * ip_summed to CHECKSUM_UNNECESSARY ensures the checksum is not
	 * recalculated further along the packet path.
	 */
	if (likely(hw_csum)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
	} else {
		iph->check = sfe_ipv4_gen_ip_csum(iph);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/*
	 * Update traffic stats.
	 */
	atomic_inc(&cm->rx_packet_count);
	atomic_add(len, &cm->rx_byte_count);

	xmit_dev = cm->xmit_dev;
	skb->dev = xmit_dev;

	/*
	 * Check to see if we need to write a header.
	 */
	if (likely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
		if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
			dev_hard_header(skb, xmit_dev, ETH_P_IP,
					cm->xmit_dest_mac, cm->xmit_src_mac, len);
		} else {
			/*
			 * For the simple case we write this really fast.
			 */
			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);

			eth->h_proto = htons(ETH_P_IP);
			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
		}
	}

	/*
	 * Update the priority of the skb.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
		skb->priority = cm->priority;
	}

	/*
	 * Mark the outgoing packet.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
		skb->mark = cm->connection->mark;
	}

	rcu_read_unlock();

	this_cpu_inc(si->stats_pcpu->packets_forwarded64);

	/*
	 * We're going to check for GSO flags when we transmit the packet so
	 * start fetching the necessary cache line now.
	 */
	prefetch(skb_shinfo(skb));

	/*
	 * Mark that this packet has been fast forwarded.
	 */
	skb->fast_forwarded = 1;

	/*
	 * Send the packet on its way.
	 */
	dev_queue_xmit(skb);

	return 1;
}