/*
 * sfe_ipv4_udp.c
 *	Shortcut forwarding engine - IPv4 UDP implementation
 *
 * Copyright (c) 2013-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/version.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv4.h"
#include "sfe_pppoe.h"

/*
 * sfe_ipv4_udp_sk_deliver()
 *	Deliver the packet to the protocol handler registered with Linux.
 *	To be called under rcu_read_lock().
 *	Returns:
 *	1 if the packet needs to be passed to Linux.
 *	0 if the packet is processed successfully.
 *	-1 if the packet is dropped in SFE.
 */
static int sfe_ipv4_udp_sk_deliver(struct sk_buff *skb, struct sfe_ipv4_connection_match *cm, unsigned int ihl)
{
	struct udp_sock *up;
	struct sock *sk;
	int ret;
	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

	/*
	 * Call the decap handler only if a valid encap_rcv handler is registered.
	 */
	up = rcu_dereference(cm->up);
	encap_rcv = READ_ONCE(up->encap_rcv);
	if (!encap_rcv) {
		DEBUG_ERROR("%px: sfe: Error: up->encap_rcv is NULL\n", skb);
		return 1;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	nf_reset(skb);
#else
	nf_reset_ct(skb);
#endif
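
	/*
	 * Note: nf_reset()/nf_reset_ct() drops any netfilter/conntrack state the
	 * outer flow carried on this skb, so the tunnel payload is re-evaluated
	 * from a clean slate once it is handed back to Linux.
	 */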

	skb_pull(skb, ihl);
	skb_reset_transport_header(skb);

	/*
	 * Verify the checksum before giving the packet to the encap_rcv handler function.
	 * TODO: The following approach does not handle UDP-Lite for now.
	 * Instead, consider calling the Linux API to do checksum validation.
	 */
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY) && unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		skb->csum = inet_compute_pseudo(skb, IPPROTO_UDP);
		if (unlikely(__skb_checksum_complete(skb))) {
			DEBUG_ERROR("%px: sfe: Invalid udp checksum\n", skb);
			kfree_skb(skb);
			return -1;
		}
		DEBUG_TRACE("%px: sfe: udp checksum verified in s/w correctly.\n", skb);
	}
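
	/*
	 * How the software verification above works: inet_compute_pseudo() seeds
	 * skb->csum with the IPv4 pseudo-header sum (saddr, daddr, length, proto),
	 * and __skb_checksum_complete() then folds the UDP header and payload into
	 * it. A non-zero result means the datagram is corrupt and must be dropped.
	 */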

	sk = (struct sock *)up;

	/*
	 * At this point the L4 checksum has already been verified and the packet is
	 * going to Linux's tunnel decap handler. The ip_summed field should be set
	 * to CHECKSUM_NONE to ensure that the inner header checksum is validated
	 * correctly later.
	 * TODO: Find the fix to set skb->ip_summed = CHECKSUM_NONE;
	 */

	/*
	 * encap_rcv() returns the following value:
	 * =0 if skb was successfully passed to the encap
	 *    handler or was discarded by it.
	 * >0 if skb should be passed on to UDP.
	 * <0 if skb should be resubmitted as proto -N
	 */
	ret = encap_rcv(sk, skb);
	if (unlikely(ret)) {
		/*
		 * If encap_rcv fails, the vxlan driver drops the packet.
		 * No need to free the skb here.
		 */

		DEBUG_ERROR("%px: sfe: udp-decap API returned error: %d\n", skb, ret);
		return -1;
	}

	return 0;
}

/*
 * sfe_ipv4_recv_udp()
 *	Handle UDP packet receives and forwarding.
 */
int sfe_ipv4_recv_udp(struct sfe_ipv4 *si, struct sk_buff *skb, struct net_device *dev,
		      unsigned int len, struct iphdr *iph, unsigned int ihl,
		      bool sync_on_find, struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct udphdr *udph;
	__be32 src_ip;
	__be32 dest_ip;
	__be16 src_port;
	__be16 dest_port;
	struct sfe_ipv4_connection_match *cm;
	u8 ttl;
	struct net_device *xmit_dev;
	bool hw_csum;
	int err;
	bool bridge_flow;

	/*
	 * Is our packet too short to contain a valid UDP header?
	 */
	if (unlikely(!pskb_may_pull(skb, (sizeof(struct udphdr) + ihl)))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_HEADER_INCOMPLETE);
		DEBUG_TRACE("%px: packet too short for UDP header\n", skb);
		return 0;
	}

	/*
	 * Read the IP address and port information. Read the IP header data first
	 * because we've almost certainly got that in the cache. We may not yet have
	 * the UDP header cached though so allow more time for any prefetching.
	 */
	src_ip = iph->saddr;
	dest_ip = iph->daddr;

	udph = (struct udphdr *)(skb->data + ihl);
	src_port = udph->source;
	dest_port = udph->dest;
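
	/*
	 * Note: the addresses and ports above are left in network byte order; the
	 * connection match lookup below compares them in the same representation,
	 * so no byte-swapping is needed on the fast path.
	 */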

	rcu_read_lock();

	/*
	 * Look for a connection match.
	 */
#ifdef CONFIG_NF_FLOW_COOKIE
	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
	if (unlikely(!cm)) {
		cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, src_port, dest_ip, dest_port);
	}
#else
	/*
	 * 5-tuple lookup for UDP flow.
	 */
	cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, src_port, dest_ip, dest_port);
#endif
	if (unlikely(!cm)) {

		/*
		 * Try a 4-tuple lookup; required for tunnels like VXLAN.
		 */
		cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, 0, dest_ip, dest_port);
		if (unlikely(!cm)) {
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_NO_CONNECTION);
			DEBUG_TRACE("%px: sfe: no connection found in 4-tuple lookup.\n", skb);
			return 0;
		}
	}
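
	/*
	 * Why the source port is wildcarded (0) in the fallback lookup: VXLAN and
	 * similar UDP tunnels derive the outer source port from a hash of the inner
	 * flow to spread traffic across ECMP paths, so a full 5-tuple cannot match
	 * a single rule. The assumption here is that such tunnel rules are
	 * installed with a zero source port.
	 */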

	/*
	 * If our packet has been marked as "sync on find" we can't actually
	 * forward it in the fast path, but now that we've found an associated
	 * connection we need to sync its status before exceptioning it to the slow path.
	 */
	if (unlikely(sync_on_find)) {
		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
		DEBUG_TRACE("%px: sfe: sync on find\n", cm);
		return 0;
	}

#ifdef CONFIG_XFRM
	/*
	 * We can't accelerate the flow in this direction, just let it go
	 * through the slow path.
	 */
	if (unlikely(!cm->flow_accel)) {
		rcu_read_unlock();
		this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
		return 0;
	}
#endif

	bridge_flow = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);

	/*
	 * Does our TTL allow forwarding?
	 */
	if (likely(!bridge_flow)) {
		ttl = iph->ttl;
		if (unlikely(ttl < 2)) {
			sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
			rcu_read_unlock();

			DEBUG_TRACE("%px: sfe: TTL too low\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_SMALL_TTL);
			return 0;
		}
	}
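
	/*
	 * A TTL below 2 means the decrement performed later would bring it to 0,
	 * so the packet is exceptioned to the slow path, which can then generate
	 * the ICMP Time Exceeded response. Bridged flows are exempt because a
	 * bridge does not decrement the TTL.
	 */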

	/*
	 * If our packet is larger than the MTU of the transmit interface then
	 * we can't forward it easily.
	 */
	if (unlikely(len > cm->xmit_dev_mtu)) {
		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_NEEDS_FRAGMENTATION);
		DEBUG_TRACE("%px: sfe: larger than MTU\n", cm);
		return 0;
	}

	/*
	 * Check if the skb was cloned. If it was, unshare it, because the data area
	 * is going to be written in this path and we don't want to change the
	 * cloned skb's data section.
	 */
	if (unlikely(skb_cloned(skb))) {
		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			DEBUG_WARN("Failed to unshare the cloned skb\n");
			rcu_read_unlock();
			return 0;
		}

		/*
		 * Update the iph and udph pointers with the unshared skb's data area.
		 */
		iph = (struct iphdr *)skb->data;
		udph = (struct udphdr *)(skb->data + ihl);
	}

	/*
	 * For PPPoE packets, match the server MAC and session ID.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP)) {
		struct pppoe_hdr *ph;
		struct ethhdr *eth;

		if (unlikely(!sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: PPPoE header not present in packet for PPPoE rule\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INCORRECT_PPPOE_PARSING);
			return 0;
		}

		ph = (struct pppoe_hdr *)(skb->head + sfe_l2_pppoe_hdr_offset_get(l2_info));
		eth = (struct ethhdr *)(skb->head + sfe_l2_hdr_offset_get(l2_info));
		if (unlikely(cm->pppoe_session_id != ntohs(ph->sid)) || unlikely(!(ether_addr_equal((u8 *)cm->pppoe_remote_mac, (u8 *)eth->h_source)))) {
			DEBUG_TRACE("%px: PPPoE sessions with session IDs %d and %d or server MACs %pM and %pM did not match\n",
				    skb, cm->pppoe_session_id, ntohs(ph->sid), cm->pppoe_remote_mac, eth->h_source);
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_PPPOE_SESSION);
			return 0;
		}
		skb->protocol = htons(l2_info->protocol);
		this_cpu_inc(si->stats_pcpu->pppoe_decap_packets_forwarded64);

	} else if (unlikely(sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {

		/*
		 * If the packet carries a PPPoE header but the CME doesn't have the
		 * PPPoE flag set, exception the packet to Linux.
		 */
		rcu_read_unlock();
		DEBUG_TRACE("%px: CME doesn't contain PPPoE flag but packet has PPPoE header\n", skb);
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_PPPOE_NOT_SET_IN_CME);
		return 0;
	}

	/*
	 * From this point on we're good to modify the packet.
	 */

	/*
	 * For PPPoE flows, add the PPPoE header before the L2 header is added.
	 */
	if (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP) {
		if (unlikely(!sfe_pppoe_add_header(skb, cm->pppoe_session_id, PPP_IP))) {
			rcu_read_unlock();
			DEBUG_WARN("%px: PPPoE header addition failed\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_NO_HEADROOM);
			return 0;
		}
		this_cpu_inc(si->stats_pcpu->pppoe_encap_packets_forwarded64);
	}

	/*
	 * TODO: VLAN headers should be added here when they are supported.
	 */

	/*
	 * Enable HW csum if the rx checksum is verified and the xmit interface is CSUM offload capable.
	 * Note: If the L4 csum at Rx was found to be incorrect, we (the router) should use an incremental
	 * L4 checksum here so that HW does not re-calculate/replace the L4 csum.
	 */
	hw_csum = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD) && (skb->ip_summed == CHECKSUM_UNNECESSARY);

	/*
	 * Do we have to perform translations of the source address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC)) {
		u16 udp_csum;

		iph->saddr = cm->xlate_src_ip;
		udph->source = cm->xlate_src_port;

		/*
		 * Do we have a non-zero UDP checksum? If we do then we need
		 * to update it.
		 */
		if (unlikely(!hw_csum)) {
			udp_csum = udph->check;
			if (likely(udp_csum)) {
				u32 sum;

				if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL)) {
					sum = udp_csum + cm->xlate_src_partial_csum_adjustment;
				} else {
					sum = udp_csum + cm->xlate_src_csum_adjustment;
				}

				sum = (sum & 0xffff) + (sum >> 16);
				udph->check = (u16)sum;
			}
		}
	}
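
	/*
	 * The update above is the RFC 1624 incremental checksum technique: the
	 * connection match entry carries a precomputed one's-complement delta for
	 * the (address, port) rewrite, and the final line folds the carry back
	 * into the low 16 bits. A sketch of the same arithmetic, assuming a
	 * precomputed adjustment of 0x0005:
	 *
	 *	u32 sum = 0xfffe + 0x0005;          // 0x10003, carry in bit 16
	 *	sum = (sum & 0xffff) + (sum >> 16); // 0x0003 + 0x1 = 0x0004
	 *
	 * One fold is enough because a 16-bit checksum plus a 16-bit adjustment
	 * cannot exceed 0x1fffe. The same pattern is repeated for the destination
	 * translation below.
	 */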

	/*
	 * Do we have to perform translations of the destination address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST)) {
		u16 udp_csum;

		iph->daddr = cm->xlate_dest_ip;
		udph->dest = cm->xlate_dest_port;

		/*
		 * Do we have a non-zero UDP checksum? If we do then we need
		 * to update it.
		 */
		if (unlikely(!hw_csum)) {
			udp_csum = udph->check;
			if (likely(udp_csum)) {
				u32 sum;

				/*
				 * TODO: Use a common API for the below incremental checksum
				 * calculation for IPv4/IPv6 UDP/TCP.
				 */
				if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL)) {
					sum = udp_csum + cm->xlate_dest_partial_csum_adjustment;
				} else {
					sum = udp_csum + cm->xlate_dest_csum_adjustment;
				}

				sum = (sum & 0xffff) + (sum >> 16);
				udph->check = (u16)sum;
			}
		}
	}

	/*
	 * The UDP sock will be valid only in the decap path.
	 * Call the encap_rcv function associated with the udp_sock in cm.
	 */
	if (unlikely(cm->up)) {
		/*
		 * Call the decap handler associated with the sock.
		 * Also validates the UDP checksum before calling the decap handler.
		 */
		err = sfe_ipv4_udp_sk_deliver(skb, cm, ihl);
		if (unlikely(err == -1)) {
			rcu_read_unlock();
			this_cpu_inc(si->stats_pcpu->packets_dropped64);
			return 1;
		} else if (unlikely(err == 1)) {
			rcu_read_unlock();
			this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
			return 0;
		}

		/*
		 * Update traffic stats.
		 */
		atomic_inc(&cm->rx_packet_count);
		atomic_add(len, &cm->rx_byte_count);

		rcu_read_unlock();
		this_cpu_inc(si->stats_pcpu->packets_forwarded64);
		DEBUG_TRACE("%px: sfe: sfe_ipv4_recv_udp -> encap_rcv done.\n", skb);
		return 1;
	}

	/*
	 * Decrement our TTL, except when this function is called from the
	 * post-decap hook (tun_outer set).
	 */
	if (likely(!bridge_flow)) {
		iph->ttl -= (u8)(!tun_outer);
	}
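
	/*
	 * The decrement above is branchless: !tun_outer evaluates to 1 for an
	 * ordinary forwarded packet and to 0 for a tunnel's outer header, so the
	 * TTL is reduced by exactly one or left untouched without a conditional.
	 */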

	/*
	 * Update DSCP.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
	}

	/*
	 * If HW checksum offload is not possible, a full L3 checksum and an incremental
	 * L4 checksum are used to update the packet. Setting ip_summed to CHECKSUM_UNNECESSARY
	 * ensures the checksum is not recalculated further along the packet path.
	 */
	if (likely(hw_csum)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
	} else {
		iph->check = sfe_ipv4_gen_ip_csum(iph);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
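
	/*
	 * CHECKSUM_PARTIAL tells the transmit path that the checksums still need
	 * to be completed in hardware; this is safe here because hw_csum is only
	 * set when the L4 checksum was already verified on receive, so a
	 * device-computed replacement cannot mask an end-to-end error. In the
	 * software branch the IPv4 header checksum is regenerated because the TTL
	 * and possibly the addresses were rewritten above.
	 */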

	/*
	 * Update traffic stats.
	 */
	atomic_inc(&cm->rx_packet_count);
	atomic_add(len, &cm->rx_byte_count);

	xmit_dev = cm->xmit_dev;
	skb->dev = xmit_dev;

	/*
	 * Check to see if we need to write a header.
	 */
	if (likely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
		if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
			dev_hard_header(skb, xmit_dev, ntohs(skb->protocol),
					cm->xmit_dest_mac, cm->xmit_src_mac, len);
		} else {
			/*
			 * For the simple case we write this really fast.
			 */
			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);

			eth->h_proto = skb->protocol;
			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
		}
	}

	/*
	 * Update priority of skb.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
		skb->priority = cm->priority;
	}

	/*
	 * Mark outgoing packet.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
		skb->mark = cm->connection->mark;
	}

	rcu_read_unlock();

	this_cpu_inc(si->stats_pcpu->packets_forwarded64);

	/*
	 * We're going to check for GSO flags when we transmit the packet so
	 * start fetching the necessary cache line now.
	 */
	prefetch(skb_shinfo(skb));

	/*
	 * Mark that this packet has been fast forwarded.
	 */
	skb->fast_forwarded = 1;

	/*
	 * Send the packet on its way.
	 */
	dev_queue_xmit(skb);

	return 1;
}