/*
 * sfe_ipv4_udp.c
 *	Shortcut forwarding engine - IPv4 UDP implementation
 *
 * Copyright (c) 2013-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/version.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv4.h"
#include "sfe_pppoe.h"
#include "sfe_vlan.h"

/*
 * sfe_ipv4_udp_sk_deliver()
 *	Deliver the packet to the protocol handler registered with Linux.
 *	To be called under rcu_read_lock().
 *	Returns:
 *	1 if the packet needs to be passed to Linux.
 *	0 if the packet is processed successfully.
 *	-1 if the packet is dropped in SFE.
 */
static int sfe_ipv4_udp_sk_deliver(struct sk_buff *skb, struct sfe_ipv4_connection_match *cm, unsigned int ihl)
{
	struct udp_sock *up;
	struct sock *sk;
	int ret;
	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);

	/*
	 * Call the decap handler only if a valid encap_rcv handler is registered.
	 */
	up = rcu_dereference(cm->up);
	encap_rcv = READ_ONCE(up->encap_rcv);
	if (!encap_rcv) {
		DEBUG_ERROR("%px: sfe: Error: up->encap_rcv is NULL\n", skb);
		return 1;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
	nf_reset(skb);
#else
	nf_reset_ct(skb);
#endif

	skb_pull(skb, ihl);
	skb_reset_transport_header(skb);

	/*
	 * Verify the checksum before handing the packet to the encap_rcv handler.
	 * TODO: This approach ignores UDP-Lite for now; consider calling the
	 * Linux API to do the checksum validation instead.
	 */
	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY) && unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
		skb->csum = inet_compute_pseudo(skb, IPPROTO_UDP);
		if (unlikely(__skb_checksum_complete(skb))) {
			DEBUG_ERROR("%px: sfe: Invalid udp checksum\n", skb);
			kfree_skb(skb);
			return -1;
		}
		DEBUG_TRACE("%px: sfe: udp checksum verified in s/w correctly.\n", skb);
	}
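
	/*
	 * Illustrative note (added commentary, not from the original source):
	 * the UDP checksum covers a pseudo-header of {saddr, daddr, 0,
	 * IPPROTO_UDP, UDP length} in addition to the UDP header and payload.
	 * inet_compute_pseudo() seeds skb->csum with the pseudo-header sum so
	 * that __skb_checksum_complete() can fold in the remaining packet
	 * bytes and verify the result against udph->check.
	 */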

	sk = (struct sock *)up;

	/*
	 * At this point the L4 checksum has already been verified and the packet
	 * is going to Linux's tunnel decap handler. Setting the ip_summed field
	 * to CHECKSUM_NONE would ensure that the inner header checksum is
	 * validated correctly later in the packet path.
	 * TODO: Find the fix to set skb->ip_summed = CHECKSUM_NONE;
	 */

	/*
	 * encap_rcv() returns the following value:
	 * =0 if skb was successfully passed to the encap
	 *    handler or was discarded by it.
	 * >0 if skb should be passed on to UDP.
	 * <0 if skb should be resubmitted as proto -N
	 */
	ret = encap_rcv(sk, skb);
	if (unlikely(ret)) {
		/*
		 * If encap_rcv fails, the vxlan driver drops the packet.
		 * No need to free the skb here.
		 */
		DEBUG_ERROR("%px: sfe: udp-decap API return error: %d\n", skb, ret);
		return -1;
	}

	return 0;
}

/*
 * sfe_ipv4_recv_udp()
 *	Handle UDP packet receives and forwarding.
 */
int sfe_ipv4_recv_udp(struct sfe_ipv4 *si, struct sk_buff *skb, struct net_device *dev,
		      unsigned int len, struct iphdr *iph, unsigned int ihl,
		      bool sync_on_find, struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct udphdr *udph;
	__be32 src_ip;
	__be32 dest_ip;
	__be16 src_port;
	__be16 dest_port;
	struct sfe_ipv4_connection_match *cm;
	u8 ttl;
	struct net_device *xmit_dev;
	bool hw_csum;
	int err;
	bool bridge_flow;
	int ret;
	bool fast_xmit;
	netdev_features_t features;

	/*
	 * Is our packet too short to contain a valid UDP header?
	 */
	if (unlikely(!pskb_may_pull(skb, (sizeof(struct udphdr) + ihl)))) {
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_HEADER_INCOMPLETE);
		DEBUG_TRACE("%px: packet too short for UDP header\n", skb);
		return 0;
	}

	/*
	 * Read the IP address and port information. Read the IP header data first
	 * because we've almost certainly got that in the cache. We may not yet have
	 * the UDP header cached though so allow more time for any prefetching.
	 */
	src_ip = iph->saddr;
	dest_ip = iph->daddr;

	udph = (struct udphdr *)(skb->data + ihl);
	src_port = udph->source;
	dest_port = udph->dest;

	rcu_read_lock();

	/*
	 * Look for a connection match.
	 */
#ifdef CONFIG_NF_FLOW_COOKIE
	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
	if (unlikely(!cm)) {
		cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, src_port, dest_ip, dest_port);
	}
#else
	/*
	 * 5-tuple lookup for UDP flow.
	 */
	cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, src_port, dest_ip, dest_port);
#endif
	if (unlikely(!cm)) {

		/*
		 * Try a 4-tuple lookup; required for tunnels like VxLAN.
		 */
		cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, 0, dest_ip, dest_port);
		if (unlikely(!cm)) {
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_NO_CONNECTION);
			DEBUG_TRACE("%px: sfe: no connection found in 4-tuple lookup.\n", skb);
			return 0;
		}
	}
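
	/*
	 * Note (added commentary): the 4-tuple fallback wildcards the source
	 * port to 0. This matches tunnel flows such as VxLAN, where the outer
	 * UDP source port is derived from a hash of the inner headers and so
	 * cannot be part of a stable 5-tuple connection key.
	 */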

	/*
	 * Validate the source interface.
	 */
	if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
		struct sfe_ipv4_connection *c = cm->connection;
		spin_lock_bh(&si->lock);
		ret = sfe_ipv4_remove_connection(si, c);
		spin_unlock_bh(&si->lock);

		if (ret) {
			sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
		}
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
		DEBUG_TRACE("flush on source interface check failure\n");
		return 0;
	}

	/*
	 * If our packet has been marked as "sync on find" we can't actually
	 * forward it in the fast path, but now that we've found an associated
	 * connection we need to sync its status before exceptioning it to the
	 * slow path.
	 */
	if (unlikely(sync_on_find)) {
		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
		DEBUG_TRACE("%px: sfe: sync on find\n", cm);
		return 0;
	}

#ifdef CONFIG_XFRM
	/*
	 * We can't accelerate the flow in this direction, just let it go
	 * through the slow path.
	 */
	if (unlikely(!cm->flow_accel)) {
		rcu_read_unlock();
		this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
		return 0;
	}
#endif

	/*
	 * Do we expect an ingress VLAN tag for this flow?
	 */
	if (unlikely(!sfe_vlan_validate_ingress_tag(skb, cm->ingress_vlan_hdr_cnt, cm->ingress_vlan_hdr, l2_info))) {
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INGRESS_VLAN_TAG_MISMATCH);
		DEBUG_TRACE("VLAN tag mismatch. skb=%px\n", skb);
		return 0;
	}

	bridge_flow = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);

	/*
	 * Does our TTL allow forwarding?
	 */
	if (likely(!bridge_flow)) {
		ttl = iph->ttl;
		if (unlikely(ttl < 2)) {
			sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
			rcu_read_unlock();

			DEBUG_TRACE("%px: sfe: TTL too low\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_SMALL_TTL);
			return 0;
		}
	}
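
	/*
	 * Note (added commentary): a routed packet has its TTL decremented
	 * further below, and a packet whose TTL would reach zero must not be
	 * forwarded, so TTL < 2 packets are exceptioned to the slow path where
	 * Linux can generate the ICMP Time Exceeded message.
	 */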

	/*
	 * If our packet is larger than the MTU of the transmit interface then
	 * we can't forward it easily.
	 */
	if (unlikely(len > cm->xmit_dev_mtu)) {
		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_NEEDS_FRAGMENTATION);
		DEBUG_TRACE("%px: sfe: larger than MTU\n", cm);
		return 0;
	}

	/*
	 * Check if the skb was cloned. If it was, unshare it, because the data
	 * area is going to be written in this path and we don't want to change
	 * the cloned skb's data section.
	 */
	if (unlikely(skb_cloned(skb))) {
		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			DEBUG_WARN("Failed to unshare the cloned skb\n");
			rcu_read_unlock();
			return 0;
		}

		/*
		 * Update the iph and udph pointers with the unshared skb's data area.
		 */
		iph = (struct iphdr *)skb->data;
		udph = (struct udphdr *)(skb->data + ihl);
	}

	/*
	 * For PPPoE packets, match the server MAC and session ID.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_DECAP)) {
		struct pppoe_hdr *ph;
		struct ethhdr *eth;

		if (unlikely(!sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: PPPoE header not present in packet for PPPoE rule\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INCORRECT_PPPOE_PARSING);
			return 0;
		}

		ph = (struct pppoe_hdr *)(skb->head + sfe_l2_pppoe_hdr_offset_get(l2_info));
		eth = (struct ethhdr *)(skb->head + sfe_l2_hdr_offset_get(l2_info));
		if (unlikely(cm->pppoe_session_id != ntohs(ph->sid)) || unlikely(!(ether_addr_equal((u8 *)cm->pppoe_remote_mac, (u8 *)eth->h_source)))) {
			DEBUG_TRACE("%px: PPPoE session IDs %d and %d or server MACs %pM and %pM did not match\n",
				    skb, cm->pppoe_session_id, ntohs(ph->sid), cm->pppoe_remote_mac, eth->h_source);
			rcu_read_unlock();
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_PPPOE_SESSION);
			return 0;
		}
		skb->protocol = htons(l2_info->protocol);
		this_cpu_inc(si->stats_pcpu->pppoe_decap_packets_forwarded64);

	} else if (unlikely(sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {

		/*
		 * If the packet contains a PPPoE header but the CME doesn't have
		 * the PPPoE flag set, exception the packet to Linux.
		 */
		if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: CME doesn't contain PPPoE flag but packet has PPPoE header\n", skb);
			sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_PPPOE_NOT_SET_IN_CME);
			return 0;
		}

		/*
		 * For bridged flows, when the packet contains a PPPoE header,
		 * restore the header and forward to the xmit interface.
		 */
		__skb_push(skb, (sizeof(struct pppoe_hdr) + sizeof(struct sfe_ppp_hdr)));
		l2_info->l2_hdr_size -= (sizeof(struct pppoe_hdr) + sizeof(struct sfe_ppp_hdr));
		this_cpu_inc(si->stats_pcpu->pppoe_bridge_packets_forwarded64);
	}
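
	/*
	 * Note (added commentary): the __skb_push() above simply re-exposes
	 * the PPPoE + PPP header bytes that ingress parsing skipped over;
	 * l2_info->l2_hdr_size is reduced accordingly so the headroom check
	 * below only accounts for L2 headers that still have to be written.
	 */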

	/*
	 * Check if the skb has enough headroom to write L2 headers.
	 */
	if (unlikely(skb_headroom(skb) < cm->l2_hdr_size)) {
		rcu_read_unlock();
		DEBUG_WARN("%px: Not enough headroom: %u\n", skb, skb_headroom(skb));
		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_NO_HEADROOM);
		return 0;
	}

	/*
	 * From this point on we're good to modify the packet.
	 */

	/*
	 * For PPPoE flows, add the PPPoE header before the L2 header is added.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PPPOE_ENCAP)) {
		sfe_pppoe_add_header(skb, cm->pppoe_session_id, PPP_IP);
		this_cpu_inc(si->stats_pcpu->pppoe_encap_packets_forwarded64);
	}

	/*
	 * Enable HW csum if the rx checksum is verified and the xmit interface is CSUM offload capable.
	 * Note: If the L4 csum at Rx was found to be incorrect, we (the router) should use an incremental
	 * L4 checksum here so that HW does not re-calculate/replace the L4 csum.
	 */
	hw_csum = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD) && (skb->ip_summed == CHECKSUM_UNNECESSARY);

	/*
	 * Do we have to perform translations of the source address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC)) {
		u16 udp_csum;

		iph->saddr = cm->xlate_src_ip;
		udph->source = cm->xlate_src_port;

		/*
		 * Do we have a non-zero UDP checksum? If we do then we need
		 * to update it.
		 */
		if (unlikely(!hw_csum)) {
			udp_csum = udph->check;
			if (likely(udp_csum)) {
				u32 sum;

				if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL)) {
					sum = udp_csum + cm->xlate_src_partial_csum_adjustment;
				} else {
					sum = udp_csum + cm->xlate_src_csum_adjustment;
				}

				sum = (sum & 0xffff) + (sum >> 16);
				udph->check = (u16)sum;
			}
		}
	}

	/*
	 * Do we have to perform translations of the destination address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST)) {
		u16 udp_csum;

		iph->daddr = cm->xlate_dest_ip;
		udph->dest = cm->xlate_dest_port;

		/*
		 * Do we have a non-zero UDP checksum? If we do then we need
		 * to update it.
		 */
		if (unlikely(!hw_csum)) {
			udp_csum = udph->check;
			if (likely(udp_csum)) {
				u32 sum;

				/*
				 * TODO: Use a common API for below incremental checksum calculation
				 * for IPv4/IPv6 UDP/TCP
				 */
				if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL)) {
					sum = udp_csum + cm->xlate_dest_partial_csum_adjustment;
				} else {
					sum = udp_csum + cm->xlate_dest_csum_adjustment;
				}

				sum = (sum & 0xffff) + (sum >> 16);
				udph->check = (u16)sum;
			}
		}
	}
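
	/*
	 * Illustrative sketch (an assumption, not an existing SFE API) of the
	 * common incremental-checksum helper that the TODO above asks for.
	 * It applies a precomputed adjustment and folds the carry back into
	 * 16 bits, exactly as the two open-coded blocks do:
	 *
	 *	static inline u16 sfe_udp_csum_adjust(u16 udp_csum, u32 adjustment)
	 *	{
	 *		u32 sum = udp_csum + adjustment;
	 *
	 *		sum = (sum & 0xffff) + (sum >> 16);
	 *		return (u16)sum;
	 *	}
	 *
	 * With such a helper, both translation paths above would reduce to:
	 *
	 *	udph->check = sfe_udp_csum_adjust(udph->check, adj);
	 *
	 * where adj is the (partial_)csum_adjustment selected by ip_summed.
	 */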

	/*
	 * The UDP sock will be valid only in the decap path.
	 * Call the encap_rcv function associated with the udp_sock in cm.
	 */
	if (unlikely(cm->up)) {
		/*
		 * Call the decap handler associated with the sock.
		 * This also validates the UDP checksum before calling the decap handler.
		 */
		err = sfe_ipv4_udp_sk_deliver(skb, cm, ihl);
		if (unlikely(err == -1)) {
			rcu_read_unlock();
			this_cpu_inc(si->stats_pcpu->packets_dropped64);
			return 1;
		} else if (unlikely(err == 1)) {
			rcu_read_unlock();
			this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
			return 0;
		}

		/*
		 * Update traffic stats.
		 */
		atomic_inc(&cm->rx_packet_count);
		atomic_add(len, &cm->rx_byte_count);

		rcu_read_unlock();
		this_cpu_inc(si->stats_pcpu->packets_forwarded64);
		DEBUG_TRACE("%px: sfe: sfe_ipv4_recv_udp -> encap_rcv done.\n", skb);
		return 1;
	}

	/*
	 * Decrement our TTL, except when this is called from the post-decap
	 * hook function.
	 */
	if (likely(!bridge_flow)) {
		iph->ttl -= (u8)(!tun_outer);
	}

	/*
	 * Update DSCP
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
	}

	/*
	 * If HW checksum offload is possible, mark the packet CHECKSUM_PARTIAL so
	 * that the hardware computes/replaces the L4 checksum on transmit. Otherwise
	 * the L4 checksum has already been updated incrementally above, and only the
	 * full L3 header checksum is regenerated here; nothing further in the packet
	 * path needs to recalculate it.
	 */
	if (likely(hw_csum)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
	} else {
		iph->check = sfe_ipv4_gen_ip_csum(iph);
	}
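
	/*
	 * Note (added commentary): in the hw_csum branch the IPv4 header
	 * checksum is intentionally not regenerated either; this path assumes
	 * the CSUM-offload-capable xmit interface recomputes both the L3 and
	 * L4 checksums on transmit (see SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD).
	 */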

	/*
	 * Update traffic stats.
	 */
	atomic_inc(&cm->rx_packet_count);
	atomic_add(len, &cm->rx_byte_count);

	xmit_dev = cm->xmit_dev;
	skb->dev = xmit_dev;

	/*
	 * Check to see if we need to add VLAN tags
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG)) {
		sfe_vlan_add_tag(skb, cm->egress_vlan_hdr_cnt, cm->egress_vlan_hdr);
	}

	/*
	 * Check to see if we need to write an Ethernet header.
	 */
	if (likely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
		if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
			dev_hard_header(skb, xmit_dev, ntohs(skb->protocol),
					cm->xmit_dest_mac, cm->xmit_src_mac, len);
		} else {
			/*
			 * For the simple case we write this really fast.
			 */
			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
			eth->h_proto = skb->protocol;
			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
		}
	}

	/*
	 * Update priority of skb.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
		skb->priority = cm->priority;
	}

	/*
	 * Mark outgoing packet.
	 */
	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
		skb->mark = cm->connection->mark;
	}

	/*
	 * For the first packet of a flow, check whether it qualifies for fast xmit.
	 */
	if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
		     && (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION))) {
		cm->features = netif_skb_features(skb);
		if (likely(sfe_fast_xmit_check(skb, cm->features))) {
			cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT;
		}
		cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED;
	}
	features = cm->features;

	fast_xmit = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT);
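
	/*
	 * Note (added commentary): fast-xmit eligibility is evaluated once per
	 * flow and cached in the connection match entry; later packets reuse
	 * the cached verdict and features, and only the cheap per-packet check
	 * inside dev_fast_xmit() below remains on the hot path.
	 */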

	rcu_read_unlock();

	this_cpu_inc(si->stats_pcpu->packets_forwarded64);

	/*
	 * We're going to check for GSO flags when we transmit the packet so
	 * start fetching the necessary cache line now.
	 */
	prefetch(skb_shinfo(skb));

	/*
	 * Perform the per-packet checks before we can fast transmit the packet.
	 */
	if (likely(fast_xmit && dev_fast_xmit(skb, xmit_dev, features))) {
		this_cpu_inc(si->stats_pcpu->packets_fast_xmited64);
		return 1;
	}

	/*
	 * Mark that this packet has been fast forwarded.
	 */
	skb->fast_forwarded = 1;

	/*
	 * Send the packet on its way.
	 */
	dev_queue_xmit(skb);

	return 1;
}