/*
 * sfe_ipv6_udp.c
 *	Shortcut forwarding engine file for IPv6 UDP
 *
 * Copyright (c) 2015-2016, 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2021,2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <linux/etherdevice.h>
#include <linux/version.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv6.h"
#include "sfe_pppoe.h"

/*
 * sfe_ipv6_recv_udp()
 *	Handle UDP packet receives and forwarding.
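 *
 * Returns 1 if the packet has been fast-forwarded (consumed), 0 if it
 * should be handed back to the normal Linux receive path.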
 */
int sfe_ipv6_recv_udp(struct sfe_ipv6 *si, struct sk_buff *skb, struct net_device *dev,
		      unsigned int len, struct ipv6hdr *iph, unsigned int ihl, bool flush_on_find, struct sfe_l2_info *l2_info)
{
	struct udphdr *udph;
	struct sfe_ipv6_addr *src_ip;
	struct sfe_ipv6_addr *dest_ip;
	__be16 src_port;
	__be16 dest_port;
	struct sfe_ipv6_connection_match *cm;
	struct net_device *xmit_dev;
	bool ret;
	bool hw_csum;

	/*
	 * Is our packet too short to contain a valid UDP header?
	 */
	if (!pskb_may_pull(skb, (sizeof(struct udphdr) + ihl))) {
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UDP_HEADER_INCOMPLETE);
		DEBUG_TRACE("packet too short for UDP header\n");
		return 0;
	}

	/*
	 * Read the IP address and port information. Read the IP header data first
	 * because we've almost certainly got that in the cache. We may not yet have
	 * the UDP header cached though so allow more time for any prefetching.
	 */
	src_ip = (struct sfe_ipv6_addr *)iph->saddr.s6_addr32;
	dest_ip = (struct sfe_ipv6_addr *)iph->daddr.s6_addr32;

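	/*
	 * Note: the ports are read and compared in network byte order; the
	 * connection match lookup below expects them that way, so no ntohs()
	 * conversion is needed here.
	 */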
	udph = (struct udphdr *)(skb->data + ihl);
	src_port = udph->source;
	dest_port = udph->dest;

	rcu_read_lock();

	/*
	 * Look for a connection match.
	 */
#ifdef CONFIG_NF_FLOW_COOKIE
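	/*
	 * A flow cookie stamped on the skb by an external classifier (set up
	 * outside this file) indexes the match table directly, avoiding the
	 * full 5-tuple lookup; fall back to the normal lookup if the slot is
	 * empty.
	 */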
	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
	if (unlikely(!cm)) {
		cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, src_port, dest_ip, dest_port);
	}
#else
	cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_UDP, src_ip, src_port, dest_ip, dest_port);
#endif
	if (unlikely(!cm)) {
		rcu_read_unlock();
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UDP_NO_CONNECTION);
		DEBUG_TRACE("no connection found\n");
		return 0;
	}

	/*
	 * If our packet has been marked as "flush on find" we can't actually
	 * forward it in the fast path, but now that we've found an associated
	 * connection we can flush that out before we process the packet.
	 */
	if (unlikely(flush_on_find)) {
		struct sfe_ipv6_connection *c = cm->connection;
		spin_lock_bh(&si->lock);
		ret = sfe_ipv6_remove_connection(si, c);
		spin_unlock_bh(&si->lock);

		DEBUG_TRACE("flush on find\n");
		if (ret) {
			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
		}
		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UDP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
		return 0;
	}

#ifdef CONFIG_XFRM
	/*
	 * We can't accelerate the flow in this direction, so let it go
	 * through the slow path.
	 */
	if (unlikely(!cm->flow_accel)) {
		rcu_read_unlock();
		this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
		return 0;
	}
#endif

	/*
	 * Does our hop_limit allow forwarding?
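	 * A hop_limit below 2 would expire on this hop, so punt the packet to
	 * the slow path, which can generate the ICMPv6 Time Exceeded error.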
	 */
	if (unlikely(iph->hop_limit < 2)) {
		struct sfe_ipv6_connection *c = cm->connection;
		spin_lock_bh(&si->lock);
		ret = sfe_ipv6_remove_connection(si, c);
		spin_unlock_bh(&si->lock);

		DEBUG_TRACE("hop_limit too low\n");
		if (ret) {
			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
		}
		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UDP_SMALL_TTL);
		return 0;
	}

	/*
	 * If our packet is larger than the MTU of the transmit interface then
	 * we can't forward it easily.
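	 * (IPv6 routers do not fragment in flight; the slow path is left to
	 * emit an ICMPv6 Packet Too Big if needed.)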
	 */
	if (unlikely(len > cm->xmit_dev_mtu)) {
		struct sfe_ipv6_connection *c = cm->connection;
		spin_lock_bh(&si->lock);
		ret = sfe_ipv6_remove_connection(si, c);
		spin_unlock_bh(&si->lock);

		DEBUG_TRACE("larger than mtu\n");
		if (ret) {
			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
		}
		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_UDP_NEEDS_FRAGMENTATION);
		return 0;
	}

	/*
	 * For PPPoE packets, match the server MAC and session id.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_DECAP)) {
		struct pppoe_hdr *ph;
		struct ethhdr *eth;

		if (unlikely(!l2_info) || unlikely(!sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: PPPoE is not parsed\n", skb);
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INCORRECT_PPPOE_PARSING);
			return 0;
		}

		ph = (struct pppoe_hdr *)(skb->head + sfe_l2_pppoe_hdr_offset_get(l2_info));
		eth = (struct ethhdr *)(skb->head + sfe_l2_hdr_offset_get(l2_info));
		if (unlikely(cm->pppoe_session_id != htons(ph->sid)) || unlikely(!(ether_addr_equal((u8 *)cm->pppoe_remote_mac, (u8 *)eth->h_source)))) {
			rcu_read_unlock();
			DEBUG_TRACE("%px: PPPoE session did not match\n", skb);
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_PPPOE_SESSION);
			return 0;
		}
		this_cpu_inc(si->stats_pcpu->pppoe_decap_packets_forwarded64);
	} else if (unlikely(l2_info) && unlikely(sfe_l2_parse_flag_check(l2_info, SFE_L2_PARSE_FLAGS_PPPOE_INGRESS))) {
		/*
		 * The packet carries a PPPoE header but the connection match
		 * entry has no PPPoE flag set, so exception the packet to
		 * Linux.
		 */
		rcu_read_unlock();
		DEBUG_TRACE("%px: PPPoE is not parsed\n", skb);
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INCORRECT_PPPOE_PARSING);
		return 0;
	}

	/*
	 * From this point on we're good to modify the packet.
	 */

	/*
	 * Check if the skb was cloned. If it was, unshare it, because the data
	 * area is going to be written in this path and we don't want to change
	 * the cloned skb's data section.
	 */
	if (unlikely(skb_cloned(skb))) {
		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			DEBUG_WARN("Failed to unshare the cloned skb\n");
			rcu_read_unlock();
			return 0;
		}

		/*
		 * Update the iph and udph pointers with the unshared skb's data area.
		 */
		iph = (struct ipv6hdr *)skb->data;
		udph = (struct udphdr *)(skb->data + ihl);
	}

	/*
	 * For PPPoE flows, add the PPPoE header before the L2 header is added.
	 */
	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PPPOE_ENCAP) {
		if (unlikely(!sfe_pppoe_add_header(skb, cm->pppoe_session_id, PPP_IPV6))) {
			rcu_read_unlock();
			DEBUG_WARN("%px: PPPoE header addition failed\n", skb);
			sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_PPPOE_HEADER_ENCAP_FAILED);
			return 0;
		}
		this_cpu_inc(si->stats_pcpu->pppoe_encap_packets_forwarded64);
	}

	/*
	 * TODO: VLAN headers should be added here when they are supported.
	 */

	/*
	 * Update DSCP.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
		sfe_ipv6_change_dsfield(iph, cm->dscp);
	}

	/*
	 * Decrement our hop_limit.
	 */
	iph->hop_limit -= 1;
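	/*
	 * Note: unlike IPv4 there is no IP header checksum to adjust after
	 * changing hop_limit; the IPv6 header is not checksummed.
	 */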

	/*
	 * Enable HW csum if the rx checksum was verified and the xmit interface is CSUM offload capable.
	 * Note: if the L4 csum at Rx was found to be incorrect, we (the router) should use an incremental
	 * L4 checksum here so that HW does not re-calculate/replace the L4 csum.
	 */
	hw_csum = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD) && (skb->ip_summed == CHECKSUM_UNNECESSARY);

	/*
	 * Do we have to perform translations of the source address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_SRC)) {
		u16 udp_csum;

		iph->saddr.s6_addr32[0] = cm->xlate_src_ip[0].addr[0];
		iph->saddr.s6_addr32[1] = cm->xlate_src_ip[0].addr[1];
		iph->saddr.s6_addr32[2] = cm->xlate_src_ip[0].addr[2];
		iph->saddr.s6_addr32[3] = cm->xlate_src_ip[0].addr[3];
		udph->source = cm->xlate_src_port;

		/*
		 * Do we have a non-zero UDP checksum? If we do then we need
		 * to update it.
		 */
		if (unlikely(!hw_csum)) {
			udp_csum = udph->check;
			if (likely(udp_csum)) {
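				/*
				 * The precomputed xlate_src_csum_adjustment is
				 * the one's-complement delta between the old
				 * and new address/port words (an RFC 1624
				 * style incremental update); the fold below
				 * wraps the carry back into the low 16 bits.
				 */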
				u32 sum = udp_csum + cm->xlate_src_csum_adjustment;
				sum = (sum & 0xffff) + (sum >> 16);
				udph->check = (u16)sum;
			}
		}
	}

	/*
	 * Do we have to perform translations of the destination address/port?
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_XLATE_DEST)) {
		u16 udp_csum;

		iph->daddr.s6_addr32[0] = cm->xlate_dest_ip[0].addr[0];
		iph->daddr.s6_addr32[1] = cm->xlate_dest_ip[0].addr[1];
		iph->daddr.s6_addr32[2] = cm->xlate_dest_ip[0].addr[2];
		iph->daddr.s6_addr32[3] = cm->xlate_dest_ip[0].addr[3];
		udph->dest = cm->xlate_dest_port;

		/*
		 * Do we have a non-zero UDP checksum? If we do then we need
		 * to update it.
		 */
		if (unlikely(!hw_csum)) {
			udp_csum = udph->check;
			if (likely(udp_csum)) {
				u32 sum = udp_csum + cm->xlate_dest_csum_adjustment;
				sum = (sum & 0xffff) + (sum >> 16);
				udph->check = (u16)sum;
			}
		}
	}

	/*
	 * If HW checksum offload is not possible, the incremental L4 checksum
	 * update above has already fixed the packet; setting ip_summed to
	 * CHECKSUM_UNNECESSARY ensures the checksum is not recalculated further
	 * along the packet path.
	 */
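	/*
	 * With CHECKSUM_PARTIAL the transmit path (the NIC, or the software
	 * fallback in the core xmit code) generates the UDP checksum for us.
	 */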
	if (likely(hw_csum)) {
		skb->ip_summed = CHECKSUM_PARTIAL;
	} else {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/*
	 * Update traffic stats.
	 */
	atomic_inc(&cm->rx_packet_count);
	atomic_add(len, &cm->rx_byte_count);

	xmit_dev = cm->xmit_dev;
	skb->dev = xmit_dev;

	/*
	 * Check to see if we need to write a header.
	 */
	if (likely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
		if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
			dev_hard_header(skb, xmit_dev, ETH_P_IPV6,
					cm->xmit_dest_mac, cm->xmit_src_mac, len);
		} else {
			/*
			 * For the simple case we write this really fast.
			 */
			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
			eth->h_proto = htons(ETH_P_IPV6);
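			/*
			 * Note: ether_addr_copy() requires u16-aligned source
			 * and destination, which the pushed Ethernet header
			 * and the cached MACs are assumed to satisfy.
			 */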
			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
		}
	}

	/*
	 * Update priority of skb.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
		skb->priority = cm->priority;
	}

	/*
	 * Mark outgoing packet.
	 */
	skb->mark = cm->connection->mark;
	if (skb->mark) {
		DEBUG_TRACE("SKB MARK is NON ZERO %x\n", skb->mark);
	}

	rcu_read_unlock();

	this_cpu_inc(si->stats_pcpu->packets_forwarded64);

	/*
	 * We're going to check for GSO flags when we transmit the packet so
	 * start fetching the necessary cache line now.
	 */
	prefetch(skb_shinfo(skb));

	/*
	 * Mark that this packet has been fast forwarded.
	 */
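	/*
	 * (The fast_forwarded flag is a field added by SFE's kernel patch
	 * rather than a mainline skb field; it keeps the SFE receive hook
	 * from picking this packet up a second time.)
	 */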
	skb->fast_forwarded = 1;

	/*
	 * Send the packet on its way.
	 */
	dev_queue_xmit(skb);

	return 1;
}