/*
 * sfe_ipv6_gre.c
 *	Shortcut forwarding engine file for IPv6 GRE
 *
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/gre.h>
#include <net/protocol.h>
#include <linux/etherdevice.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>

#include "sfe_debug.h"
#include "sfe_api.h"
#include "sfe.h"
#include "sfe_flow_cookie.h"
#include "sfe_ipv6.h"
#include "sfe_vlan.h"

/*
 * sfe_ipv6_recv_gre()
 *	Handle GRE packet receives and forwarding.
 */
int sfe_ipv6_recv_gre(struct sfe_ipv6 *si, struct sk_buff *skb, struct net_device *dev,
		      unsigned int len, struct ipv6hdr *iph, unsigned int ihl, bool sync_on_find,
		      struct sfe_l2_info *l2_info, bool tun_outer)
{
	struct sfe_ipv6_connection_match *cm;
	struct sfe_ipv6_addr *dest_ip;
	struct sfe_ipv6_addr *src_ip;
	struct net_device *xmit_dev;
	bool bridge_flow;
	bool passthrough;
	bool ret;

	/*
	 * Is our packet too short to contain a valid GRE header?
	 */
	if (!pskb_may_pull(skb, (sizeof(struct gre_base_hdr) + ihl))) {
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_HEADER_INCOMPLETE);
		DEBUG_TRACE("packet too short for GRE header\n");
		return 0;
	}

	/*
	 * Read the IP address information. Read the IP header data first
	 * because we've almost certainly got that in the cache.
	 */
	src_ip = (struct sfe_ipv6_addr *)iph->saddr.s6_addr32;
	dest_ip = (struct sfe_ipv6_addr *)iph->daddr.s6_addr32;

	rcu_read_lock();

	/*
	 * Look for a connection match.
	 */
#ifdef CONFIG_NF_FLOW_COOKIE
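	/*
	 * Try the flow cookie table first and fall back to the regular
	 * connection lookup if there is no cached match.
	 */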
	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
	if (unlikely(!cm)) {
		cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_GRE, src_ip, 0, dest_ip, 0);
	}
#else
	cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_GRE, src_ip, 0, dest_ip, 0);
#endif
	if (unlikely(!cm)) {
		rcu_read_unlock();
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_NO_CONNECTION);
		DEBUG_TRACE("no connection match found dev %s src ip %pI6 dest ip %pI6\n", dev->name, src_ip, dest_ip);
		return 0;
	}

	/*
	 * Do we expect an ingress VLAN tag for this flow?
	 */
	if (unlikely(!sfe_vlan_validate_ingress_tag(skb, cm->ingress_vlan_hdr_cnt, cm->ingress_vlan_hdr, l2_info))) {
		rcu_read_unlock();
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INGRESS_VLAN_TAG_MISMATCH);
		DEBUG_TRACE("VLAN tag mismatch. skb=%px\n", skb);
		return 0;
	}

	/*
	 * Validate the source interface.
	 */
	if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
		struct sfe_ipv6_connection *c = cm->connection;
		int ret;

		spin_lock_bh(&si->lock);
		ret = sfe_ipv6_remove_connection(si, c);
		spin_unlock_bh(&si->lock);

		if (ret) {
			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
		}
		rcu_read_unlock();
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
		DEBUG_TRACE("flush on wrong source interface check failure\n");
		return 0;
	}

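	/*
	 * Remember whether this connection has been marked as a passthrough flow.
	 */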
	passthrough = cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;

	/*
	 * If our packet has been marked as "sync on find" we can't actually
	 * forward it in the fast path, but now that we've found an associated
	 * connection we need to sync its status before exceptioning it to the
	 * slow path, unless it is a passthrough packet.
	 * TODO: revisit to ensure that passthrough traffic is not bypassing the firewall for fragmented cases
	 */
	if (unlikely(sync_on_find) && !passthrough) {
		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT);
		DEBUG_TRACE("Sync on find\n");
		return 0;
	}

	bridge_flow = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);

	/*
	 * Does our hop_limit allow forwarding?
	 */
	if (!bridge_flow && (iph->hop_limit < 2) && passthrough) {
		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_SMALL_TTL);
		DEBUG_TRACE("hop_limit too low\n");
		return 0;
	}

	/*
	 * From this point on we're good to modify the packet.
	 */

	/*
	 * Check if the skb was cloned. If it was, unshare it, because the data
	 * area is going to be written in this path and we don't want to change
	 * the cloned skb's data section.
	 */
	if (unlikely(skb_cloned(skb))) {
		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			DEBUG_WARN("Failed to unshare the cloned skb\n");
			rcu_read_unlock();
			return 1;
		}

		/*
		 * Update the iph pointer with the unshared skb's data area.
		 */
		iph = (struct ipv6hdr *)skb->data;
	}

	/*
	 * Check if skb has enough headroom to write L2 headers
	 */
	if (unlikely(skb_headroom(skb) < cm->l2_hdr_size)) {
		rcu_read_unlock();
		DEBUG_WARN("%px: Not enough headroom: %u\n", skb, skb_headroom(skb));
		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_NO_HEADROOM);
		return 0;
	}

	/*
	 * The protocol handler will be valid only in the decap path.
	 */
	if (cm->proto) {
		struct inet6_protocol *ipprot = cm->proto;

		skb_reset_network_header(skb);
		skb_pull(skb, ihl);
		skb_reset_transport_header(skb);
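		/*
		 * Mark that this packet has been fast forwarded.
		 */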
		skb->fast_forwarded = 1;

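		/*
		 * Hand the packet to the registered protocol handler for decapsulation.
		 */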
		ret = ipprot->handler(skb);
		if (ret) {
			this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
			rcu_read_unlock();
			return 1;
		}

		/*
		 * Update traffic stats.
		 */
		atomic_inc(&cm->rx_packet_count);
		atomic_add(len, &cm->rx_byte_count);

		this_cpu_inc(si->stats_pcpu->packets_forwarded64);
		rcu_read_unlock();
		DEBUG_TRACE("%p: %s decap done\n", skb, __func__);
		return 1;
	}

	/*
	 * If our packet is larger than the MTU of the transmit interface then
	 * we can't forward it easily.
	 */
	if (unlikely(len > cm->xmit_dev_mtu)) {
		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
		rcu_read_unlock();

		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_GRE_NEEDS_FRAGMENTATION);
		DEBUG_TRACE("Larger than MTU\n");
		return 0;
	}

	/*
	 * Update DSCP
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
		sfe_ipv6_change_dsfield(iph, cm->dscp);
	}

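	/*
	 * Decrement the hop_limit only for routed flows that are not the outer
	 * header of a tunnel.
	 */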
	iph->hop_limit -= (u8)(!bridge_flow & !tun_outer);

	/*
	 * Update traffic stats.
	 */
	atomic_inc(&cm->rx_packet_count);
	atomic_add(len, &cm->rx_byte_count);

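	/*
	 * Switch the skb to the transmit interface chosen for this connection.
	 */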
	xmit_dev = cm->xmit_dev;
	skb->dev = xmit_dev;

	/*
	 * Check to see if we need to add VLAN tags
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG)) {
		sfe_vlan_add_tag(skb, cm->egress_vlan_hdr_cnt, cm->egress_vlan_hdr);
	}

	if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR) {
		/*
		 * For the simple case we write this really fast.
		 */
		struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);

		eth->h_proto = htons(ETH_P_IPV6);
		ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
		ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
	} else if (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR) {
		dev_hard_header(skb, xmit_dev, ETH_P_IPV6,
				cm->xmit_dest_mac, cm->xmit_src_mac, len);
	}

	/*
	 * Update priority of skb.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
		skb->priority = cm->priority;
	}

	/*
	 * Mark outgoing packet.
	 */
	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
		skb->mark = cm->mark;
	}

	rcu_read_unlock();

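	/*
	 * Update the per-CPU forwarded packet counter.
	 */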
	this_cpu_inc(si->stats_pcpu->packets_forwarded64);

	/*
	 * We're going to check for GSO flags when we transmit the packet so
	 * start fetching the necessary cache line now.
	 */
	prefetch(skb_shinfo(skb));

	/*
	 * Mark that this packet has been fast forwarded.
	 */
	skb->fast_forwarded = 1;

	/*
	 * Send the packet on its way.
	 */
	dev_queue_xmit(skb);

	return 1;
}