/*
 * sfe_ipv4_pppoe_br.c
 *	Shortcut forwarding engine - IPv4 PPPoE bridge implementation
 *
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
19
20#include <linux/skbuff.h>
21#include <net/udp.h>
22#include <linux/etherdevice.h>
23#include <linux/version.h>
24
25#include "sfe_debug.h"
26#include "sfe_api.h"
27#include "sfe.h"
28#include "sfe_ipv4.h"
29#include "sfe_pppoe.h"
30#include "sfe_vlan.h"
31
32/*
33 * sfe_ipv4_recv_pppoe_bridge()
34 * Process PPPoE bridge packets using 3-tuple acceleration
35 *
36 */
37int sfe_ipv4_recv_pppoe_bridge(struct sfe_ipv4 *si, struct sk_buff *skb, struct net_device *dev,
38 unsigned int len, struct iphdr *iph, unsigned int ihl, struct sfe_l2_info *l2_info)
39{
40 struct sfe_ipv4_connection_match *cm;
41 u32 service_class_id;
42 struct net_device *xmit_dev;
43 int ret;
44 bool fast_xmit;
45 netdev_features_t features;
46
47 rcu_read_lock();
48
49 cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_RAW, iph->saddr, 0, iph->daddr, htons(sfe_l2_pppoe_session_id_get(l2_info)));
50 if (unlikely(!cm)) {
51 rcu_read_unlock();
52 sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_PPPOE_BR_NOT_IN_CME);
53 DEBUG_TRACE("%px: no connection found in 3-tuple lookup for PPPoE bridge flow\n", skb);
54 return 0;
55 }
56
57 /*
58 * Source interface validate.
59 */
60 if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
61 if (!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
62 struct sfe_ipv4_connection *c = cm->connection;
63 DEBUG_TRACE("flush on source interface check failure\n");
64 spin_lock_bh(&si->lock);
65 ret = sfe_ipv4_remove_connection(si, c);
66 spin_unlock_bh(&si->lock);
67
68 if (ret) {
69 sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
70 }
71 }
72 rcu_read_unlock();
73 sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
74 DEBUG_TRACE("exception the packet on source interface check failure\n");
75 return 0;
76 }
77
78 /*
79 * Do we expect an ingress VLAN tag for this flow?
80 */
81 if (unlikely(!sfe_vlan_validate_ingress_tag(skb, cm->ingress_vlan_hdr_cnt, cm->ingress_vlan_hdr, l2_info))) {
82 rcu_read_unlock();
83 sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INGRESS_VLAN_TAG_MISMATCH);
84 DEBUG_TRACE("VLAN tag mismatch. skb=%px\n", skb);
85 return 0;
86 }
87
88 /*
89 * Check if skb has enough headroom to write L2 headers
90 */
91 if (unlikely(skb_headroom(skb) < cm->l2_hdr_size)) {
92 rcu_read_unlock();
93 DEBUG_WARN("%px: Not enough headroom: %u\n", skb, skb_headroom(skb));
94 sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_NO_HEADROOM);
95 return 0;
96 }
97
98 /*
99 * Restore PPPoE header back
100 */
101 __skb_push(skb, PPPOE_SES_HLEN);
102
103 /*
104 * Update traffic stats.
105 */
106 atomic_inc(&cm->rx_packet_count);
107 atomic_add(len, &cm->rx_byte_count);
108
109 xmit_dev = cm->xmit_dev;
110 skb->dev = xmit_dev;
111
112 /*
113 * Check to see if we need to add VLAN tags
114 */
115 if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG)) {
116 sfe_vlan_add_tag(skb, cm->egress_vlan_hdr_cnt, cm->egress_vlan_hdr);
117 }
118
119 /*
120 * Check to see if we need to write an Ethernet header.
121 */
122 if (likely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
123 if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
124 dev_hard_header(skb, xmit_dev, ntohs(skb->protocol),
125 cm->xmit_dest_mac, cm->xmit_src_mac, len);
126 } else {
127 /*
128 * For the simple case we write this really fast.
129 */
130 struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
131 eth->h_proto = skb->protocol;
132 ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
133 ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
134 }
135 }
136
137 /*
138 * Update priority of skb.
139 */
140 if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
141 skb->priority = cm->priority;
142 }
143
144 /*
145 * Mark outgoing packet.
146 */
147 if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
148 skb->mark = cm->mark;
149 /*
150 * Update service class stats if SAWF is valid.
151 */
152 if (likely(cm->sawf_valid)) {
153 service_class_id = SFE_GET_SAWF_SERVICE_CLASS(cm->mark);
154 sfe_ipv4_service_class_stats_inc(si, service_class_id, len);
155 }
156 }
157
158 /*
159 * For the first packets, check if it could got fast xmit.
160 */
161 if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
162 && (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION))){
163 cm->features = netif_skb_features(skb);
164 if (likely(sfe_fast_xmit_check(skb, cm->features))) {
165 cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT;
166 }
167 cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED;
168 }
169 features = cm->features;
170
171 fast_xmit = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT);
172
173 rcu_read_unlock();
174
175 this_cpu_inc(si->stats_pcpu->pppoe_bridge_packets_3tuple_forwarded64);
176 this_cpu_inc(si->stats_pcpu->packets_forwarded64);
177
178 /*
179 * We're going to check for GSO flags when we transmit the packet so
180 * start fetching the necessary cache line now.
181 */
182 prefetch(skb_shinfo(skb));
183
184 /*
185 * We do per packet condition check before we could fast xmit the
186 * packet.
187 */
188 if (likely(fast_xmit && dev_fast_xmit(skb, xmit_dev, features))) {
189 this_cpu_inc(si->stats_pcpu->packets_fast_xmited64);
190 return 1;
191 }
192
193 /*
194 * Mark that this packet has been fast forwarded.
195 */
196 skb->fast_forwarded = 1;
197
198 /*
199 * Send the packet on its way.
200 */
201 dev_queue_xmit(skb);
202
203 return 1;
204}