[qca-nss-clients] Add vxlanmgr to accelerate VxLAN tunnels
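
The manager registers a netdevice notifier: on NETDEV_REGISTER it
allocates NSS inner (encap) and outer (decap) dynamic interfaces for
the vxlan netdevice and sends the tunnel rule to the firmware;
NETDEV_UP/NETDEV_DOWN enable and disable the tunnel. FDB updates from
the vxlan driver are pushed into the firmware MAC database, and
per-tunnel statistics are exported under debugfs.

Example usage (illustrative only; VNI and addresses are arbitrary):

    ip link add vxlan0 type vxlan id 42 local 192.168.1.1 \
        remote 192.168.1.2 dstport 4789
    ip link set vxlan0 up
    cat /sys/kernel/debug/qca-nss-vxlanmgr/vxlan0
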
Change-Id: I7e6552c898c77e896c7fc592efb7e2f11b367549
Signed-off-by: Apoorv Gupta <apoogupt@codeaurora.org>
diff --git a/Makefile b/Makefile
index 87d27f1..a7b50ec 100644
--- a/Makefile
+++ b/Makefile
@@ -27,6 +27,7 @@
obj-$(tun6rd)+= qca-nss-tun6rd.o
obj-$(qdisc)+= nss_qdisc/
obj-$(vlan-mgr)+= vlan/
+obj-$(vxlanmgr)+= vxlanmgr/
obj-$(pvxlanmgr)+= pvxlanmgr/
obj-$(pppoe)+= pppoe/
obj-$(ovpn-mgr)+= openvpn/
diff --git a/vxlanmgr/Makefile b/vxlanmgr/Makefile
new file mode 100644
index 0000000..7410642
--- /dev/null
+++ b/vxlanmgr/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -I$(obj)/../exports -I$(obj)/.. -I$(obj)/nss_hal/include
+ccflags-y += -DNSS_CLIENT_BUILD_ID="$(BUILD_ID)"
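+# Debug level: 2 = warn, 3 = warn+info, 4 = warn+info+trace (see nss_vxlanmgr.h)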
+ccflags-y += -DNSS_VXLANMGR_DEBUG_LEVEL=2
+ccflags-y += -Wall -Werror
+
+obj-m += qca-nss-vxlanmgr.o
+qca-nss-vxlanmgr-objs := nss_vxlanmgr.o
+qca-nss-vxlanmgr-objs += nss_vxlanmgr_tunnel.o
+qca-nss-vxlanmgr-objs += nss_vxlanmgr_tun_stats.o
diff --git a/vxlanmgr/nss_vxlanmgr.c b/vxlanmgr/nss_vxlanmgr.c
new file mode 100644
index 0000000..f554c1f
--- /dev/null
+++ b/vxlanmgr/nss_vxlanmgr.c
@@ -0,0 +1,151 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_vxlanmgr.c
+ * NSS to HLOS VxLAN manager
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <net/vxlan.h>
+#include <nss_api_if.h>
+#include "nss_vxlanmgr.h"
+#include "nss_vxlanmgr_tun_stats.h"
+
+/*
+ * VxLAN context
+ */
+struct nss_vxlanmgr_ctx vxlan_ctx;
+
+/*
+ * nss_vxlanmgr_netdev_event()
+ * Netdevice notifier for NSS VxLAN manager module
+ */
+static int nss_vxlanmgr_netdev_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct net_device *netdev = netdev_notifier_info_to_dev(data);
+
+ if (!is_vxlan_dev(netdev)) {
+ /*
+ * Return if it's not a vxlan netdev
+ */
+ return NOTIFY_DONE;
+ }
+
+ switch (event) {
+ case NETDEV_DOWN:
+ nss_vxlanmgr_trace("%p: NETDEV_DOWN: event %lu name %s\n", netdev, event, netdev->name);
+ return nss_vxlanmgr_tunnel_deconfig(netdev);
+ case NETDEV_UP:
+ nss_vxlanmgr_trace("%p: NETDEV_UP: event %lu name %s\n", netdev, event, netdev->name);
+ return nss_vxlanmgr_tunnel_config(netdev);
+ case NETDEV_UNREGISTER:
+ nss_vxlanmgr_trace("%p: NETDEV_UNREGISTER: event %lu name %s\n", netdev, event, netdev->name);
+ return nss_vxlanmgr_tunnel_destroy(netdev);
+ case NETDEV_REGISTER:
+ nss_vxlanmgr_trace("%p: NETDEV_REGISTER: event %lu name %s\n", netdev, event, netdev->name);
+ return nss_vxlanmgr_tunnel_create(netdev);
+ default:
+ nss_vxlanmgr_trace("%p: Unhandled notifier event %lu name %s\n", netdev, event, netdev->name);
+ }
+ return NOTIFY_DONE;
+}
+
+/*
+ * Linux Net device Notifier
+ */
+static struct notifier_block nss_vxlanmgr_netdev_notifier = {
+ .notifier_call = nss_vxlanmgr_netdev_event,
+};
+
+/*
+ * nss_vxlanmgr_exit_module()
+ * Tunnel vxlan module exit function
+ */
+void __exit nss_vxlanmgr_exit_module(void)
+{
+ int ret;
+ struct nss_vxlanmgr_tun_ctx *tun_ctx, *temp;
+
+ /*
+ * Check if there are any tunnels.
+ * Delete all the tunnels from NSS FW and free.
+ */
+ list_for_each_entry_safe(tun_ctx, temp, &vxlan_ctx.list, head) {
+ /*
+ * Send deconfigure and destroy message to FW.
+ */
+ nss_vxlanmgr_trace("Removing tunnel %s\n", tun_ctx->dev->name);
+ nss_vxlanmgr_tunnel_deconfig(tun_ctx->dev);
+ nss_vxlanmgr_tunnel_destroy(tun_ctx->dev);
+ }
+
+ nss_vxlanmgr_tun_stats_dentry_deinit();
+ ret = unregister_netdevice_notifier(&nss_vxlanmgr_netdev_notifier);
+ if (ret) {
+ nss_vxlanmgr_warn("failed to unregister netdevice notifier: error %d\n", ret);
+ return;
+ }
+
+ nss_vxlanmgr_info("module unloaded\n");
+}
+
+/*
+ * nss_vxlanmgr_init_module()
+ * Tunnel vxlan module init function
+ */
+int __init nss_vxlanmgr_init_module(void)
+{
+	int ret;
+	struct device_node *np;
+
+	/*
+	 * If the node is not compatible, don't do anything.
+	 */
+	np = of_find_node_by_name(NULL, "nss-common");
+	if (!np) {
+		nss_vxlanmgr_warn("nss-common not found.\n");
+		return -ENODEV;
+	}
+	of_node_put(np);
+
+ INIT_LIST_HEAD(&vxlan_ctx.list);
+ vxlan_ctx.nss_ctx = nss_vxlan_get_ctx();
+ spin_lock_init(&vxlan_ctx.tun_lock);
+
+	if (!nss_vxlanmgr_tun_stats_dentry_init()) {
+		nss_vxlanmgr_warn("Failed to create debugfs entry\n");
+		return -ENOMEM;
+	}
+
+ ret = register_netdevice_notifier(&nss_vxlanmgr_netdev_notifier);
+ if (ret) {
+ nss_vxlanmgr_tun_stats_dentry_deinit();
+ nss_vxlanmgr_warn("Failed to register netdevice notifier: error %d\n", ret);
+		return ret;
+ }
+
+ nss_vxlanmgr_info("Module %s loaded\n", NSS_CLIENT_BUILD_ID);
+ return 0;
+}
+
+module_init(nss_vxlanmgr_init_module);
+module_exit(nss_vxlanmgr_exit_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("NSS VxLAN manager");
diff --git a/vxlanmgr/nss_vxlanmgr.h b/vxlanmgr/nss_vxlanmgr.h
new file mode 100644
index 0000000..bb39119
--- /dev/null
+++ b/vxlanmgr/nss_vxlanmgr.h
@@ -0,0 +1,91 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_vxlanmgr.h
+ * VxLAN manager header
+ */
+#ifndef __NSS_VXLANMGR_H
+#define __NSS_VXLANMGR_H
+
+/*
+ * Compile messages for dynamic enable/disable
+ */
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define nss_vxlanmgr_warn(s, ...) pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#define nss_vxlanmgr_info(s, ...) pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#define nss_vxlanmgr_trace(s, ...) pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#else /* CONFIG_DYNAMIC_DEBUG */
+/*
+ * Statically compile messages at different levels
+ */
+#if (NSS_VXLANMGR_DEBUG_LEVEL < 2)
+#define nss_vxlanmgr_warn(s, ...)
+#else
+#define nss_vxlanmgr_warn(s, ...) pr_warn("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_VXLANMGR_DEBUG_LEVEL < 3)
+#define nss_vxlanmgr_info(s, ...)
+#else
+#define nss_vxlanmgr_info(s, ...) pr_notice("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_VXLANMGR_DEBUG_LEVEL < 4)
+#define nss_vxlanmgr_trace(s, ...)
+#else
+#define nss_vxlanmgr_trace(s, ...) pr_info("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+#endif /* CONFIG_DYNAMIC_DEBUG */
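+
+/*
+ * With CONFIG_DYNAMIC_DEBUG, all of the above can be enabled at runtime
+ * through the dynamic debug control file, e.g.:
+ *	echo 'module qca-nss-vxlanmgr +p' > /sys/kernel/debug/dynamic_debug/control
+ */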
+
+struct nss_vxlanmgr_ctx {
+ struct list_head list; /* vxlanmgr context list head */
+ struct dentry *dentry; /* debugfs entry for qca-nss-vxlanmgr */
+ struct nss_ctx_instance *nss_ctx; /* nss context for vxlan tunnel */
+ uint32_t tun_count; /* active vxlan tunnel count */
+ spinlock_t tun_lock; /* spinlock */
+};
+
+struct nss_vxlanmgr_tun_ctx {
+ struct list_head head; /* tunnel context list entry */
+ struct net_device *dev; /* tunnel netdevice pointer */
+ struct dentry *dentry; /* per tunnel debugfs entry */
+ struct nss_vxlanmgr_ctx *vxlan_ctx; /* pointer to vxlanmgr context */
+ struct nss_vxlanmgr_tun_stats *stats; /* tunnel statistics structure */
+ uint32_t inner_ifnum; /* inner node interface number */
+ uint32_t outer_ifnum; /* outer node interface number */
+ uint32_t vni; /* vnet identifier */
+ uint16_t tunnel_flags; /* vxlan tunnel flags */
+ uint16_t flow_label; /* flowlabel */
+ uint16_t src_port_min; /* minimum source port */
+	uint16_t src_port_max;		/* maximum source port */
+ uint16_t dest_port; /* destination port */
+ uint8_t tos; /* tos value */
+ uint8_t ttl; /* time to live */
+};
+
+extern int nss_vxlanmgr_tunnel_create(struct net_device *dev);
+extern int nss_vxlanmgr_tunnel_destroy(struct net_device *dev);
+extern int nss_vxlanmgr_tunnel_config(struct net_device *dev);
+extern int nss_vxlanmgr_tunnel_deconfig(struct net_device *dev);
+
+#endif /* __NSS_VXLANMGR_H */
diff --git a/vxlanmgr/nss_vxlanmgr_tun_stats.c b/vxlanmgr/nss_vxlanmgr_tun_stats.c
new file mode 100644
index 0000000..40e5372
--- /dev/null
+++ b/vxlanmgr/nss_vxlanmgr_tun_stats.c
@@ -0,0 +1,321 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <nss_api_if.h>
+#include "nss_vxlanmgr.h"
+#include "nss_vxlanmgr_tun_stats.h"
+
+/*
+ * VxLAN context
+ */
+extern struct nss_vxlanmgr_ctx vxlan_ctx;
+
+/*
+ * nss_vxlanmgr_tun_stats_str
+ * Vxlan statistics strings for nss tunnel stats
+ */
+static const char *nss_vxlanmgr_tun_stats_str[NSS_VXLANMGR_TUN_STATS_TYPE_MAX] = {
+ "rx_pkts",
+ "rx_bytes",
+ "tx_pkts",
+ "tx_bytes",
+ "rx_queue_0_dropped",
+ "rx_queue_1_dropped",
+ "rx_queue_2_dropped",
+ "rx_queue_3_dropped",
+ "Except MAC DB look up failed",
+ "Except Insufficient Headroom",
+ "Except MAC moved",
+ "Except No Policy ID",
+ "Except Extra flags",
+ "Except VNI Look-up failed",
+ "Dropped packet malformed",
+ "Dropped next node queue is full",
+};
+
+/*
+ * nss_vxlanmgr_tun_stats_show()
+ * Read Vxlan Tunnel statistics
+ */
+static int nss_vxlanmgr_tun_stats_show(struct seq_file *m, void __attribute__((unused)) *p)
+{
+ int i;
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+ struct nss_vxlanmgr_tun_stats *vxlan_tunnel_stats;
+
+ tun_ctx = kzalloc(sizeof(struct nss_vxlanmgr_tun_ctx), GFP_KERNEL);
+ if (!tun_ctx) {
+ nss_vxlanmgr_warn("Failed to allocate memory for tun_ctx\n");
+ return -ENOMEM;
+ }
+
+ vxlan_tunnel_stats = kzalloc(sizeof(struct nss_vxlanmgr_tun_stats), GFP_KERNEL);
+ if (!vxlan_tunnel_stats) {
+ nss_vxlanmgr_warn("Failed to allocate memory for vxlan_tunnel_stats\n");
+ kfree(tun_ctx);
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ memcpy(tun_ctx, m->private, sizeof(struct nss_vxlanmgr_tun_ctx));
+ memcpy(vxlan_tunnel_stats, tun_ctx->stats, sizeof(struct nss_vxlanmgr_tun_stats));
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+ /*
+ * Tunnel stats
+ */
+ seq_printf(m, "\n%s tunnel stats start:\n", tun_ctx->dev->name);
+
+ seq_printf(m, "\t%s configuration:\n", tun_ctx->dev->name);
+ seq_printf(m, "\t\tvni = %u\n", tun_ctx->vni);
+ seq_printf(m, "\t\ttunnel_flags = %x\n", tun_ctx->tunnel_flags);
+ seq_printf(m, "\t\tflow_label = %u\n", tun_ctx->flow_label);
+ seq_printf(m, "\t\tsrc_port_min = %u\n", tun_ctx->src_port_min);
+ seq_printf(m, "\t\tsrc_port_max = %u\n", tun_ctx->src_port_max);
+ seq_printf(m, "\t\tdest_port = %u\n", ntohs(tun_ctx->dest_port));
+ seq_printf(m, "\t\ttos = %u\n", tun_ctx->tos);
+ seq_printf(m, "\t\tttl = %u\n", tun_ctx->ttl);
+
+ seq_printf(m, "\n\tInner ifnum %u stats:\n", tun_ctx->inner_ifnum);
+ for (i = 0; i < NSS_VXLANMGR_TUN_STATS_TYPE_MAX; i++) {
+ seq_printf(m, "\t\t%s = %llu\n",
+ nss_vxlanmgr_tun_stats_str[i],
+ vxlan_tunnel_stats->inner_stats[i]);
+ }
+
+ seq_printf(m, "\n\tOuter ifnum %u stats:\n", tun_ctx->outer_ifnum);
+ for (i = 0; i < NSS_VXLANMGR_TUN_STATS_TYPE_MAX; i++) {
+ seq_printf(m, "\t\t%s = %llu\n",
+ nss_vxlanmgr_tun_stats_str[i],
+ vxlan_tunnel_stats->outer_stats[i]);
+ }
+
+ seq_printf(m, "\n\tMAC DB stats:\n");
+ for (i = 0; i < NSS_VXLAN_MACDB_ENTRIES_MAX; i++) {
+ if (!vxlan_tunnel_stats->mac_stats[i][0]) {
+ continue;
+ }
+ seq_printf(m, "\t\t%pM = %llu\n",
+ &vxlan_tunnel_stats->mac_stats[i][0],
+ vxlan_tunnel_stats->mac_stats[i][1]);
+ }
+ seq_printf(m, "\n\tPackets dropped at host: %llu\n",
+ vxlan_tunnel_stats->host_packet_drop);
+
+ seq_printf(m, "\n%s tunnel stats end\n\n", tun_ctx->dev->name);
+ kfree(tun_ctx);
+ kfree(vxlan_tunnel_stats);
+ return 0;
+}
+
+/*
+ * nss_vxlanmgr_tun_stats_open()
+ */
+static int nss_vxlanmgr_tun_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, nss_vxlanmgr_tun_stats_show, inode->i_private);
+}
+
+/*
+ * nss_vxlanmgr_tun_stats_update()
+ * Update inner and outer node statistics
+ */
+void nss_vxlanmgr_tun_stats_update(uint64_t *stats, struct nss_vxlan_stats_msg *stats_msg)
+{
+ uint32_t i;
+
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_RX_PKTS] += stats_msg->node_stats.rx_packets;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_RX_BYTES] += stats_msg->node_stats.rx_bytes;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_TX_PKTS] += stats_msg->node_stats.tx_packets;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_TX_BYTES] += stats_msg->node_stats.tx_bytes;
+
+ for (i = 0; i < NSS_MAX_NUM_PRI; i++) {
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_RX_QUEUE_0_DROPPED + i] += stats_msg->node_stats.rx_dropped[i];
+ }
+
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_MAC_DB_LOOKUP_FAILED] +=
+ stats_msg->except_mac_db_lookup_failed;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_HEADROOM_INSUFFICIENT] +=
+ stats_msg->except_low_hroom;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_MAC_MOVE] +=
+ stats_msg->except_mac_move;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_NO_POLICY_ID] +=
+ stats_msg->except_no_policy_id;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_EXTRA_FLAGS] +=
+ stats_msg->except_extra_vxlan_hdr_flags;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_VNI_LOOKUP_FAILED] +=
+ stats_msg->except_vni_lookup_failed;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_DROP_MALFORMED] +=
+ stats_msg->dropped_malformed;
+ stats[NSS_VXLANMGR_TUN_STATS_TYPE_DROP_NEXT_NODE_QUEUE_FULL] +=
+ stats_msg->dropped_next_node_queue_full;
+}
+
+/*
+ * nss_vxlanmgr_tun_macdb_stats_sync()
+ * Sync function for vxlan fdb entries
+ */
+void nss_vxlanmgr_tun_macdb_stats_sync(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm)
+{
+ struct nss_vxlan_macdb_stats_msg *db_stats;
+ struct nss_vxlanmgr_tun_stats *s = tun_ctx->stats;
+ uint16_t i, j, nentries;
+
+ db_stats = &nvm->msg.db_stats;
+ nentries = db_stats->cnt;
+
+ dev_hold(tun_ctx->dev);
+
+ if (nentries > NSS_VXLAN_MACDB_ENTRIES_PER_MSG) {
+		nss_vxlanmgr_warn("%p: No more than %u entries allowed per message.\n",
+				tun_ctx->dev, NSS_VXLAN_MACDB_ENTRIES_PER_MSG);
+ dev_put(tun_ctx->dev);
+ return;
+ }
+
+ for (j = 0; j < nentries; j++) {
+ if (!db_stats->entry[j].hits) {
+ continue;
+ }
+ for (i = 0; i < NSS_VXLAN_MACDB_ENTRIES_MAX; i++) {
+ if (ether_addr_equal((uint8_t *)&s->mac_stats[i][0],
+ (uint8_t *)db_stats->entry[j].mac)) {
+ s->mac_stats[i][1] += db_stats->entry[j].hits;
+ break;
+ }
+ }
+ }
+ dev_put(tun_ctx->dev);
+}
+
+/*
+ * nss_vxlanmgr_tun_stats_sync()
+ * Sync function for vxlan statistics
+ */
+void nss_vxlanmgr_tun_stats_sync(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm)
+{
+ uint32_t ifnum = nvm->cm.interface;
+ struct nss_vxlan_stats_msg *stats = &nvm->msg.stats;
+ struct nss_vxlanmgr_tun_stats *s = tun_ctx->stats;
+
+ if (tun_ctx->inner_ifnum == ifnum) {
+ nss_vxlanmgr_tun_stats_update(s->inner_stats, stats);
+ } else if (tun_ctx->outer_ifnum == ifnum) {
+ nss_vxlanmgr_tun_stats_update(s->outer_stats, stats);
+ } else {
+ nss_vxlanmgr_warn("Invalid interface number\n");
+ }
+}
+
+/*
+ * nss_vxlanmgr_tun_stats_ops
+ * File operations for VxLAN tunnel stats
+ */
+static const struct file_operations nss_vxlanmgr_tun_stats_ops = {
+	.open = nss_vxlanmgr_tun_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * nss_vxlanmgr_tun_stats_deinit()
+ *	Free the stats structure for the given tunnel context.
+ */
+void nss_vxlanmgr_tun_stats_deinit(struct nss_vxlanmgr_tun_ctx *tun_ctx)
+{
+	struct nss_vxlanmgr_tun_stats *stats;
+
+	spin_lock_bh(&vxlan_ctx.tun_lock);
+	stats = tun_ctx->stats;
+	tun_ctx->stats = NULL;
+	spin_unlock_bh(&vxlan_ctx.tun_lock);
+	kfree(stats);
+}
+
+/*
+ * nss_vxlanmgr_tun_stats_init()
+ * Alloc and initialize tunnel debug stats.
+ */
+bool nss_vxlanmgr_tun_stats_init(struct nss_vxlanmgr_tun_ctx *tun_ctx)
+{
+	struct nss_vxlanmgr_tun_stats *stats;
+
+	stats = kzalloc(sizeof(struct nss_vxlanmgr_tun_stats), GFP_ATOMIC);
+	if (!stats) {
+		nss_vxlanmgr_warn("Failed to allocate memory for tunnel stats\n");
+		return false;
+	}
+
+	tun_ctx->stats = stats;
+ return true;
+}
+
+/*
+ * nss_vxlanmgr_tun_stats_dentry_remove()
+ *	Remove debugfs file for the given tunnel context.
+ */
+void nss_vxlanmgr_tun_stats_dentry_remove(struct nss_vxlanmgr_tun_ctx *tun_ctx)
+{
+ debugfs_remove(tun_ctx->dentry);
+}
+
+/*
+ * nss_vxlanmgr_tun_stats_dentry_create()
+ * Create dentry for a given tunnel.
+ */
+bool nss_vxlanmgr_tun_stats_dentry_create(struct nss_vxlanmgr_tun_ctx *tun_ctx)
+{
+ char dentry_name[IFNAMSIZ];
+
+ scnprintf(dentry_name, sizeof(dentry_name), "%s", tun_ctx->dev->name);
+ tun_ctx->dentry = debugfs_create_file(dentry_name, S_IRUGO,
+ tun_ctx->vxlan_ctx->dentry, tun_ctx, &nss_vxlanmgr_tun_stats_ops);
+ if (!tun_ctx->dentry) {
+ nss_vxlanmgr_warn("Debugfs file creation failed for tun %s\n", tun_ctx->dev->name);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * nss_vxlanmgr_tun_stats_dentry_deinit()
+ * Cleanup the debugfs tree.
+ */
+void nss_vxlanmgr_tun_stats_dentry_deinit(void)
+{
+ debugfs_remove_recursive(vxlan_ctx.dentry);
+}
+
+/*
+ * nss_vxlanmgr_tun_stats_dentry_init()
+ * Create VxLAN tunnel statistics debugfs entry.
+ */
+bool nss_vxlanmgr_tun_stats_dentry_init(void)
+{
+	/*
+	 * Initialize debugfs.
+	 */
+ vxlan_ctx.dentry = debugfs_create_dir("qca-nss-vxlanmgr", NULL);
+ if (!vxlan_ctx.dentry) {
+ nss_vxlanmgr_warn("Creating debug directory failed\n");
+ return false;
+ }
+ return true;
+}
diff --git a/vxlanmgr/nss_vxlanmgr_tun_stats.h b/vxlanmgr/nss_vxlanmgr_tun_stats.h
new file mode 100644
index 0000000..677ffb7
--- /dev/null
+++ b/vxlanmgr/nss_vxlanmgr_tun_stats.h
@@ -0,0 +1,65 @@
+/*
+ ******************************************************************************
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * ****************************************************************************
+ */
+
+#ifndef __NSS_VXLANMGR_TUN_STATS_H
+#define __NSS_VXLANMGR_TUN_STATS_H
+
+/*
+ * VxLAN statistic counters
+ */
+enum nss_vxlanmgr_tun_stats_type {
+ NSS_VXLANMGR_TUN_STATS_TYPE_RX_PKTS,
+ NSS_VXLANMGR_TUN_STATS_TYPE_RX_BYTES,
+ NSS_VXLANMGR_TUN_STATS_TYPE_TX_PKTS,
+ NSS_VXLANMGR_TUN_STATS_TYPE_TX_BYTES,
+ NSS_VXLANMGR_TUN_STATS_TYPE_RX_QUEUE_0_DROPPED,
+ NSS_VXLANMGR_TUN_STATS_TYPE_RX_QUEUE_1_DROPPED,
+ NSS_VXLANMGR_TUN_STATS_TYPE_RX_QUEUE_2_DROPPED,
+ NSS_VXLANMGR_TUN_STATS_TYPE_RX_QUEUE_3_DROPPED,
+ NSS_VXLANMGR_TUN_STATS_TYPE_MAC_DB_LOOKUP_FAILED,
+ NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_HEADROOM_INSUFFICIENT,
+ NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_MAC_MOVE,
+ NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_NO_POLICY_ID,
+ NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_EXTRA_FLAGS,
+ NSS_VXLANMGR_TUN_STATS_TYPE_EXCEPT_VNI_LOOKUP_FAILED,
+ NSS_VXLANMGR_TUN_STATS_TYPE_DROP_MALFORMED,
+ NSS_VXLANMGR_TUN_STATS_TYPE_DROP_NEXT_NODE_QUEUE_FULL,
+ NSS_VXLANMGR_TUN_STATS_TYPE_MAX,
+};
+
+/*
+ * VxLAN tunnel statistics
+ */
+struct nss_vxlanmgr_tun_stats {
+	uint64_t inner_stats[NSS_VXLANMGR_TUN_STATS_TYPE_MAX];	/* encap node statistics */
+	uint64_t outer_stats[NSS_VXLANMGR_TUN_STATS_TYPE_MAX];	/* decap node statistics */
+	uint64_t host_packet_drop;				/* exception packets dropped by the host */
+	uint64_t mac_stats[NSS_VXLAN_MACDB_ENTRIES_MAX][2];	/* [0]: MAC address, [1]: hit count */
+};
+
+/*
+ * VxLAN statistics APIs
+ */
+extern void nss_vxlanmgr_tun_macdb_stats_sync(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm);
+extern void nss_vxlanmgr_tun_stats_sync(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm);
+extern void nss_vxlanmgr_tun_stats_deinit(struct nss_vxlanmgr_tun_ctx *tun_ctx);
+extern bool nss_vxlanmgr_tun_stats_init(struct nss_vxlanmgr_tun_ctx *tun_ctx);
+extern void nss_vxlanmgr_tun_stats_dentry_deinit(void);
+extern bool nss_vxlanmgr_tun_stats_dentry_init(void);
+extern void nss_vxlanmgr_tun_stats_dentry_remove(struct nss_vxlanmgr_tun_ctx *tun_ctx);
+extern bool nss_vxlanmgr_tun_stats_dentry_create(struct nss_vxlanmgr_tun_ctx *tun_ctx);
+
+#endif /* __NSS_VXLANMGR_TUN_STATS_H */
diff --git a/vxlanmgr/nss_vxlanmgr_tunnel.c b/vxlanmgr/nss_vxlanmgr_tunnel.c
new file mode 100644
index 0000000..0de1a55
--- /dev/null
+++ b/vxlanmgr/nss_vxlanmgr_tunnel.c
@@ -0,0 +1,1052 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/addrconf.h>
+#include <net/dst.h>
+#include <net/flow.h>
+#include <net/ipv6.h>
+#include <net/route.h>
+#include <net/vxlan.h>
+#include <nss_api_if.h>
+#include "nss_vxlanmgr.h"
+#include "nss_vxlanmgr_tun_stats.h"
+
+/*
+ * VxLAN context
+ */
+extern struct nss_vxlanmgr_ctx vxlan_ctx;
+
+/*
+ * nss_vxlanmgr_tunnel_ctx_dev_get()
+ * Find VxLAN tunnel context using netdev.
+ * Context lock must be held before calling this API.
+ */
+static struct nss_vxlanmgr_tun_ctx *nss_vxlanmgr_tunnel_ctx_dev_get(struct net_device *dev)
+{
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+
+ list_for_each_entry(tun_ctx, &vxlan_ctx.list, head) {
+ if (tun_ctx->dev == dev) {
+ return tun_ctx;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * nss_vxlanmgr_tunnel_tx_msg()
+ * Transmit VxLAN tunnel operation messages asynchronously.
+ */
+static nss_tx_status_t nss_vxlanmgr_tunnel_tx_msg(struct nss_ctx_instance *ctx,
+ struct nss_vxlan_msg *msg,
+ uint32_t if_num,
+ enum nss_vxlan_msg_type type,
+ uint32_t len)
+{
+ nss_vxlan_msg_init(msg, if_num, type, len, NULL, NULL);
+ return nss_vxlan_tx_msg(ctx, msg);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_tx_msg_sync()
+ * Transmit VxLAN tunnel operation messages.
+ */
+static nss_tx_status_t nss_vxlanmgr_tunnel_tx_msg_sync(struct nss_ctx_instance *ctx,
+ struct nss_vxlan_msg *msg,
+ uint32_t if_num,
+ enum nss_vxlan_msg_type type,
+ uint32_t len)
+{
+ nss_vxlan_msg_init(msg, if_num, type, len, NULL, NULL);
+ return nss_vxlan_tx_msg_sync(ctx, msg);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_flags_parse()
+ * Function to parse vxlan flags.
+ */
+static uint16_t nss_vxlanmgr_tunnel_flags_parse(struct vxlan_dev *priv)
+{
+ uint16_t flags = 0;
+ uint32_t priv_flags = priv->flags;
+
+ if (priv_flags & VXLAN_F_GBP)
+ flags |= NSS_VXLAN_RULE_FLAG_GBP_ENABLED;
+	if (priv_flags & VXLAN_F_IPV6)
+		flags |= NSS_VXLAN_RULE_FLAG_IPV6;
+	else
+		flags |= NSS_VXLAN_RULE_FLAG_IPV4;
+	if (priv->cfg.tos == 1)	/* tos value 1 means "inherit" in the vxlan driver */
+		flags |= NSS_VXLAN_RULE_FLAG_INHERIT_TOS;
+ if (priv_flags & VXLAN_F_UDP_CSUM)
+ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
+ else if (!(priv_flags & VXLAN_F_UDP_ZERO_CSUM6_TX))
+ flags |= NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED;
+
+ return (flags | NSS_VXLAN_RULE_FLAG_UDP);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_fill_src_ip()
+ * Return src_ip using route lookup.
+ */
+static bool nss_vxlanmgr_tunnel_fill_src_ip(struct vxlan_dev *vxlan,
+ union vxlan_addr *src_ip,
+ union vxlan_addr *rem_ip,
+ sa_family_t sa_family,
+ uint32_t *new_src_ip)
+{
+ struct flowi4 fl4;
+ struct flowi6 fl6;
+ struct rtable *rt = NULL;
+ struct dst_entry *dst = NULL;
+ int err;
+
+ /*
+ * IPv4
+ */
+ if (sa_family == AF_INET) {
+ if (src_ip->sin.sin_addr.s_addr == htonl(INADDR_ANY)) {
+ /*
+ * Lookup
+ */
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_proto = IPPROTO_UDP;
+ fl4.daddr = rem_ip->sin.sin_addr.s_addr;
+ fl4.saddr = src_ip->sin.sin_addr.s_addr;
+
+			rt = ip_route_output_key(vxlan->net, &fl4);
+			if (IS_ERR(rt)) {
+				nss_vxlanmgr_warn("No route available.\n");
+				return false;
+			}
+			ip_rt_put(rt);
+			new_src_ip[0] = fl4.saddr;
+			return true;
+ }
+ new_src_ip[0] = src_ip->sin.sin_addr.s_addr;
+ return true;
+ }
+
+ /*
+ * IPv6
+ */
+ if (ipv6_addr_any(&src_ip->sin6.sin6_addr)) {
+ /*
+ * Lookup
+ */
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.daddr = rem_ip->sin6.sin6_addr;
+ fl6.saddr = src_ip->sin6.sin6_addr;
+
+		err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
+				vxlan->vn6_sock->sock->sk, &dst, &fl6);
+		if (err < 0) {
+			nss_vxlanmgr_warn("No route available.\n");
+			return false;
+		}
+		dst_release(dst);
+		memcpy(new_src_ip, &fl6.saddr, sizeof(struct in6_addr));
+		return true;
+ }
+ memcpy(new_src_ip, &src_ip->sin6.sin6_addr, sizeof(struct in6_addr));
+ return true;
+}
+
+/*
+ * nss_vxlanmgr_tunnel_mac_del()
+ * VxLAN tunnel mac delete messages.
+ */
+static nss_tx_status_t nss_vxlanmgr_tunnel_mac_del(struct nss_vxlanmgr_tun_ctx *tun_ctx,
+ struct vxlan_fdb_event *vfe)
+{
+ struct net_device *dev;
+ struct nss_vxlan_mac_msg *mac_del_msg;
+ struct nss_vxlan_msg vxlanmsg;
+ struct vxlan_config *cfg;
+ struct vxlan_dev *priv;
+ union vxlan_addr *remote_ip, *src_ip;
+ uint32_t i, inner_ifnum;
+ nss_tx_status_t status = NSS_TX_FAILURE;
+
+ dev = vfe->dev;
+ dev_hold(dev);
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ inner_ifnum = tun_ctx->inner_ifnum;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+ /*
+ * Only non-zero mac entries should be sent to NSS.
+ */
+ if (is_zero_ether_addr(vfe->eth_addr)) {
+ nss_vxlanmgr_trace("Only non-zero mac entries should be sent to NSS.\n");
+ goto done;
+ }
+
+ memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg));
+
+ /*
+ * Set MAC rule message
+ */
+ mac_del_msg = &vxlanmsg.msg.mac_del;
+ mac_del_msg->vni = vfe->rdst->remote_vni;
+ ether_addr_copy((uint8_t *)mac_del_msg->mac_addr, (uint8_t *)vfe->eth_addr);
+
+ priv = netdev_priv(dev);
+ cfg = &priv->cfg;
+ src_ip = &cfg->saddr;
+ remote_ip = &vfe->rdst->remote_ip;
+
+	if (remote_ip->sa.sa_family == AF_INET) {
+ if (remote_ip->sin.sin_addr.s_addr == htonl(INADDR_ANY)) {
+ nss_vxlanmgr_warn("%p: MAC deletion failed for unknown remote\n", dev);
+ goto done;
+ }
+ memcpy(&mac_del_msg->encap.dest_ip, &remote_ip->sin.sin_addr, sizeof(struct in_addr));
+ memcpy(&mac_del_msg->encap.src_ip, &src_ip->sin.sin_addr, sizeof(struct in_addr));
+ } else {
+ if (ipv6_addr_any(&remote_ip->sin6.sin6_addr)) {
+ nss_vxlanmgr_warn("%p: MAC deletion failed for unknown remote\n", dev);
+ goto done;
+ }
+ memcpy(&mac_del_msg->encap.dest_ip, &remote_ip->sin6.sin6_addr, sizeof(struct in6_addr));
+ memcpy(&mac_del_msg->encap.src_ip, &src_ip->sin6.sin6_addr, sizeof(struct in6_addr));
+ }
+
+ /*
+ * Send MAC del message asynchronously as it is called by chain
+ * notifier in atomic context from the vxlan driver.
+ */
+ status = nss_vxlanmgr_tunnel_tx_msg(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ inner_ifnum,
+ NSS_VXLAN_MSG_TYPE_MAC_DEL,
+ sizeof(struct nss_vxlan_mac_msg));
+ if (status != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: MAC deletion failed %d\n", dev, status);
+ }
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ for (i = 0; i < NSS_VXLAN_MACDB_ENTRIES_MAX; i++) {
+ if (ether_addr_equal((uint8_t *)&tun_ctx->stats->mac_stats[i][0], (uint8_t *)vfe->eth_addr)) {
+ tun_ctx->stats->mac_stats[i][0] = 0;
+ tun_ctx->stats->mac_stats[i][1] = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+done:
+ dev_put(dev);
+ return status;
+}
+
+/*
+ * nss_vxlanmgr_tunnel_mac_add()
+ * VxLAN tunnel mac add messages.
+ */
+static nss_tx_status_t nss_vxlanmgr_tunnel_mac_add(struct nss_vxlanmgr_tun_ctx *tun_ctx,
+ struct vxlan_fdb_event *vfe)
+{
+ struct net_device *dev;
+ struct nss_vxlan_mac_msg *mac_add_msg;
+ struct nss_vxlan_msg vxlanmsg;
+ struct vxlan_config *cfg;
+ struct vxlan_dev *priv;
+ union vxlan_addr *remote_ip, *src_ip;
+ uint32_t i, inner_ifnum;
+ uint32_t new_src_ip[4] = {0};
+ nss_tx_status_t status = NSS_TX_FAILURE;
+
+ dev = vfe->dev;
+ dev_hold(dev);
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ inner_ifnum = tun_ctx->inner_ifnum;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+ /*
+ * Only non-zero mac entries should be sent to NSS.
+ */
+ if (is_zero_ether_addr(vfe->eth_addr)) {
+ nss_vxlanmgr_trace("Only non-zero mac entries should be sent to NSS.\n");
+ goto done;
+ }
+
+ memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg));
+
+ /*
+ * Set MAC rule message
+ */
+ mac_add_msg = &vxlanmsg.msg.mac_add;
+ mac_add_msg->vni = vfe->rdst->remote_vni;
+ ether_addr_copy((uint8_t *)mac_add_msg->mac_addr, (uint8_t *)vfe->eth_addr);
+
+ priv = netdev_priv(dev);
+ cfg = &priv->cfg;
+ src_ip = &cfg->saddr;
+ remote_ip = &vfe->rdst->remote_ip;
+
+	if (remote_ip->sa.sa_family == AF_INET) {
+ if (remote_ip->sin.sin_addr.s_addr == htonl(INADDR_ANY)) {
+ nss_vxlanmgr_warn("%p: MAC addition failed for unknown remote\n", dev);
+ goto done;
+ }
+ memcpy(&mac_add_msg->encap.dest_ip[0], &remote_ip->sin.sin_addr, sizeof(struct in_addr));
+ if (!nss_vxlanmgr_tunnel_fill_src_ip(priv, src_ip, remote_ip, AF_INET, new_src_ip)) {
+ nss_vxlanmgr_warn("%p: MAC addition failed for unknown source\n", dev);
+ goto done;
+ }
+ mac_add_msg->encap.src_ip[0] = new_src_ip[0];
+ } else {
+ if (ipv6_addr_any(&remote_ip->sin6.sin6_addr)) {
+ nss_vxlanmgr_warn("%p: MAC addition failed for unknown remote\n", dev);
+ goto done;
+ }
+ memcpy(mac_add_msg->encap.dest_ip, &remote_ip->sin6.sin6_addr, sizeof(struct in6_addr));
+ if (!nss_vxlanmgr_tunnel_fill_src_ip(priv, src_ip, remote_ip, AF_INET6, new_src_ip)) {
+ nss_vxlanmgr_warn("%p: MAC addition failed for unknown source\n", dev);
+ goto done;
+ }
+ memcpy(mac_add_msg->encap.src_ip, new_src_ip, sizeof(struct in6_addr));
+ }
+
+ /*
+ * Send MAC add message asynchronously as it is called by chain
+ * notifier in atomic context from the vxlan driver.
+ */
+ status = nss_vxlanmgr_tunnel_tx_msg(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ inner_ifnum,
+ NSS_VXLAN_MSG_TYPE_MAC_ADD,
+ sizeof(struct nss_vxlan_mac_msg));
+ if (status != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: MAC addition failed %d\n", dev, status);
+ goto done;
+ }
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ for (i = 0; i < NSS_VXLAN_MACDB_ENTRIES_MAX; i++) {
+ if (!tun_ctx->stats->mac_stats[i][0]) {
+ ether_addr_copy((uint8_t *)&tun_ctx->stats->mac_stats[i][0],
+ (uint8_t *)vfe->eth_addr);
+ break;
+ }
+ }
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+done:
+ dev_put(dev);
+ return status;
+}
+
+/*
+ * nss_vxlanmgr_tunnel_fdb_event()
+ * Event handler for VxLAN fdb updates
+ */
+static int nss_vxlanmgr_tunnel_fdb_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct vxlan_fdb_event *vfe;
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+
+ vfe = (struct vxlan_fdb_event *)data;
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ tun_ctx = nss_vxlanmgr_tunnel_ctx_dev_get(vfe->dev);
+ if (!tun_ctx) {
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_warn("%p: Invalid tunnel context\n", vfe->dev);
+ return NOTIFY_DONE;
+ }
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+	switch (event) {
+ case RTM_DELNEIGH:
+ nss_vxlanmgr_tunnel_mac_del(tun_ctx, vfe);
+ break;
+ case RTM_NEWNEIGH:
+ nss_vxlanmgr_tunnel_mac_add(tun_ctx, vfe);
+ break;
+ default:
+ nss_vxlanmgr_warn("%lu: Unknown FDB event received.\n", event);
+ }
+ return NOTIFY_DONE;
+}
+
+/*
+ * Notifier to receive fdb events from VxLAN
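+ *	vxlan_fdb_register_notify()/vxlan_fdb_unregister_notify() and
+ *	vxlan_fdb_update_mac() are assumed to come from the QSDK vxlan
+ *	driver patches; they are not upstream kernel APIs.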
+ */
+static struct notifier_block nss_vxlanmgr_tunnel_fdb_notifier = {
+ .notifier_call = nss_vxlanmgr_tunnel_fdb_event,
+};
+
+/*
+ * nss_vxlanmgr_tunnel_inner_stats()
+ * Update vxlan netdev stats with inner node stats
+ */
+static void nss_vxlanmgr_tunnel_inner_stats(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm)
+{
+ struct nss_vxlan_stats_msg *stats;
+ struct pcpu_sw_netstats *tstats;
+ struct net_device *dev;
+ struct net_device_stats *netdev_stats;
+ uint32_t i;
+ uint64_t dropped = 0;
+
+ stats = &nvm->msg.stats;
+ dev = tun_ctx->dev;
+
+ dev_hold(dev);
+ netdev_stats = (struct net_device_stats *)&dev->stats;
+
+ /*
+ * Only look at the tx_packets/tx_bytes for both host_inner/outer interfaces.
+ * rx_bytes/rx_packets are increased when the packet is received by the node.
+ * Therefore, it includes both transmitted/dropped packets. tx_bytes/tx_packets
+ * reflect successfully transmitted packets.
+ */
+ for (i = 0; i < NSS_MAX_NUM_PRI; i++) {
+ dropped += stats->node_stats.rx_dropped[i];
+ }
+
+ tstats = this_cpu_ptr(dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->tx_packets += stats->node_stats.tx_packets;
+ tstats->tx_bytes += stats->node_stats.tx_bytes;
+ u64_stats_update_end(&tstats->syncp);
+ netdev_stats->tx_dropped += dropped;
+ dev_put(dev);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_outer_stats()
+ * Update vxlan netdev stats with outer node stats
+ */
+static void nss_vxlanmgr_tunnel_outer_stats(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm)
+{
+ struct nss_vxlan_stats_msg *stats;
+ struct pcpu_sw_netstats *tstats;
+ struct net_device *dev;
+ struct net_device_stats *netdev_stats;
+ uint32_t i;
+ uint64_t dropped = 0;
+
+ stats = &nvm->msg.stats;
+ dev = tun_ctx->dev;
+
+ dev_hold(dev);
+ netdev_stats = (struct net_device_stats *)&dev->stats;
+
+ /*
+ * Only look at the tx_packets/tx_bytes for both host_inner/outer interfaces.
+ * rx_bytes/rx_packets are increased when the packet is received by the node.
+ * Therefore, it includes both transmitted/dropped packets. tx_bytes/tx_packets
+ * reflect successfully transmitted packets.
+ */
+ for (i = 0; i < NSS_MAX_NUM_PRI; i++) {
+ dropped += stats->node_stats.rx_dropped[i];
+ }
+
+ tstats = this_cpu_ptr(dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->rx_packets += stats->node_stats.tx_packets;
+ tstats->rx_bytes += stats->node_stats.tx_bytes;
+ u64_stats_update_end(&tstats->syncp);
+ netdev_stats->rx_dropped += dropped;
+ dev_put(dev);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_fdb_update()
+ * Update vxlan fdb entries
+ */
+static void nss_vxlanmgr_tunnel_fdb_update(struct nss_vxlanmgr_tun_ctx *tun_ctx, struct nss_vxlan_msg *nvm)
+{
+ uint8_t *mac;
+ uint16_t i, nentries;
+ struct vxlan_dev *priv;
+ struct nss_vxlan_macdb_stats_msg *db_stats;
+
+ db_stats = &nvm->msg.db_stats;
+ nentries = db_stats->cnt;
+ priv = netdev_priv(tun_ctx->dev);
+
+ dev_hold(tun_ctx->dev);
+
+ if (nentries > NSS_VXLAN_MACDB_ENTRIES_PER_MSG) {
+		nss_vxlanmgr_warn("%p: No more than %u entries allowed per message.\n",
+				tun_ctx->dev, NSS_VXLAN_MACDB_ENTRIES_PER_MSG);
+ dev_put(tun_ctx->dev);
+ return;
+ }
+
+ for (i = 0; i < nentries; i++) {
+ if (likely(db_stats->entry[i].hits)) {
+ mac = (uint8_t *)db_stats->entry[i].mac;
+ vxlan_fdb_update_mac(priv, mac);
+ }
+ }
+ dev_put(tun_ctx->dev);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_inner_notifier()
+ * Notifier for vxlan tunnel encap node
+ */
+static void nss_vxlanmgr_tunnel_inner_notifier(void *app_data, struct nss_cmn_msg *ncm)
+{
+ struct net_device *dev = (struct net_device *)app_data;
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+ struct nss_vxlan_msg *nvm;
+
+ if (!ncm) {
+ nss_vxlanmgr_info("%p: NULL msg received.\n", dev);
+ return;
+ }
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ tun_ctx = nss_vxlanmgr_tunnel_ctx_dev_get(dev);
+ if (!tun_ctx) {
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_warn("%p: Invalid tunnel context\n", dev);
+ return;
+ }
+
+ nvm = (struct nss_vxlan_msg *)ncm;
+ switch (nvm->cm.type) {
+ case NSS_VXLAN_MSG_TYPE_STATS_SYNC:
+ nss_vxlanmgr_tunnel_inner_stats(tun_ctx, nvm);
+ nss_vxlanmgr_tun_stats_sync(tun_ctx, nvm);
+ break;
+ case NSS_VXLAN_MSG_TYPE_MACDB_STATS:
+ nss_vxlanmgr_tunnel_fdb_update(tun_ctx, nvm);
+ nss_vxlanmgr_tun_macdb_stats_sync(tun_ctx, nvm);
+ break;
+ default:
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+		nss_vxlanmgr_info("%p: Unknown event from NSS\n", dev);
+ return;
+ }
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_outer_notifier()
+ * Notifier for vxlan tunnel decap node
+ */
+static void nss_vxlanmgr_tunnel_outer_notifier(void *app_data, struct nss_cmn_msg *ncm)
+{
+ struct net_device *dev = (struct net_device *)app_data;
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+ struct nss_vxlan_msg *nvm;
+
+ if (!ncm) {
+ nss_vxlanmgr_info("%p: NULL msg received.\n", dev);
+ return;
+ }
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ tun_ctx = nss_vxlanmgr_tunnel_ctx_dev_get(dev);
+ if (!tun_ctx) {
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_warn("%p: Invalid tunnel context\n", dev);
+ return;
+ }
+
+ nvm = (struct nss_vxlan_msg *)ncm;
+ switch (nvm->cm.type) {
+ case NSS_VXLAN_MSG_TYPE_STATS_SYNC:
+ nss_vxlanmgr_tunnel_outer_stats(tun_ctx, nvm);
+ nss_vxlanmgr_tun_stats_sync(tun_ctx, nvm);
+ break;
+ default:
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+		nss_vxlanmgr_info("%p: Unknown event from NSS\n", dev);
+ return;
+ }
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_inner_recv()
+ * Receives a pkt from NSS
+ */
+static void nss_vxlanmgr_tunnel_inner_recv(struct net_device *dev, struct sk_buff *skb,
+ __attribute__((unused)) struct napi_struct *napi)
+{
+ dev_hold(dev);
+ nss_vxlanmgr_info("%p: (vxlan packet) Exception packet received.\n", dev);
+
+ /*
+ * These are decapped and exceptioned packets.
+ */
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ dev_put(dev);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_outer_recv()
+ * Receives a pkt from NSS
+ */
+static void nss_vxlanmgr_tunnel_outer_recv(struct net_device *dev, struct sk_buff *skb,
+ __attribute__((unused)) struct napi_struct *napi)
+{
+ struct iphdr *iph;
+ size_t l3_hdr_size;
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+
+ nss_vxlanmgr_info("%p: (vxlan packet) Exception packet received.\n", dev);
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ tun_ctx = nss_vxlanmgr_tunnel_ctx_dev_get(dev);
+ if (!tun_ctx) {
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_warn("%p: Invalid tunnel context\n", dev);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ iph = (struct iphdr *)skb->data;
+ switch (iph->version) {
+ case 4:
+ l3_hdr_size = sizeof(struct iphdr);
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 6:
+ l3_hdr_size = sizeof(struct ipv6hdr);
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ tun_ctx->stats->host_packet_drop++;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_trace("%p: Skb received with unknown IP version: %d.\n", dev, iph->version);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /*
+ * VxLAN encapsulated packet exceptioned, remove the encapsulation
+ * and transmit on VxLAN interface.
+ */
+ if (unlikely(!pskb_may_pull(skb, (l3_hdr_size + sizeof(struct udphdr)
+ + sizeof(struct vxlanhdr))))) {
+ tun_ctx->stats->host_packet_drop++;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_trace("%p: pskb_may_pull failed for skb:%p\n", dev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ skb_pull(skb, (l3_hdr_size + sizeof(struct udphdr) + sizeof(struct vxlanhdr)));
+
+ /*
+ * Inner ethernet payload.
+ */
+ if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
+ tun_ctx->stats->host_packet_drop++;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_trace("%p: pskb_may_pull failed for skb:%p\n", dev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb_reset_mac_len(skb);
+ dev_queue_xmit(skb);
+}
+
+/*
+ * nss_vxlanmgr_tunnel_deconfig()
+ * Function to send dynamic interface disable message
+ */
+int nss_vxlanmgr_tunnel_deconfig(struct net_device *dev)
+{
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+ uint32_t inner_ifnum, outer_ifnum;
+ struct nss_vxlan_msg vxlanmsg;
+ nss_tx_status_t ret;
+
+ dev_hold(dev);
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ tun_ctx = nss_vxlanmgr_tunnel_ctx_dev_get(dev);
+ if (!tun_ctx) {
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_warn("%p: Invalid tunnel context\n", dev);
+ goto done;
+ }
+
+ inner_ifnum = tun_ctx->inner_ifnum;
+ outer_ifnum = tun_ctx->outer_ifnum;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+ memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg));
+
+ ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ inner_ifnum,
+ NSS_VXLAN_MSG_TYPE_TUN_DISABLE, 0);
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Sending configuration to inner interface failed: %d\n", dev, ret);
+ goto done;
+ }
+
+ ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ outer_ifnum,
+ NSS_VXLAN_MSG_TYPE_TUN_DISABLE, 0);
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Sending configuration to outer interface failed: %d\n", dev, ret);
+ }
+
+done:
+ dev_put(dev);
+ return NOTIFY_DONE;
+}
+
+/*
+ * nss_vxlanmgr_tunnel_config()
+ * Function to send dynamic interface enable message
+ */
+int nss_vxlanmgr_tunnel_config(struct net_device *dev)
+{
+ uint32_t inner_ifnum, outer_ifnum;
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+ struct nss_vxlan_msg vxlanmsg;
+ nss_tx_status_t ret;
+
+ dev_hold(dev);
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ tun_ctx = nss_vxlanmgr_tunnel_ctx_dev_get(dev);
+ if (!tun_ctx) {
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_warn("%p: Invalid tunnel context\n", dev);
+ goto done;
+ }
+
+ inner_ifnum = tun_ctx->inner_ifnum;
+ outer_ifnum = tun_ctx->outer_ifnum;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+ memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg));
+
+ ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ inner_ifnum,
+ NSS_VXLAN_MSG_TYPE_TUN_ENABLE, 0);
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Sending configuration to inner interface failed: %d\n", dev, ret);
+ goto done;
+ }
+
+ ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ outer_ifnum,
+ NSS_VXLAN_MSG_TYPE_TUN_ENABLE, 0);
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Sending configuration to outer interface failed: %d\n", dev, ret);
+ /*
+ * Disable inner node.
+ */
+ nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ inner_ifnum,
+ NSS_VXLAN_MSG_TYPE_TUN_DISABLE, 0);
+ }
+
+done:
+ dev_put(dev);
+ return NOTIFY_DONE;
+}
+
+/*
+ * nss_vxlanmgr_tunnel_destroy()
+ * Function to unregister and destroy dynamic interfaces.
+ */
+int nss_vxlanmgr_tunnel_destroy(struct net_device *dev)
+{
+ uint32_t inner_ifnum, outer_ifnum;
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+ struct nss_vxlan_msg vxlanmsg;
+ nss_tx_status_t ret;
+
+ dev_hold(dev);
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ if (!vxlan_ctx.tun_count) {
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_warn("%p: No more tunnels to destroy.\n", dev);
+ goto done;
+ }
+
+ tun_ctx = nss_vxlanmgr_tunnel_ctx_dev_get(dev);
+ if (!tun_ctx) {
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_warn("%p: Invalid tunnel context\n", dev);
+ goto done;
+ }
+
+ inner_ifnum = tun_ctx->inner_ifnum;
+ outer_ifnum = tun_ctx->outer_ifnum;
+
+ /*
+ * Remove tunnel from global list.
+ */
+ list_del(&tun_ctx->head);
+
+ /*
+ * Decrement interface count.
+ */
+ vxlan_ctx.tun_count--;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+ nss_vxlanmgr_tun_stats_deinit(tun_ctx);
+ nss_vxlanmgr_tun_stats_dentry_remove(tun_ctx);
+ kfree(tun_ctx);
+
+ if (!vxlan_ctx.tun_count) {
+ /*
+ * Unregister fdb notifier chain if
+ * all vxlan tunnels are destroyed.
+ */
+ vxlan_fdb_unregister_notify(&nss_vxlanmgr_tunnel_fdb_notifier);
+ }
+ nss_vxlanmgr_info("%p: VxLAN interface count is #%d\n", dev, vxlan_ctx.tun_count);
+
+ memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg));
+ ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ inner_ifnum,
+ NSS_VXLAN_MSG_TYPE_TUN_UNCONFIGURE, 0);
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Sending configuration to inner interface failed: %d\n", dev, ret);
+ }
+
+ if (!nss_vxlan_unregister_if(inner_ifnum)) {
+ nss_vxlanmgr_warn("%p: Inner interface not found\n", dev);
+ }
+ ret = nss_dynamic_interface_dealloc_node(inner_ifnum,
+ NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_INNER);
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Failed to dealloc inner: %d\n", dev, ret);
+ }
+
+ ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ outer_ifnum,
+ NSS_VXLAN_MSG_TYPE_TUN_UNCONFIGURE, 0);
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Sending configuration to outer interface failed: %d\n", dev, ret);
+ }
+
+ if (!nss_vxlan_unregister_if(outer_ifnum)) {
+ nss_vxlanmgr_warn("%p: Outer interface not found\n", dev);
+ }
+ ret = nss_dynamic_interface_dealloc_node(outer_ifnum,
+ NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_OUTER);
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Failed to dealloc outer: %d\n", dev, ret);
+ }
+
+done:
+ dev_put(dev);
+ return NOTIFY_DONE;
+}
+
+/*
+ * nss_vxlanmgr_tunnel_create()
+ * Function to create and register dynamic interfaces.
+ */
+int nss_vxlanmgr_tunnel_create(struct net_device *dev)
+{
+ struct vxlan_dev *priv;
+ struct nss_vxlan_msg vxlanmsg;
+ struct nss_vxlanmgr_tun_ctx *tun_ctx;
+ struct nss_vxlan_rule_msg *vxlan_cfg;
+ struct nss_ctx_instance *nss_ctx;
+	int32_t inner_ifnum, outer_ifnum;
+ nss_tx_status_t ret;
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ if (vxlan_ctx.tun_count == NSS_VXLAN_MAX_TUNNELS) {
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_warn("%p: Max number of vxlan interfaces supported is %d\n", dev, NSS_VXLAN_MAX_TUNNELS);
+ return NOTIFY_DONE;
+ }
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+
+ dev_hold(dev);
+
+ tun_ctx = kzalloc(sizeof(struct nss_vxlanmgr_tun_ctx), GFP_ATOMIC);
+ if (!tun_ctx) {
+ nss_vxlanmgr_warn("Failed to allocate memory for tun_ctx\n");
+ goto ctx_alloc_fail;
+ }
+ tun_ctx->dev = dev;
+ tun_ctx->vxlan_ctx = &vxlan_ctx;
+ INIT_LIST_HEAD(&tun_ctx->head);
+
+ inner_ifnum = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_INNER);
+ if (inner_ifnum < 0) {
+ nss_vxlanmgr_warn("%p: Inner interface allocation failed.\n", dev);
+ goto inner_alloc_fail;
+ }
+ tun_ctx->inner_ifnum = inner_ifnum;
+
+ outer_ifnum = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_OUTER);
+ if (outer_ifnum < 0) {
+ nss_vxlanmgr_warn("%p: Outer interface allocation failed.\n", dev);
+ goto outer_alloc_fail;
+ }
+ tun_ctx->outer_ifnum = outer_ifnum;
+
+ /*
+ * Register vxlan tunnel with NSS
+ */
+ nss_ctx = nss_vxlan_register_if(inner_ifnum, NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_INNER,
+ nss_vxlanmgr_tunnel_inner_recv,
+ nss_vxlanmgr_tunnel_inner_notifier, dev, 0);
+ if (!nss_ctx) {
+ nss_vxlanmgr_warn("%p: Failed to register inner iface\n", dev);
+ goto inner_reg_fail;
+ }
+
+ nss_ctx = nss_vxlan_register_if(outer_ifnum, NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_OUTER,
+ nss_vxlanmgr_tunnel_outer_recv,
+ nss_vxlanmgr_tunnel_outer_notifier, dev, 0);
+ if (!nss_ctx) {
+ nss_vxlanmgr_warn("%p: Failed to register outer iface\n", dev);
+ goto outer_reg_fail;
+ }
+
+ nss_vxlanmgr_trace("%p: Successfully registered inner and outer iface for VxLAN\n", dev);
+
+ memset(&vxlanmsg, 0, sizeof(struct nss_vxlan_msg));
+ vxlan_cfg = &vxlanmsg.msg.vxlan_create;
+
+ priv = netdev_priv(dev);
+ vxlan_cfg->vni = priv->cfg.vni;
+ vxlan_cfg->tunnel_flags = nss_vxlanmgr_tunnel_flags_parse(priv);
+ vxlan_cfg->src_port_min = priv->cfg.port_min;
+ vxlan_cfg->src_port_max = priv->cfg.port_max;
+ vxlan_cfg->dest_port = priv->cfg.dst_port;
+ vxlan_cfg->tos = priv->cfg.tos;
+ vxlan_cfg->ttl = (priv->cfg.ttl ? priv->cfg.ttl : IPDEFTTL);
+
+ vxlan_cfg->sibling_if_num = outer_ifnum;
+ ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ inner_ifnum,
+ NSS_VXLAN_MSG_TYPE_TUN_CONFIGURE,
+ sizeof(struct nss_vxlan_rule_msg));
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Sending configuration to inner interface failed: %d\n", dev, ret);
+ goto config_fail;
+ }
+
+ vxlan_cfg->sibling_if_num = inner_ifnum;
+ ret = nss_vxlanmgr_tunnel_tx_msg_sync(vxlan_ctx.nss_ctx,
+ &vxlanmsg,
+ outer_ifnum,
+ NSS_VXLAN_MSG_TYPE_TUN_CONFIGURE,
+ sizeof(struct nss_vxlan_rule_msg));
+ if (ret != NSS_TX_SUCCESS) {
+ nss_vxlanmgr_warn("%p: Sending configuration to outer interface failed: %d\n", dev, ret);
+ goto config_fail;
+ }
+
+	if (!nss_vxlanmgr_tun_stats_init(tun_ctx)) {
+		nss_vxlanmgr_warn("%p: Tun stats init failed\n", vxlan_ctx.nss_ctx);
+		goto config_fail;
+	}
+
+	/*
+	 * Create the debugfs file only after the stats memory it exposes
+	 * has been allocated; unwind the allocation if creation fails.
+	 */
+	if (!nss_vxlanmgr_tun_stats_dentry_create(tun_ctx)) {
+		nss_vxlanmgr_warn("%p: Tun stats dentry create failed\n", vxlan_ctx.nss_ctx);
+		nss_vxlanmgr_tun_stats_deinit(tun_ctx);
+		goto config_fail;
+	}
+
+ tun_ctx->vni = vxlan_cfg->vni;
+ tun_ctx->tunnel_flags = vxlan_cfg->tunnel_flags;
+ tun_ctx->flow_label = vxlan_cfg->flow_label;
+ tun_ctx->src_port_min = vxlan_cfg->src_port_min;
+ tun_ctx->src_port_max = vxlan_cfg->src_port_max;
+ tun_ctx->dest_port = vxlan_cfg->dest_port;
+ tun_ctx->tos = vxlan_cfg->tos;
+ tun_ctx->ttl = vxlan_cfg->ttl;
+
+ spin_lock_bh(&vxlan_ctx.tun_lock);
+ /*
+ * Add tunnel to global list.
+ */
+ list_add(&tun_ctx->head, &vxlan_ctx.list);
+
+ if (!vxlan_ctx.tun_count) {
+ /*
+ * Register with fdb notifier chain
+ * when first tunnel is created.
+ */
+ vxlan_fdb_register_notify(&nss_vxlanmgr_tunnel_fdb_notifier);
+ }
+
+ /*
+ * Increment vxlan tunnel interface count
+ */
+ vxlan_ctx.tun_count++;
+ spin_unlock_bh(&vxlan_ctx.tun_lock);
+ nss_vxlanmgr_info("%p: VxLAN interface count is #%d\n", dev, vxlan_ctx.tun_count);
+
+ dev_put(dev);
+ return NOTIFY_DONE;
+
+config_fail:
+ nss_vxlan_unregister_if(outer_ifnum);
+outer_reg_fail:
+ nss_vxlan_unregister_if(inner_ifnum);
+inner_reg_fail:
+ ret = nss_dynamic_interface_dealloc_node(outer_ifnum, NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_OUTER);
+ if (ret != NSS_TX_SUCCESS)
+ nss_vxlanmgr_warn("%p: Outer interface dealloc failed: %d\n", dev, ret);
+outer_alloc_fail:
+ ret = nss_dynamic_interface_dealloc_node(inner_ifnum, NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_INNER);
+ if (ret != NSS_TX_SUCCESS)
+ nss_vxlanmgr_warn("%p: Inner interface dealloc failed: %d\n", dev, ret);
+inner_alloc_fail:
+ kfree(tun_ctx);
+ctx_alloc_fail:
+ dev_put(dev);
+ return NOTIFY_DONE;
+}