Merge "[qca-nss-drv] Correct alignment in peer msg structure"
diff --git a/exports/arch/nss_ipq807x.h b/exports/arch/nss_ipq807x.h
index 05122bc..1f80f98 100644
--- a/exports/arch/nss_ipq807x.h
+++ b/exports/arch/nss_ipq807x.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -28,6 +28,7 @@
#define NSS_MAX_NUM_PRI 4 /**< Maximum number of priority queues in NSS for the IPQ807x chipsets. */
#define NSS_HOST_CORES 4 /**< Number of host cores for the IPQ807x chipsets. */
+#define NSS_PPE_SUPPORTED /**< PPE supported flag for the IPQ807x chipsets. */
/**
* @}
diff --git a/exports/arch/nss_ipq807x_64.h b/exports/arch/nss_ipq807x_64.h
index ca6d07f..ae32665 100644
--- a/exports/arch/nss_ipq807x_64.h
+++ b/exports/arch/nss_ipq807x_64.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -28,6 +28,7 @@
#define NSS_MAX_NUM_PRI 4 /**< Maximum number of priority queues in NSS for the IPQ807x 64-bit chipsets. */
#define NSS_HOST_CORES 4 /**< Number of host cores for the IPQ807x 64-bit chipsets. */
+#define NSS_PPE_SUPPORTED /**< PPE supported flag for the IPQ807x 64-bit chipsets. */
/**
* @}
diff --git a/exports/nss_crypto_cmn.h b/exports/nss_crypto_cmn.h
index d8741c5..95e21e7 100644
--- a/exports/nss_crypto_cmn.h
+++ b/exports/nss_crypto_cmn.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -31,13 +31,16 @@
*/
#define NSS_CRYPTO_CMN_CTX_SPARE 4 /**< Context spare words size. */
#define NSS_CRYPTO_CMN_VER_WORDS 4 /**< Firmware version words size.*/
+#define NSS_CRYPTO_CIPHER_KEYLEN_MAX 32 /**< Maximum cipher key size. */
+#define NSS_CRYPTO_AUTH_KEYLEN_MAX 64 /**< Maximum authentication key size. */
+#define NSS_CRYPTO_NONCE_SIZE_MAX 4 /**< Maximum nonce size. */
/**
* nss_crypto_cmn_algo
* List of crypto algorithms supported.
*/
enum nss_crypto_cmn_algo {
- NSS_CRYPTO_CMN_ALGO_NULL, /**< NULL tranform */
+ NSS_CRYPTO_CMN_ALGO_NULL, /**< NULL transform. */
NSS_CRYPTO_CMN_ALGO_3DES_CBC, /**< Asynchronous block cipher. */
NSS_CRYPTO_CMN_ALGO_AES128_CBC, /**< Asynchronous block cipher. */
NSS_CRYPTO_CMN_ALGO_AES192_CBC, /**< Asynchronous block cipher. */
@@ -48,6 +51,9 @@
NSS_CRYPTO_CMN_ALGO_AES128_ECB, /**< Asynchronous block cipher. */
NSS_CRYPTO_CMN_ALGO_AES192_ECB, /**< Asynchronous block cipher. */
NSS_CRYPTO_CMN_ALGO_AES256_ECB, /**< Asynchronous block cipher. */
+ NSS_CRYPTO_CMN_ALGO_AES128_GCM, /**< Asynchronous block cipher. */
+ NSS_CRYPTO_CMN_ALGO_AES192_GCM, /**< Asynchronous block cipher. */
+ NSS_CRYPTO_CMN_ALGO_AES256_GCM, /**< Asynchronous block cipher. */
NSS_CRYPTO_CMN_ALGO_MD5_HASH, /**< Asynchronous digest. */
NSS_CRYPTO_CMN_ALGO_SHA160_HASH, /**< Asynchronous digest. */
NSS_CRYPTO_CMN_ALGO_SHA224_HASH, /**< Asynchronous digest. */
@@ -220,13 +226,13 @@
* Context message for setting up a crypto context in firmware.
*/
struct nss_crypto_cmn_ctx {
- uint32_t words; /**< Number of valid context words. */
- uint32_t addr; /**< Address to configuration. */
uint32_t spare[NSS_CRYPTO_CMN_CTX_SPARE]; /**< Context spare words. */
-
uint16_t index; /**< Crypto index. */
uint16_t sec_offset; /**< Secure offset for copying keys. */
+ uint8_t cipher_key[NSS_CRYPTO_CIPHER_KEYLEN_MAX]; /**< Array containing cipher keys. */
+	uint8_t auth_key[NSS_CRYPTO_AUTH_KEYLEN_MAX]; /**< Array containing authentication keys. */
+ uint8_t nonce[NSS_CRYPTO_NONCE_SIZE_MAX]; /**< Nonce value. */
enum nss_crypto_cmn_algo algo; /**< Crypto algorithm. */
enum nss_crypto_cmn_ctx_flags flags; /**< Context specific flags. */
};
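
For illustration, a minimal sketch of how a crypto client might populate the reworked context message now that key material is carried inline (the removed words/addr fields are gone); the key buffer, salt, and session index below are placeholders, not values defined by this patch:

	struct nss_crypto_cmn_ctx ctx = {0};
	uint8_t aes_key[16];				/* placeholder AES-128 key, bounded by NSS_CRYPTO_CIPHER_KEYLEN_MAX */
	uint8_t salt[NSS_CRYPTO_NONCE_SIZE_MAX];	/* placeholder 4-byte salt carried in the nonce field */

	ctx.index = 0;					/* crypto session index chosen by the caller */
	ctx.algo = NSS_CRYPTO_CMN_ALGO_AES128_GCM;	/* one of the newly added GCM algorithms */
	memcpy(ctx.cipher_key, aes_key, sizeof(aes_key));
	memcpy(ctx.nonce, salt, sizeof(salt));
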
diff --git a/exports/nss_edma.h b/exports/nss_edma.h
index 744c59b..4e2fc3e 100644
--- a/exports/nss_edma.h
+++ b/exports/nss_edma.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -139,7 +139,7 @@
/**
* nss_edma_misc_err_stats
- * EDMA error statistics.
+ * EDMA error statistics.
*/
struct nss_edma_misc_err_stats {
uint32_t axi_rd_err; /**< EDMA AXI read error. */
@@ -152,6 +152,7 @@
uint32_t pkt_len_le33_err; /**< EDMA packet length smaller than 33b error. */
uint32_t data_len_err; /**< EDMA data length error. */
uint32_t alloc_fail_cnt; /**< EDMA number of times the allocation of pbuf for statistics failed. */
+	uint32_t qos_inval_dst_drops; /**< EDMA number of QoS packets dropped due to an invalid destination. */
};
/**
diff --git a/exports/nss_ipsec.h b/exports/nss_ipsec.h
index a5adfd7..ed62142 100644
--- a/exports/nss_ipsec.h
+++ b/exports/nss_ipsec.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -61,6 +61,7 @@
NSS_IPSEC_MSG_TYPE_SYNC_SA_STATS = 4,
NSS_IPSEC_MSG_TYPE_SYNC_FLOW_STATS = 5,
NSS_IPSEC_MSG_TYPE_SYNC_NODE_STATS = 6,
+ NSS_IPSEC_MSG_TYPE_CONFIGURE_NODE = 7,
NSS_IPSEC_MSG_TYPE_MAX
};
@@ -189,6 +190,15 @@
};
/**
+ * nss_ipsec_configure_node
+ * Push message for setting IPsec inline mode and initializing DMA rings.
+ */
+struct nss_ipsec_configure_node {
+ bool dma_redirect; /**< Program redirect DMA ring. */
+ bool dma_lookaside; /**< Program lookaside DMA ring. */
+};
+
+/**
* nss_ipsec_sa_stats
* Packet statistics per security association.
*/
@@ -230,6 +240,8 @@
uint32_t linearized; /**< Packet is linear. */
uint32_t exceptioned; /**< Packets exception from the NSS. */
uint32_t fail_enqueue; /**< Packets failed to enqueue. */
+ uint32_t redir_rx; /**< Packets received in redirect ring. */
+ uint32_t fail_redir; /**< Packets dropped in redirect ring. */
};
/**
@@ -260,6 +272,8 @@
union {
struct nss_ipsec_rule rule;
/**< IPsec rule message. */
+ struct nss_ipsec_configure_node node;
+			/**< IPsec node configuration message. */
union nss_ipsec_stats stats;
/**< Retrieve statistics for the tunnel. */
} msg; /**< Message payload. */
@@ -307,6 +321,30 @@
extern nss_tx_status_t nss_ipsec_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_ipsec_msg *msg);
/**
+ * nss_ipsec_tx_msg_sync
+ * Sends IPsec messages synchronously.
+ *
+ * @datatypes
+ * nss_ctx_instance \n
+ * nss_ipsec_msg_type \n
+ * nss_ipsec_msg \n
+ * nss_ipsec_error_type
+ *
+ * @param[in] nss_ctx Pointer to the NSS context.
+ * @param[in] if_num Configuration interface number.
+ * @param[in] type Type of the message.
+ * @param[in] len Size of the payload.
+ * @param[in] nim Pointer to the message data.
+ * @param[in,out] resp Response for the configuration.
+ *
+ * @return
+ * Status of the Tx operation.
+ */
+extern nss_tx_status_t nss_ipsec_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num,
+ enum nss_ipsec_msg_type type, uint16_t len,
+ struct nss_ipsec_msg *nim, enum nss_ipsec_error_type *resp);
+
+/**
* nss_ipsec_tx_buf
* Sends a plain text packet to NSS for IPsec encapsulation or decapsulation.
*
@@ -455,6 +493,25 @@
extern int32_t nss_ipsec_get_data_interface(void);
/**
+ * nss_ipsec_ppe_port_config
+ *	Configures the Packet Processing Engine (PPE) port for inline IPsec.
+ *
+ * @datatypes
+ * nss_ctx_instance \n
+ * net_device
+ *
+ * @param[in] ctx Pointer to the context of the HLOS driver.
+ * @param[in] netdev Pointer to the associated network device.
+ * @param[in] if_num Data interface number.
+ * @param[in] vsi_num Virtual switch instance number.
+ *
+ * @return
+ * True if successful, else false.
+ */
+extern bool nss_ipsec_ppe_port_config(struct nss_ctx_instance *ctx, struct net_device *netdev,
+ uint32_t if_num, uint32_t vsi_num);
+
+/**
* @}
*/
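
As a usage sketch (not taken from this patch), a caller could push the new node configuration through the synchronous API roughly as follows; the choice of NSS_IPSEC_ENCAP_INTERFACE_NUM as the target interface and the error handling are assumptions:

	struct nss_ctx_instance *nss_ctx = nss_ipsec_get_context();
	struct nss_ipsec_msg nim = { {0} };
	enum nss_ipsec_error_type resp;

	nim.msg.node.dma_redirect = true;	/* program the redirect DMA ring */
	nim.msg.node.dma_lookaside = true;	/* program the lookaside DMA ring */

	if (nss_ipsec_tx_msg_sync(nss_ctx, NSS_IPSEC_ENCAP_INTERFACE_NUM, NSS_IPSEC_MSG_TYPE_CONFIGURE_NODE,
				  sizeof(struct nss_ipsec_configure_node), &nim, &resp) != NSS_TX_SUCCESS)
		pr_err("IPsec node configuration failed, error %d\n", resp);
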
diff --git a/exports/nss_ppe.h b/exports/nss_ppe.h
index f3ac045..8bb6274 100644
--- a/exports/nss_ppe.h
+++ b/exports/nss_ppe.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -31,15 +31,24 @@
* NSS PORT defines
*/
#define NSS_PPE_NUM_PHY_PORTS_MAX 8
- /**< Maximum number of PPE phsyical ports. */
+ /**< Maximum number of PPE physical ports. */
+#define NSS_PPE_PORT_IPSEC 7
+ /**< Port number of PPE inline IPsec port. */
/**
- * nss_ppe_metadata_types
+ * nss_ppe_message_types
* Message types for Packet Processing Engine (PPE) requests and responses.
+ *
+ * Note: These PPE messages are a short-term approach; expect all
+ * messages below to be deprecated in favor of a more integrated approach.
*/
-enum nss_ppe_metadata_types {
+enum nss_ppe_message_types {
NSS_PPE_MSG_SYNC_STATS,
NSS_PPE_MSG_L2_EXCEPTION,
+ NSS_PPE_MSG_IPSEC_PORT_CONFIG,
+ NSS_PPE_MSG_IPSEC_PORT_MTU_CHANGE,
+ NSS_PPE_MSG_IPSEC_ADD_INTF,
+ NSS_PPE_MSG_IPSEC_DEL_INTF,
NSS_PPE_MSG_MAX,
};
@@ -50,6 +59,13 @@
enum nss_ppe_msg_error_type {
PPE_MSG_ERROR_OK,
PPE_MSG_ERROR_UNKNOWN_TYPE,
+ PPE_MSG_ERROR_PORT_CREATION_FAIL,
+ PPE_MSG_ERROR_INVALID_PORT_VSI,
+ PPE_MSG_ERROR_INVALID_L3_IF,
+ PPE_MSG_ERROR_IPSEC_PORT_CONFIG,
+ PPE_MSG_ERROR_IPSEC_INTF_TABLE_FULL,
+ PPE_MSG_ERROR_IPSEC_INTF_ATTACHED,
+ PPE_MSG_ERROR_IPSEC_INTF_UNATTACHED
};
/**
@@ -104,12 +120,12 @@
uint32_t nss_ppe_fail_ppe_unresponsive;
/**< Request failed because the PPE is not responding. */
uint32_t nss_ppe_ce_opaque_invalid;
- /**< Request failed because of invalid opaque in connection entry */
+ /**< Request failed because of invalid opaque in connection entry. */
uint32_t nss_ppe_fail_fqg_full;
/**< Request failed because the flow QoS group is full. */
};
-/*
+/**
* nss_ppe_l2_exception_msg
* Message structure for L2 exception.
*/
@@ -118,6 +134,41 @@
};
/**
+ * nss_ppe_ipsec_port_config_msg
+ * Message structure for inline IPsec port configuration.
+ */
+struct nss_ppe_ipsec_port_config_msg {
+ uint32_t nss_ifnum; /**< NSS interface number corresponding to inline IPsec port. */
+ uint16_t mtu; /**< MTU value for inline IPsec port. */
+ uint8_t vsi_num; /**< Default port VSI for inline IPsec port. */
+};
+
+/**
+ * nss_ppe_ipsec_port_mtu_msg
+ * Message structure for inline IPsec port MTU change.
+ */
+struct nss_ppe_ipsec_port_mtu_msg {
+ uint32_t nss_ifnum; /**< NSS interface number corresponding to inline IPsec port. */
+ uint16_t mtu; /**< MTU value for inline IPsec port. */
+};
+
+/**
+ * nss_ppe_ipsec_add_intf_msg
+ *	Message structure for adding a dynamic IPsec/DTLS interface to the inline IPsec port.
+ */
+struct nss_ppe_ipsec_add_intf_msg {
+ uint32_t nss_ifnum; /**< Dynamic IPsec/DTLS interface number. */
+};
+
+/**
+ * nss_ppe_ipsec_del_intf_msg
+ *	Message structure for deleting a dynamic IPsec/DTLS interface from the inline IPsec port.
+ */
+struct nss_ppe_ipsec_del_intf_msg {
+ uint32_t nss_ifnum; /**< Dynamic IPsec/DTLS interface number. */
+};
+
+/**
* nss_ppe_msg
* Data for sending and receiving PPE host-to-NSS messages.
*/
@@ -132,6 +183,14 @@
/**< Synchronization statistics. */
struct nss_ppe_l2_exception_msg l2_exception;
/**< L2 exception message. */
+ struct nss_ppe_ipsec_port_config_msg ipsec_config;
+ /**< PPE inline IPsec port configuration message. */
+ struct nss_ppe_ipsec_port_mtu_msg ipsec_mtu;
+ /**< Inline IPsec port MTU change message. */
+ struct nss_ppe_ipsec_add_intf_msg ipsec_addif;
+ /**< Inline IPsec NSS interface attach message. */
+ struct nss_ppe_ipsec_del_intf_msg ipsec_delif;
+ /**< Inline IPsec NSS interface detach message. */
} msg; /**< Message payload. */
};
@@ -228,6 +287,53 @@
nss_tx_status_t nss_ppe_tx_l2_exception_msg(uint32_t if_num, bool exception_enable);
/**
+ * nss_ppe_tx_ipsec_config_msg
+ *	Sends the PPE a message to configure the inline IPsec port.
+ *
+ * @param[in] nss_ifnum  Static IPsec interface number.
+ * @param[in] vsi_num Default VSI number associated with inline IPsec port.
+ * @param[in] mtu Default MTU of static inline IPsec port.
+ *
+ * @return
+ * Status of the Tx operation.
+ */
+nss_tx_status_t nss_ppe_tx_ipsec_config_msg(uint32_t nss_ifnum, uint32_t vsi_num, uint16_t mtu);
+
+/**
+ * nss_ppe_tx_ipsec_mtu_msg
+ *	Sends the PPE a message to configure the MTU value on the inline IPsec port.
+ *
+ * @param[in] nss_ifnum Static IPsec interface number.
+ * @param[in] mtu MTU of static IPsec interface.
+ *
+ * @return
+ * Status of the Tx operation.
+ */
+nss_tx_status_t nss_ppe_tx_ipsec_mtu_msg(uint32_t nss_ifnum, uint16_t mtu);
+
+/**
+ * nss_ppe_tx_ipsec_add_intf_msg
+ *	Sends the PPE a message to attach a dynamic interface number to the inline IPsec port.
+ *
+ * @param[in] nss_ifnum  Dynamic IPsec/DTLS interface number.
+ *
+ * @return
+ * Status of the Tx operation.
+ */
+nss_tx_status_t nss_ppe_tx_ipsec_add_intf_msg(uint32_t nss_ifnum);
+
+/**
+ * nss_ppe_tx_ipsec_del_intf_msg
+ *	Sends the PPE a message to detach a dynamic interface number from the inline IPsec port.
+ *
+ * @param[in] nss_ifnum  Dynamic IPsec/DTLS interface number.
+ *
+ * @return
+ * Status of the Tx operation.
+ */
+nss_tx_status_t nss_ppe_tx_ipsec_del_intf_msg(uint32_t nss_ifnum);
+
+/**
* nss_ppe_stats_conn_get
* Gets PPE connection statistics.
*
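
As a usage sketch, an inline IPsec client might bring up the PPE IPsec port and then attach its dynamic interface roughly as follows; the interface numbers, VSI, and MTU values are placeholders rather than anything defined by this patch:

	uint32_t ipsec_ifnum = 0;	/* placeholder: NSS interface number of the inline IPsec port */
	uint32_t vsi_num = 0;		/* placeholder: default port VSI, must be below NSS_PPE_VSI_NUM_MAX */
	uint32_t dtls_ifnum = 0;	/* placeholder: dynamic IPsec/DTLS interface number */

	if (nss_ppe_tx_ipsec_config_msg(ipsec_ifnum, vsi_num, 1500) != NSS_TX_SUCCESS)
		pr_err("inline IPsec port configuration failed\n");

	if (nss_ppe_tx_ipsec_add_intf_msg(dtls_ifnum) != NSS_TX_SUCCESS)
		pr_err("failed to attach dynamic interface to the IPsec port\n");

	/* Later, if the MTU of the underlying device changes: */
	nss_ppe_tx_ipsec_mtu_msg(ipsec_ifnum, 1400);
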
diff --git a/exports/nss_wifi.h b/exports/nss_wifi.h
index 7925c10..6669b1e 100644
--- a/exports/nss_wifi.h
+++ b/exports/nss_wifi.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -134,6 +134,7 @@
NSS_WIFI_RX_EXT_INV_PEER_TYPE,
NSS_WIFI_RX_EXT_PKTLOG_TYPE,
NSS_WIFI_RX_STATS_V2_EXCEPTION,
+ NSS_WIFI_RX_MGMT_NULL_TYPE,
NSS_WIFI_RX_EXT_MAX_TYPE,
};
diff --git a/exports/nss_wifi_vdev.h b/exports/nss_wifi_vdev.h
index a424842..ea91d6e 100644
--- a/exports/nss_wifi_vdev.h
+++ b/exports/nss_wifi_vdev.h
@@ -591,6 +591,7 @@
uint8_t reserved; /**< Reserve bytes for alignment. */
enum wifi_vdev_ext_wds_info_type wds_type;
/**< WDS message type. */
+ uint8_t addr4_valid; /**< 802.11 4th address valid flag. */
};
/**
diff --git a/exports/nss_wifili_if.h b/exports/nss_wifili_if.h
index 1c136d6..a7ccf36 100644
--- a/exports/nss_wifili_if.h
+++ b/exports/nss_wifili_if.h
@@ -132,6 +132,7 @@
NSS_WIFILI_RADIO_CMD_MSG,
NSS_WIFILI_LINK_DESC_INFO_MSG,
NSS_WIFILI_PEER_SECURITY_TYPE_MSG,
+ NSS_WIFILI_PEER_NAWDS_ENABLE_MSG,
NSS_WIFILI_MAX_MSG
};
@@ -330,6 +331,10 @@
/**< Number of memory address. */
uint32_t ext_desc_page_num;
/**< Extended descriptor page number. */
+ uint32_t num_tx_desc_2;
+			/**< Count of the software descriptors for the second radio. */
+ uint32_t num_tx_desc_ext_2;
+			/**< Count of the software extended descriptors for the second radio. */
};
/**
@@ -675,6 +680,10 @@
uint32_t ofdma; /**< Total number of OFDMA packets. */
uint32_t non_amsdu_cnt; /**< Number of MSDUs with no MSDU level aggregation. */
uint32_t amsdu_cnt; /**< Number of MSDUs part of AMSDU. */
+ uint32_t tx_mcast_cnt; /**< Total number of multicast packets sent. */
+ uint32_t tx_mcast_bytes; /**< Total number of multicast bytes sent. */
+ uint32_t tx_ucast_cnt; /**< Total number of unicast packets sent. */
+ uint32_t tx_ucast_bytes; /**< Total number of unicast bytes sent. */
struct nss_wifili_tx_dropped dropped; /**< Tx peer dropped. */
};
@@ -803,6 +812,15 @@
};
/**
+ * nss_wifili_peer_nawds_enable_msg
+ *	Wifili NAWDS enable message for a peer.
+ */
+struct nss_wifili_peer_nawds_enable_msg {
+ uint16_t peer_id; /**< Peer ID. */
+ uint16_t is_nawds; /**< Enable NAWDS on this peer. */
+};
+
+/**
* nss_wifili_reo_tidq_msg
* REO TID queue setup message.
*/
@@ -880,7 +898,9 @@
/**< Link descriptor buffer address information. */
struct nss_wifili_peer_security_type_msg securitymsg;
/**< Wifili peer security message. */
- } msg;
+ struct nss_wifili_peer_nawds_enable_msg nawdsmsg;
+			/**< Wifili peer NAWDS enable message. */
+ } msg; /**< Message payload. */
};
/**
diff --git a/nss_c2c_tx.c b/nss_c2c_tx.c
index dd866de..819e97d 100644
--- a/nss_c2c_tx.c
+++ b/nss_c2c_tx.c
@@ -81,7 +81,7 @@
case NSS_C2C_TX_MSG_TYPE_STATS:
nss_c2c_tx_stats_sync(nss_ctx, &nctm->msg.stats);
- return;
+ break;
}
/*
diff --git a/nss_core.c b/nss_core.c
index 8b19221..22779f7 100644
--- a/nss_core.c
+++ b/nss_core.c
@@ -773,10 +773,11 @@
* nss_core_rx_pbuf()
* Receive a pbuf from the NSS into Linux.
*/
-static inline void nss_core_rx_pbuf(struct nss_ctx_instance *nss_ctx, struct n2h_descriptor *desc, struct napi_struct *napi, uint8_t buffer_type, struct sk_buff *nbuf)
+static inline void nss_core_rx_pbuf(struct nss_ctx_instance *nss_ctx, struct napi_struct *napi,
+ uint8_t buffer_type, struct sk_buff *nbuf, uint32_t desc_ifnum, uint32_t bit_flags)
{
- unsigned int interface_num = NSS_INTERFACE_NUM_GET(desc->interface_num);
- unsigned int core_id = NSS_INTERFACE_NUM_GET_COREID(desc->interface_num);
+ unsigned int interface_num = NSS_INTERFACE_NUM_GET(desc_ifnum);
+ unsigned int core_id = NSS_INTERFACE_NUM_GET_COREID(desc_ifnum);
struct nss_shaper_bounce_registrant *reg = NULL;
int32_t status;
@@ -816,11 +817,11 @@
break;
case N2H_BUFFER_PACKET:
- nss_core_handle_buffer_pkt(nss_ctx, interface_num, nbuf, napi, desc->bit_flags);
+ nss_core_handle_buffer_pkt(nss_ctx, interface_num, nbuf, napi, bit_flags);
break;
case N2H_BUFFER_PACKET_EXT:
- nss_core_handle_ext_buffer_pkt(nss_ctx, interface_num, nbuf, napi, desc->bit_flags);
+ nss_core_handle_ext_buffer_pkt(nss_ctx, interface_num, nbuf, napi, bit_flags);
break;
case N2H_BUFFER_STATUS:
@@ -861,7 +862,7 @@
* nss_core_handle_nrfrag_skb()
* Handled the processing of fragmented skb's
*/
-static inline bool nss_core_handle_nr_frag_skb(struct nss_ctx_instance *nss_ctx, struct sk_buff **nbuf_ptr, struct sk_buff **jumbo_start_ptr, struct n2h_descriptor *desc, unsigned int buffer_type)
+static inline bool nss_core_handle_nr_frag_skb(struct nss_ctx_instance *nss_ctx, struct sk_buff **nbuf_ptr, struct sk_buff **jumbo_start_ptr, struct n2h_descriptor *desc, struct hlos_n2h_desc_ring *n2h_desc_ring, unsigned int buffer_type)
{
struct sk_buff *nbuf = *nbuf_ptr;
struct sk_buff *jumbo_start = *jumbo_start_ptr;
@@ -879,6 +880,13 @@
* chains (or it's not a scattered one).
*/
if (likely(bit_flags & N2H_BIT_FLAG_FIRST_SEGMENT) && likely(bit_flags & N2H_BIT_FLAG_LAST_SEGMENT)) {
+ /*
+		 * This is considered safe for a linear skb since every
+		 * skb carries both the FIRST and LAST bits, so these fields
+		 * will be updated for every packet.
+ */
+ n2h_desc_ring->interface_num = desc->interface_num;
+ n2h_desc_ring->bit_flags = desc->bit_flags;
/*
* We have received another head before we saw the last segment.
@@ -916,6 +924,17 @@
* Build a frags[] out of segments.
*/
if (unlikely((bit_flags & N2H_BIT_FLAG_FIRST_SEGMENT))) {
+ /*
+		 * We want to grab the head descriptor information
+		 * and consume the pbuf based on that information only.
+ *
+ * This is considered safe since we will construct the
+ * chain in the order of seeing a FIRST and a LAST.
+ * Every other order will be dropped or destroyed.
+ *
+ */
+ n2h_desc_ring->interface_num = desc->interface_num;
+ n2h_desc_ring->bit_flags = desc->bit_flags;
/*
* We have received another head before we saw the last segment.
@@ -1006,7 +1025,7 @@
* Handler for processing linear skbs.
*/
static inline bool nss_core_handle_linear_skb(struct nss_ctx_instance *nss_ctx, struct sk_buff **nbuf_ptr, struct sk_buff **head_ptr,
- struct sk_buff **tail_ptr, struct n2h_descriptor *desc)
+ struct sk_buff **tail_ptr, struct hlos_n2h_desc_ring *n2h_desc_ring, struct n2h_descriptor *desc)
{
uint16_t bit_flags = desc->bit_flags;
struct sk_buff *nbuf = *nbuf_ptr;
@@ -1023,6 +1042,13 @@
prefetch((void *)(nbuf->data));
if (likely(bit_flags & N2H_BIT_FLAG_FIRST_SEGMENT) && likely(bit_flags & N2H_BIT_FLAG_LAST_SEGMENT)) {
+ /*
+		 * This is considered safe for a linear skb since every
+		 * skb carries both the FIRST and LAST bits, so these fields
+		 * will be updated for every packet.
+ */
+ n2h_desc_ring->interface_num = desc->interface_num;
+ n2h_desc_ring->bit_flags = desc->bit_flags;
/*
* We have received another head before we saw the last segment.
@@ -1055,6 +1081,17 @@
* Build a frag list out of segments.
*/
if (unlikely((bit_flags & N2H_BIT_FLAG_FIRST_SEGMENT))) {
+ /*
+		 * We want to grab the head descriptor information
+		 * and consume the pbuf based on that information only.
+ *
+ * This is considered safe since we will construct the
+ * chain in the order of seeing a FIRST and a LAST.
+ * Every other order will be dropped or destroyed.
+ *
+ */
+ n2h_desc_ring->interface_num = desc->interface_num;
+ n2h_desc_ring->bit_flags = desc->bit_flags;
/*
* We have received another head before we saw the last segment.
@@ -1334,6 +1371,8 @@
* one of them is actually looked at.
*/
if ((unlikely(buffer_type == N2H_BUFFER_SHAPER_BOUNCED_INTERFACE)) || (unlikely(buffer_type == N2H_BUFFER_SHAPER_BOUNCED_BRIDGE))) {
+ n2h_desc_ring->interface_num = desc->interface_num;
+ n2h_desc_ring->bit_flags = desc->bit_flags;
dma_unmap_page(nss_ctx->dev, (desc->buffer + desc->payload_offs), desc->payload_len, DMA_TO_DEVICE);
goto consume;
}
@@ -1343,6 +1382,8 @@
*
*/
if (unlikely((buffer_type == N2H_BUFFER_CRYPTO_RESP))) {
+ n2h_desc_ring->interface_num = desc->interface_num;
+ n2h_desc_ring->bit_flags = desc->bit_flags;
dma_unmap_single(NULL, (desc->buffer + desc->payload_offs), desc->payload_len, DMA_FROM_DEVICE);
goto consume;
}
@@ -1364,7 +1405,7 @@
n2h_desc_ring->head = NULL;
}
- if (!nss_core_handle_nr_frag_skb(nss_ctx, &nbuf, &n2h_desc_ring->jumbo_start, desc, buffer_type)) {
+ if (!nss_core_handle_nr_frag_skb(nss_ctx, &nbuf, &n2h_desc_ring->jumbo_start, desc, n2h_desc_ring, buffer_type)) {
goto next;
}
NSS_PKT_STATS_INCREMENT(nss_ctx, &nss_ctx->nss_top->stats_drv[NSS_STATS_DRV_RX_NR_FRAGS]);
@@ -1388,12 +1429,12 @@
* This is a simple linear skb. Use the the linear skb
* handler to process it.
*/
- if (!nss_core_handle_linear_skb(nss_ctx, &nbuf, &n2h_desc_ring->head, &n2h_desc_ring->tail, desc)) {
+ if (!nss_core_handle_linear_skb(nss_ctx, &nbuf, &n2h_desc_ring->head, &n2h_desc_ring->tail, n2h_desc_ring, desc)) {
goto next;
}
consume:
- nss_core_rx_pbuf(nss_ctx, desc, &(int_ctx->napi), buffer_type, nbuf);
+ nss_core_rx_pbuf(nss_ctx, &(int_ctx->napi), buffer_type, nbuf, n2h_desc_ring->interface_num, n2h_desc_ring->bit_flags);
next:
diff --git a/nss_core.h b/nss_core.h
index 9357f5a..250009a 100644
--- a/nss_core.h
+++ b/nss_core.h
@@ -399,6 +399,8 @@
struct sk_buff *head; /* First segment of an skb fraglist */
struct sk_buff *tail; /* Last segment received of an skb fraglist */
struct sk_buff *jumbo_start; /* First segment of an skb with frags[] */
+ uint32_t interface_num; /* Store head descriptor interface number */
+ uint32_t bit_flags; /* Store head descriptor bit flags */
};
/*
diff --git a/nss_edma_stats.c b/nss_edma_stats.c
index 3f28df6..ae4cf4e 100644
--- a/nss_edma_stats.c
+++ b/nss_edma_stats.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -100,7 +100,8 @@
"pkt_len_la64k_err",
"pkt_len_le33_err",
"data_len_err",
- "alloc_fail_cnt"
+ "alloc_fail_cnt",
+ "qos_inval_dst_drops"
};
/*
@@ -133,10 +134,7 @@
return 0;
}
- /*
- * Note: The assumption here is that we do not have more than 64 stats
- */
- stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
+ stats_shadow = kzalloc(NSS_STATS_NODE_MAX * sizeof(uint64_t), GFP_KERNEL);
if (unlikely(stats_shadow == NULL)) {
nss_warning("Could not allocate memory for local shadow buffer");
kfree(lbuf);
@@ -236,10 +234,7 @@
return 0;
}
- /*
- * Note: The assumption here is that we do not have more than 64 stats
- */
- stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
+ stats_shadow = kzalloc(NSS_EDMA_PORT_RING_MAP_MAX * sizeof(uint64_t), GFP_KERNEL);
if (unlikely(stats_shadow == NULL)) {
nss_warning("Could not allocate memory for local shadow buffer");
kfree(lbuf);
@@ -296,10 +291,7 @@
return 0;
}
- /*
- * Note: The assumption here is that we do not have more than 64 stats
- */
- stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
+ stats_shadow = kzalloc(NSS_EDMA_STATS_TX_MAX * sizeof(uint64_t), GFP_KERNEL);
if (unlikely(stats_shadow == NULL)) {
nss_warning("Could not allocate memory for local shadow buffer");
kfree(lbuf);
@@ -356,10 +348,7 @@
return 0;
}
- /*
- * Note: The assumption here is that we do not have more than 64 stats
- */
- stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
+ stats_shadow = kzalloc(NSS_EDMA_STATS_RX_MAX * sizeof(uint64_t), GFP_KERNEL);
if (unlikely(stats_shadow == NULL)) {
nss_warning("Could not allocate memory for local shadow buffer");
kfree(lbuf);
@@ -416,10 +405,7 @@
return 0;
}
- /*
- * Note: The assumption here is that we do not have more than 64 stats
- */
- stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
+ stats_shadow = kzalloc(NSS_EDMA_STATS_TXCMPL_MAX * sizeof(uint64_t), GFP_KERNEL);
if (unlikely(stats_shadow == NULL)) {
nss_warning("Could not allocate memory for local shadow buffer");
kfree(lbuf);
@@ -476,10 +462,7 @@
return 0;
}
- /*
- * Note: The assumption here is that we do not have more than 64 stats
- */
- stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
+ stats_shadow = kzalloc(NSS_EDMA_STATS_RXFILL_MAX * sizeof(uint64_t), GFP_KERNEL);
if (unlikely(stats_shadow == NULL)) {
nss_warning("Could not allocate memory for local shadow buffer");
kfree(lbuf);
@@ -535,10 +518,7 @@
return 0;
}
- /*
- * Note: The assumption here is that we do not have more than 64 stats
- */
- stats_shadow = kzalloc(64 * 8, GFP_KERNEL);
+ stats_shadow = kzalloc(NSS_EDMA_ERR_STATS_MAX * sizeof(uint64_t), GFP_KERNEL);
if (unlikely(stats_shadow == NULL)) {
nss_warning("Could not allocate memory for local shadow buffer");
kfree(lbuf);
@@ -877,6 +857,8 @@
edma_stats.misc_err[NSS_EDMA_PKT_LEN_LA64K_ERR] += nerss->msg_err_stats.pkt_len_la64k_err;
edma_stats.misc_err[NSS_EDMA_PKT_LEN_LE33_ERR] += nerss->msg_err_stats.pkt_len_le33_err;
edma_stats.misc_err[NSS_EDMA_DATA_LEN_ERR] += nerss->msg_err_stats.data_len_err;
+ edma_stats.misc_err[NSS_EDMA_ALLOC_FAIL_CNT] += nerss->msg_err_stats.alloc_fail_cnt;
+ edma_stats.misc_err[NSS_EDMA_QOS_INVAL_DST_DROPS] += nerss->msg_err_stats.qos_inval_dst_drops;
spin_unlock_bh(&nss_top->stats_lock);
}
diff --git a/nss_edma_stats.h b/nss_edma_stats.h
index 1c61075..063e553 100644
--- a/nss_edma_stats.h
+++ b/nss_edma_stats.h
@@ -1,6 +1,6 @@
/*
******************************************************************************
- * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -83,6 +83,7 @@
NSS_EDMA_PKT_LEN_LE33_ERR,
NSS_EDMA_DATA_LEN_ERR,
NSS_EDMA_ALLOC_FAIL_CNT,
+ NSS_EDMA_QOS_INVAL_DST_DROPS,
NSS_EDMA_ERR_STATS_MAX
};
diff --git a/nss_ipsec.c b/nss_ipsec.c
index 246f122..e67b959 100644
--- a/nss_ipsec.c
+++ b/nss_ipsec.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -45,6 +45,25 @@
#endif
/*
+ * Amount of time a synchronous message should wait for a response from
+ * the NSS before timing out. After the timeout, the response must be
+ * discarded even if it arrives. Typically, the value should be chosen
+ * based on the worst-case response time at peak throughput between the
+ * host and the NSS.
+ */
+#define NSS_IPSEC_TX_TIMEO_TICKS msecs_to_jiffies(3000) /* 3 Seconds */
+
+/*
+ * Private data structure to hold state for
+ * the IPsec-specific NSS interaction.
+ */
+struct nss_ipsec_pvt {
+ struct semaphore sem; /* used for synchronizing 'tx_msg_sync' */
+ struct completion complete; /* completion callback */
+ atomic_t resp; /* Response error type */
+} nss_ipsec;
+
+/*
* nss_ipsec_get_msg_ctx()
* return ipsec message context assoicated with the callback
*
@@ -147,6 +166,31 @@
*/
/*
+ * nss_ipsec_callback()
+ * Callback to handle the completion of NSS->HLOS messages.
+ */
+static void nss_ipsec_callback(void *app_data, struct nss_ipsec_msg *nim)
+{
+ struct nss_cmn_msg *ncm = &nim->cm;
+
+ /*
+	 * This callback is for synchronous operation. The caller passes its
+	 * response pointer, which needs to be loaded with the response
+	 * data arriving from the NSS.
+ */
+ atomic_t *resp = (atomic_t *)app_data;
+
+ if (ncm->response == NSS_CMN_RESPONSE_ACK) {
+ atomic_set(resp, NSS_IPSEC_ERROR_TYPE_NONE);
+ complete(&nss_ipsec.complete);
+ return;
+ }
+
+ atomic_set(resp, ncm->error);
+ complete(&nss_ipsec.complete);
+}
+
+/*
* nss_ipsec_tx_msg
* Send ipsec rule to NSS.
*/
@@ -210,6 +254,96 @@
EXPORT_SYMBOL(nss_ipsec_tx_msg);
/*
+ * nss_ipsec_tx_msg_sync()
+ *	Transmit an IPsec message to the NSS firmware synchronously.
+ */
+nss_tx_status_t nss_ipsec_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num,
+ enum nss_ipsec_msg_type type, uint16_t len,
+ struct nss_ipsec_msg *nim, enum nss_ipsec_error_type *resp)
+{
+ struct nss_ipsec_msg nim_local = { {0} };
+ nss_tx_status_t status;
+ int ret;
+
+ /*
+	 * Length of the message should be based on the type and must fit the payload
+ */
+ if (len > sizeof(nim_local.msg)) {
+ nss_warning("%p: (%u)Bad message length(%u) for type (%d)", nss_ctx, if_num, len, type);
+ return NSS_TX_FAILURE_TOO_LARGE;
+ }
+
+ /*
+	 * A response buffer is required for copying the message response
+ */
+ if (!resp) {
+ nss_warning("%p: (%u)Response buffer is empty, type(%d)", nss_ctx, if_num, type);
+ return NSS_TX_FAILURE_BAD_PARAM;
+ }
+
+ /*
+	 * TODO: This can be removed in the future; we need to ensure that the response
+	 * memory is only updated while the current outstanding request is waiting.
+	 * This can be solved by introducing a sequence number in messages and completing
+	 * the message only if the sequence number matches. For now this is solved by
+	 * passing the known memory nss_ipsec.resp.
+ */
+ down(&nss_ipsec.sem);
+
+ /*
+	 * Initialize the response to a failure error type
+ */
+ atomic_set(&nss_ipsec.resp, NSS_IPSEC_ERROR_TYPE_UNHANDLED_MSG);
+
+ /*
+	 * We need to copy the message content into the actual message
+	 * to be sent to the NSS.
+	 *
+	 * Note: nss_ipsec.resp is passed here as the pointer because the
+	 * caller-provided pointer is not allocated by us and may go away if this
+	 * function returns with a failure; the callback is not aware of this and
+	 * could access a stale pointer, potentially resulting in a crash.
+ */
+ nss_ipsec_msg_init(&nim_local, if_num, type, len, nss_ipsec_callback, &nss_ipsec.resp);
+ memcpy(&nim_local.msg, &nim->msg, len);
+
+ status = nss_ipsec_tx_msg(nss_ctx, &nim_local);
+ if (status != NSS_TX_SUCCESS) {
+ nss_warning("%p: ipsec_tx_msg failed", nss_ctx);
+ goto done;
+ }
+
+ ret = wait_for_completion_timeout(&nss_ipsec.complete, NSS_IPSEC_TX_TIMEO_TICKS);
+ if (!ret) {
+ nss_warning("%p: IPsec msg tx failed due to timeout", nss_ctx);
+ status = NSS_TX_FAILURE_NOT_ENABLED;
+ goto done;
+ }
+
+ /*
+ * Read memory barrier
+ */
+ smp_rmb();
+
+ /*
+ * Copy the response received
+ */
+ *resp = atomic_read(&nss_ipsec.resp);
+
+ /*
+	 * Indicate success only in the case of a
+	 * non-error response
+ */
+ if (*resp != NSS_IPSEC_ERROR_TYPE_NONE)
+ status = NSS_TX_FAILURE;
+
+done:
+ up(&nss_ipsec.sem);
+ return status;
+}
+EXPORT_SYMBOL(nss_ipsec_tx_msg_sync);
+
+/*
* nss_ipsec_tx_buf
* Send data packet for ipsec processing
*/
@@ -397,7 +531,7 @@
/*
* nss_ipsec_get_context()
- * get NSS context instance for IPsec handle
+ * Get NSS context instance for IPsec handle
*/
struct nss_ctx_instance *nss_ipsec_get_context(void)
{
@@ -406,6 +540,28 @@
EXPORT_SYMBOL(nss_ipsec_get_context);
/*
+ * nss_ipsec_ppe_port_config()
+ * Configure PPE port for IPsec inline
+ */
+bool nss_ipsec_ppe_port_config(struct nss_ctx_instance *nss_ctx, struct net_device *dev,
+ uint32_t if_num, uint32_t vsi_num)
+{
+#ifdef NSS_PPE_SUPPORTED
+ if_num = NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num);
+
+ if (nss_ppe_tx_ipsec_config_msg(if_num, vsi_num, dev->mtu) != NSS_TX_SUCCESS) {
+ nss_warning("%p: Failed to configure PPE IPsec port", nss_ctx);
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
+}
+EXPORT_SYMBOL(nss_ipsec_ppe_port_config);
+
+/*
* nss_ipsec_register_handler()
*/
void nss_ipsec_register_handler()
@@ -415,6 +571,10 @@
BUILD_BUG_ON(NSS_IPSEC_ENCAP_INTERFACE_NUM < 0);
BUILD_BUG_ON(NSS_IPSEC_DECAP_INTERFACE_NUM < 0);
+ sema_init(&nss_ipsec.sem, 1);
+ init_completion(&nss_ipsec.complete);
+ atomic_set(&nss_ipsec.resp, NSS_IPSEC_ERROR_TYPE_NONE);
+
nss_ctx->nss_top->ipsec_encap_callback = NULL;
nss_ctx->nss_top->ipsec_decap_callback = NULL;
diff --git a/nss_ppe.c b/nss_ppe.c
index a914880..90f0c5d 100644
--- a/nss_ppe.c
+++ b/nss_ppe.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -196,6 +196,106 @@
EXPORT_SYMBOL(nss_ppe_tx_l2_exception_msg);
/*
+ * nss_ppe_tx_ipsec_config_msg
+ *	API to send the inline IPsec port configuration message to the NSS firmware
+ */
+nss_tx_status_t nss_ppe_tx_ipsec_config_msg(uint32_t nss_ifnum, uint32_t vsi_num, uint16_t mtu)
+{
+ struct nss_ctx_instance *nss_ctx = nss_ppe_get_context();
+ struct nss_ppe_msg npm = {0};
+
+ if (!nss_ctx) {
+ nss_warning("Can't get nss context\n");
+ return NSS_TX_FAILURE;
+ }
+
+ if (vsi_num >= NSS_PPE_VSI_NUM_MAX) {
+ nss_warning("Invalid vsi number:%u\n", vsi_num);
+ return NSS_TX_FAILURE;
+ }
+
+ nss_ppe_msg_init(&npm, NSS_PPE_INTERFACE, NSS_PPE_MSG_IPSEC_PORT_CONFIG,
+ sizeof(struct nss_ppe_ipsec_port_config_msg), NULL, NULL);
+
+ npm.msg.ipsec_config.nss_ifnum = nss_ifnum;
+ npm.msg.ipsec_config.vsi_num = vsi_num;
+ npm.msg.ipsec_config.mtu = mtu;
+
+ return nss_ppe_tx_msg_sync(nss_ctx, &npm);
+}
+EXPORT_SYMBOL(nss_ppe_tx_ipsec_config_msg);
+
+/*
+ * nss_ppe_tx_ipsec_mtu_msg
+ *	API to send the inline IPsec port MTU change message to the NSS firmware
+ */
+nss_tx_status_t nss_ppe_tx_ipsec_mtu_msg(uint32_t nss_ifnum, uint16_t mtu)
+{
+ struct nss_ctx_instance *nss_ctx = nss_ppe_get_context();
+ struct nss_ppe_msg npm = {0};
+
+ if (!nss_ctx) {
+ nss_warning("Can't get nss context\n");
+ return NSS_TX_FAILURE;
+ }
+
+ nss_ppe_msg_init(&npm, NSS_PPE_INTERFACE, NSS_PPE_MSG_IPSEC_PORT_MTU_CHANGE,
+ sizeof(struct nss_ppe_ipsec_port_mtu_msg), NULL, NULL);
+
+ npm.msg.ipsec_mtu.nss_ifnum = nss_ifnum;
+ npm.msg.ipsec_mtu.mtu = mtu;
+
+ return nss_ppe_tx_msg_sync(nss_ctx, &npm);
+}
+EXPORT_SYMBOL(nss_ppe_tx_ipsec_mtu_msg);
+
+/*
+ * nss_ppe_tx_ipsec_add_intf_msg
+ * API to attach NSS interface to IPsec port
+ */
+nss_tx_status_t nss_ppe_tx_ipsec_add_intf_msg(uint32_t nss_ifnum)
+{
+ struct nss_ctx_instance *nss_ctx = nss_ppe_get_context();
+ struct nss_ppe_msg npm = {0};
+
+ if (!nss_ctx) {
+ nss_warning("Can't get nss context\n");
+ return NSS_TX_FAILURE;
+ }
+
+ nss_ppe_msg_init(&npm, NSS_PPE_INTERFACE, NSS_PPE_MSG_IPSEC_ADD_INTF,
+ sizeof(struct nss_ppe_ipsec_add_intf_msg), NULL, NULL);
+
+ npm.msg.ipsec_addif.nss_ifnum = nss_ifnum;
+
+ return nss_ppe_tx_msg_sync(nss_ctx, &npm);
+}
+EXPORT_SYMBOL(nss_ppe_tx_ipsec_add_intf_msg);
+
+/*
+ * nss_ppe_tx_ipsec_del_intf_msg
+ *	API to detach NSS interface from the inline IPsec port
+ */
+nss_tx_status_t nss_ppe_tx_ipsec_del_intf_msg(uint32_t nss_ifnum)
+{
+ struct nss_ctx_instance *nss_ctx = nss_ppe_get_context();
+ struct nss_ppe_msg npm = {0};
+
+ if (!nss_ctx) {
+ nss_warning("Can't get nss context\n");
+ return NSS_TX_FAILURE;
+ }
+
+ nss_ppe_msg_init(&npm, NSS_PPE_INTERFACE, NSS_PPE_MSG_IPSEC_DEL_INTF,
+ sizeof(struct nss_ppe_ipsec_del_intf_msg), NULL, NULL);
+
+ npm.msg.ipsec_delif.nss_ifnum = nss_ifnum;
+
+ return nss_ppe_tx_msg_sync(nss_ctx, &npm);
+}
+EXPORT_SYMBOL(nss_ppe_tx_ipsec_del_intf_msg);
+
+/*
* nss_ppe_handler()
* Handle NSS -> HLOS messages for ppe
*/
diff --git a/nss_ppe.h b/nss_ppe.h
index bc5e53f..797793a 100644
--- a/nss_ppe.h
+++ b/nss_ppe.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
@@ -71,6 +71,11 @@
#define NSS_PPE_TX_TIMEOUT 1000 /* 1 Second */
/*
+ * Maximum number of VSIs
+ */
+#define NSS_PPE_VSI_NUM_MAX 32
+
+/*
* ppe nss debug stats lock
*/
extern spinlock_t nss_ppe_stats_lock;