[qca-nss-sfe] Use pull mode for syncing status
When pull mode is invoked from the ECM, SFE packs the sync statistics
of as many connections as will fit into a single page.
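
A rough sketch of how the pull could be driven from the ECM side follows.
The helper name ecm_sfe_pull_ipv4_stats and any field of
sfe_ipv4_conn_sync_many_msg other than index are assumptions for
illustration only, not part of this change; the message setup simply
follows the sfe_cmn_msg_init()/sfe_ipv4_tx() pattern already used in sfe.c.

/*
 * ecm_sfe_pull_ipv4_stats()
 *	Hypothetical ECM-side pull of IPv4 connection statistics.
 *
 * sfe_ctx is the context returned by sfe_ipv4_notify_register(); index is
 * the connection index to resume the scan from (type assumed u32 here).
 */
static void ecm_sfe_pull_ipv4_stats(struct sfe_ctx_instance *sfe_ctx, u32 index)
{
	struct sfe_ipv4_msg msg;
	sfe_tx_status_t ret;

	memset(&msg, 0, sizeof(msg));
	sfe_cmn_msg_init(&msg.cm, 0, SFE_TX_CONN_STATS_SYNC_MANY_MSG,
			 sizeof(struct sfe_ipv4_conn_sync_many_msg), NULL, NULL);
	msg.msg.conn_stats_many.index = index;

	/*
	 * SFE packs up to a page of per-connection sync entries and hands
	 * them back through the many_rules_cb registered with
	 * sfe_ipv4_notify_register().
	 */
	ret = sfe_ipv4_tx(sfe_ctx, &msg);
	if (ret != SFE_TX_SUCCESS) {
		pr_debug("IPv4 stats pull rejected by SFE: %d\n", ret);
	}
}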
Change-Id: Ia1d34cd21d519a01d5d9f4013a7cb24227f26273
Signed-off-by: Ken Zhu <quic_guigenz@quicinc.com>
diff --git a/sfe.c b/sfe.c
index 254d5b5..8dbfdf4 100644
--- a/sfe.c
+++ b/sfe.c
@@ -110,9 +110,11 @@
struct work_struct work; /* Work to send response message back to caller*/
sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_cb; /* Callback to call to sync ipv4 statistics */
+ sfe_ipv4_msg_callback_t __rcu ipv4_stats_sync_many_cb; /* Callback to call to sync many ipv4 statistics */
void *ipv4_stats_sync_data; /* Argument for above callback: ipv4_stats_sync_cb */
sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_cb; /* Callback to call to sync ipv6 statistics */
+ sfe_ipv6_msg_callback_t __rcu ipv6_stats_sync_many_cb; /* Callback to call to sync many ipv6 statistics */
void *ipv6_stats_sync_data; /* Argument for above callback: ipv6_stats_sync_cb */
u32 exceptions[SFE_EXCEPTION_MAX]; /* Statistics for exception */
@@ -458,32 +460,35 @@
}
/*
- * sfe_ipv4_stats_sync_callback()
- * Synchronize a connection's state.
+ * sfe_ipv4_stats_many_sync_callback()
+ *	Synchronize the state of many connections.
*
- * @param sis SFE statistics from SFE core engine
+ * @param msg The IPv4 message carrying connection statistics from the SFE core engine
*/
-static void sfe_ipv4_stats_sync_callback(struct sfe_connection_sync *sis)
+static void sfe_ipv4_stats_many_sync_callback(struct sfe_ipv4_msg *msg)
{
struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
- struct sfe_ipv4_msg msg;
- struct sfe_ipv4_conn_sync *sync_msg;
sfe_ipv4_msg_callback_t sync_cb;
rcu_read_lock();
- sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
+ sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_many_cb);
+ rcu_read_unlock();
if (!sync_cb) {
- rcu_read_unlock();
sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
return;
}
+ sync_cb(sfe_ctx->ipv4_stats_sync_data, msg);
+}
- sync_msg = &msg.msg.conn_stats;
-
- memset(&msg, 0, sizeof(msg));
- sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
- sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);
-
+/*
+ * sfe_ipv4_stats_convert()
+ *	Convert the internal connection sync format to the ECM message format.
+ *
+ * @param sync_msg Connection statistics message to be sent to the ECM
+ * @param sis SFE statistics from SFE core engine
+ */
+void sfe_ipv4_stats_convert(struct sfe_ipv4_conn_sync *sync_msg, struct sfe_connection_sync *sis)
+{
/*
* Fill connection specific information
*/
@@ -543,12 +548,42 @@
sync_msg->reason = SFE_RULE_SYNC_REASON_STATS;
break;
}
+ return;
+}
+
+/*
+ * sfe_ipv4_stats_one_sync_callback()
+ * Synchronize a connection's state.
+ *
+ * @param sis SFE statistics from SFE core engine
+ */
+static void sfe_ipv4_stats_one_sync_callback(struct sfe_connection_sync *sis)
+{
+ struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
+ struct sfe_ipv4_msg msg;
+ struct sfe_ipv4_conn_sync *sync_msg;
+ sfe_ipv4_msg_callback_t sync_cb;
+
+ rcu_read_lock();
+ sync_cb = rcu_dereference(sfe_ctx->ipv4_stats_sync_cb);
+ rcu_read_unlock();
+ if (!sync_cb) {
+ sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
+ return;
+ }
+
+ sync_msg = &msg.msg.conn_stats;
+
+ memset(&msg, 0, sizeof(msg));
+ sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
+ sizeof(struct sfe_ipv4_conn_sync), NULL, NULL);
+
+ sfe_ipv4_stats_convert(sync_msg, sis);
/*
* SFE sync calling is excuted in a timer, so we can redirect it to ECM directly.
*/
sync_cb(sfe_ctx->ipv4_stats_sync_data, &msg);
- rcu_read_unlock();
}
/*
@@ -767,6 +802,26 @@
}
/*
+ * sfe_sync_ipv4_stats_many_msg()
+ *	Sync connection statistics in response to a pull request from the ECM
+ *
+ * @param sfe_ctx SFE context
+ * @param msg The IPv4 message
+ *
+ * @return sfe_tx_status_t The status of the Tx operation
+ */
+sfe_tx_status_t sfe_sync_ipv4_stats_many_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv4_msg *msg)
+{
+ struct sfe_ipv4_conn_sync_many_msg *nicsm;
+ nicsm = &(msg->msg.conn_stats_many);
+
+ if (sfe_ipv4_sync_invoke(nicsm->index)) {
+ return SFE_TX_SUCCESS;
+ }
+ return SFE_TX_FAILURE;
+}
+
+/*
* sfe_ipv4_tx()
* Transmit an IPv4 message to the sfe
*
@@ -782,6 +837,8 @@
return sfe_create_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
case SFE_TX_DESTROY_RULE_MSG:
return sfe_destroy_ipv4_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
+ case SFE_TX_CONN_STATS_SYNC_MANY_MSG:
+		return sfe_sync_ipv4_stats_many_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
default:
sfe_incr_exceptions(SFE_EXCEPTION_IPV4_MSG_UNKNOW);
return SFE_TX_FAILURE_NOT_ENABLED;
@@ -819,7 +876,8 @@
*
* @return struct sfe_ctx_instance * The SFE context
*/
-struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t cb, void *app_data)
+struct sfe_ctx_instance *sfe_ipv4_notify_register(sfe_ipv4_msg_callback_t one_rule_cb,
+	sfe_ipv4_msg_callback_t many_rules_cb, void *app_data)
{
struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
@@ -827,11 +885,16 @@
/*
* Hook the shortcut sync callback.
*/
- if (cb && !sfe_ctx->ipv4_stats_sync_cb) {
- sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_sync_callback);
+ if (one_rule_cb && !sfe_ctx->ipv4_stats_sync_cb) {
+ sfe_ipv4_register_sync_rule_callback(sfe_ipv4_stats_one_sync_callback);
}
+ rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, one_rule_cb);
- rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, cb);
+ if (many_rules_cb && !sfe_ctx->ipv4_stats_sync_many_cb) {
+ sfe_ipv4_register_many_sync_callback(sfe_ipv4_stats_many_sync_callback);
+ }
+ rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_many_cb, many_rules_cb);
+
sfe_ctx->ipv4_stats_sync_data = app_data;
spin_unlock_bh(&sfe_ctx->lock);
@@ -842,54 +905,68 @@
/*
* sfe_ipv4_notify_unregister()
- * Un-Register a notifier callback for IPv4 messages from SFE
+ * Un-Register the notifier callback for IPv4 messages from SFE
*/
void sfe_ipv4_notify_unregister(void)
{
struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
spin_lock_bh(&sfe_ctx->lock);
+
/*
- * Unregister our sync callback.
+	 * Unregister our single-rule sync callback.
*/
if (sfe_ctx->ipv4_stats_sync_cb) {
sfe_ipv4_register_sync_rule_callback(NULL);
rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_cb, NULL);
- sfe_ctx->ipv4_stats_sync_data = NULL;
}
+
+ /*
+	 * Unregister our many-rule sync callback.
+ */
+ if (sfe_ctx->ipv4_stats_sync_many_cb) {
+ sfe_ipv4_register_many_sync_callback(NULL);
+ rcu_assign_pointer(sfe_ctx->ipv4_stats_sync_many_cb, NULL);
+ }
+
+ sfe_ctx->ipv4_stats_sync_data = NULL;
+
spin_unlock_bh(&sfe_ctx->lock);
sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV4);
-
return;
}
EXPORT_SYMBOL(sfe_ipv4_notify_unregister);
/*
- * sfe_ipv6_stats_sync_callback()
- * Synchronize a connection's state.
+ * sfe_ipv6_many_stats_sync_callback()
+ *	Synchronize the state of many connections.
*/
-static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
+static void sfe_ipv6_many_stats_sync_callback(struct sfe_ipv6_msg *msg)
{
struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
- struct sfe_ipv6_msg msg;
- struct sfe_ipv6_conn_sync *sync_msg;
sfe_ipv6_msg_callback_t sync_cb;
rcu_read_lock();
- sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
+ sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_many_cb);
+ rcu_read_unlock();
if (!sync_cb) {
- rcu_read_unlock();
sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
return;
}
- sync_msg = &msg.msg.conn_stats;
+ sync_cb(sfe_ctx->ipv6_stats_sync_data, msg);
+}
- memset(&msg, 0, sizeof(msg));
- sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
- sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);
-
+/*
+ * sfe_ipv6_stats_convert()
+ *	Convert the internal connection sync format to the ECM message format.
+ *
+ * @param sync_msg Connection statistics message to be sent to the ECM
+ * @param sis SFE statistics from SFE core engine
+ */
+void sfe_ipv6_stats_convert(struct sfe_ipv6_conn_sync *sync_msg, struct sfe_connection_sync *sis)
+{
/*
* Fill connection specific information
*/
@@ -946,11 +1023,40 @@
break;
}
+ return;
+}
+
+/*
+ * sfe_ipv6_stats_sync_callback()
+ * Synchronize a connection's state.
+ */
+static void sfe_ipv6_stats_sync_callback(struct sfe_connection_sync *sis)
+{
+ struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
+ struct sfe_ipv6_msg msg;
+ struct sfe_ipv6_conn_sync *sync_msg;
+ sfe_ipv6_msg_callback_t sync_cb;
+
+ rcu_read_lock();
+ sync_cb = rcu_dereference(sfe_ctx->ipv6_stats_sync_cb);
+ rcu_read_unlock();
+ if (!sync_cb) {
+ sfe_incr_exceptions(SFE_EXCEPTION_NO_SYNC_CB);
+ return;
+ }
+
+ sync_msg = &msg.msg.conn_stats;
+
+ memset(&msg, 0, sizeof(msg));
+ sfe_cmn_msg_init(&msg.cm, 0, SFE_RX_CONN_STATS_SYNC_MSG,
+ sizeof(struct sfe_ipv6_conn_sync), NULL, NULL);
+
+ sfe_ipv6_stats_convert(sync_msg, sis);
+
/*
* SFE sync calling is excuted in a timer, so we can redirect it to ECM directly.
*/
sync_cb(sfe_ctx->ipv6_stats_sync_data, &msg);
- rcu_read_unlock();
}
/*
@@ -1122,6 +1228,26 @@
}
/*
+ * sfe_sync_ipv6_stats_many_msg()
+ *	Sync connection statistics in response to a pull request from the ECM
+ *
+ * @param sfe_ctx SFE context
+ * @param msg The IPv6 message
+ *
+ * @return sfe_tx_status_t The status of the Tx operation
+ */
+sfe_tx_status_t sfe_sync_ipv6_stats_many_msg(struct sfe_ctx_instance_internal *sfe_ctx, struct sfe_ipv6_msg *msg)
+{
+ struct sfe_ipv6_conn_sync_many_msg *nicsm;
+ nicsm = &(msg->msg.conn_stats_many);
+
+ if (sfe_ipv6_sync_invoke(nicsm->index)) {
+ return SFE_TX_SUCCESS;
+ }
+ return SFE_TX_FAILURE;
+}
+
+/*
* sfe_ipv6_tx()
* Transmit an IPv6 message to the sfe
*
@@ -1137,6 +1263,8 @@
return sfe_create_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
case SFE_TX_DESTROY_RULE_MSG:
return sfe_destroy_ipv6_rule_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
+ case SFE_TX_CONN_STATS_SYNC_MANY_MSG:
+ return sfe_sync_ipv6_stats_many_msg(SFE_CTX_TO_PRIVATE(sfe_ctx), msg);
default:
sfe_incr_exceptions(SFE_EXCEPTION_IPV6_MSG_UNKNOW);
return SFE_TX_FAILURE_NOT_ENABLED;
@@ -1169,12 +1297,14 @@
* sfe_ipv6_notify_register()
* Register a notifier callback for IPv6 messages from SFE
*
- * @param cb The callback pointer
+ * @param one_rule_cb The callback pointer for single-rule sync
+ * @param many_rule_cb The callback pointer for many-rule sync
* @param app_data The application context for this message
*
* @return struct sfe_ctx_instance * The SFE context
*/
-struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t cb, void *app_data)
+struct sfe_ctx_instance *sfe_ipv6_notify_register(sfe_ipv6_msg_callback_t one_rule_cb,
+ sfe_ipv6_msg_callback_t many_rule_cb, void *app_data)
{
struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
@@ -1182,11 +1312,16 @@
/*
* Hook the shortcut sync callback.
*/
- if (cb && !sfe_ctx->ipv6_stats_sync_cb) {
+ if (one_rule_cb && !sfe_ctx->ipv6_stats_sync_cb) {
sfe_ipv6_register_sync_rule_callback(sfe_ipv6_stats_sync_callback);
}
+ rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, one_rule_cb);
- rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, cb);
+ if (many_rule_cb && !sfe_ctx->ipv6_stats_sync_many_cb) {
+ sfe_ipv6_register_many_sync_callback(sfe_ipv6_many_stats_sync_callback);
+ }
+ rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_many_cb, many_rule_cb);
+
sfe_ctx->ipv6_stats_sync_data = app_data;
spin_unlock_bh(&sfe_ctx->lock);
@@ -1210,12 +1345,17 @@
if (sfe_ctx->ipv6_stats_sync_cb) {
sfe_ipv6_register_sync_rule_callback(NULL);
rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_cb, NULL);
- sfe_ctx->ipv6_stats_sync_data = NULL;
}
+
+ if (sfe_ctx->ipv6_stats_sync_many_cb) {
+ sfe_ipv6_register_many_sync_callback(NULL);
+ rcu_assign_pointer(sfe_ctx->ipv6_stats_sync_many_cb, NULL);
+ }
+
+ sfe_ctx->ipv6_stats_sync_data = NULL;
spin_unlock_bh(&sfe_ctx->lock);
sfe_clean_response_msg_by_type(sfe_ctx, SFE_MSG_TYPE_IPV6);
-
return;
}
EXPORT_SYMBOL(sfe_ipv6_notify_unregister);