Apply patch from SPF11.1.CSU1 to SPF11.3.CSU1
Update nss-dp to SPF11.3.CSU1.
Only changes from our side were in the Makefile; I've carried those
forward in this commit.
Change-Id: I9eda7242527986ad72509106e9d4f9b1f035ac67
diff --git a/Makefile b/Makefile
index 08bc6b3..c6acee8 100644
--- a/Makefile
+++ b/Makefile
@@ -5,15 +5,15 @@
obj ?= .
obj-m += qca-nss-dp.o
+
+# Cradlepoint Makefile changes begin
ccflags-y += -I$(ROOT)/qcom/drivers/switch/qca-ssdk/include/
ccflags-y += -I$(ROOT)/qcom/drivers/switch/qca-ssdk/include/
ccflags-y += -I$(ROOT)/qcom/drivers/switch/qca-ssdk/include/common
ccflags-y += -I$(ROOT)/qcom/drivers/switch/qca-ssdk/include/sal/os
ccflags-y += -I$(ROOT)/qcom/drivers/switch/qca-ssdk/include/sal/os/linux
-qca-nss-dp-objs += edma/edma_cfg.o \
- edma/edma_data_plane.o \
- edma/edma_tx_rx.o \
- nss_dp_attach.o \
+
+qca-nss-dp-objs += nss_dp_attach.o \
nss_dp_ethtools.o \
nss_dp_main.o
@@ -21,10 +21,20 @@
qca-nss-dp-objs += nss_dp_switchdev.o
endif
-qca-nss-dp-objs += gmac_hal_ops/qcom/qcom_if.o
-qca-nss-dp-objs += gmac_hal_ops/syn/syn_if.o
+ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64 ipq60xx ipq60xx_64))
+qca-nss-dp-objs += hal/edma/edma_cfg.o \
+ hal/edma/edma_data_plane.o \
+ hal/edma/edma_tx_rx.o \
+ hal/gmac_hal_ops/qcom/qcom_if.o \
+ hal/gmac_hal_ops/syn/xgmac/syn_if.o
+endif
-NSS_DP_INCLUDE = -I$(obj)/include -I$(obj)/exports -I$(obj)/gmac_hal_ops/include
+NSS_DP_INCLUDE = -I$(obj)/include -I$(obj)/exports -I$(obj)/gmac_hal_ops/include \
+ -I$(obj)/hal/include
+
+ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64))
+NSS_DP_INCLUDE += -I$(obj)/hal/gmac_hal_ops/syn/gmac
+endif
ccflags-y += $(NSS_DP_INCLUDE)
ccflags-y += -Werror
@@ -34,9 +44,20 @@
endif
ifeq ($(SoC),$(filter $(SoC),ipq60xx ipq60xx_64))
+qca-nss-dp-objs += hal/arch/ipq60xx/nss_ipq60xx.o
ccflags-y += -DNSS_DP_IPQ60XX
endif
ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64))
+qca-nss-dp-objs += hal/arch/ipq807x/nss_ipq807x.o
ccflags-y += -DNSS_DP_IPQ807X
endif
+
+ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64))
+qca-nss-dp-objs += hal/arch/ipq50xx/nss_ipq50xx.o \
+ hal/gmac_hal_ops/syn/gmac/syn_if.o \
+ hal/syn_gmac_dp/syn_data_plane.o \
+ hal/syn_gmac_dp/syn_dp_tx_rx.o \
+ hal/syn_gmac_dp/syn_dp_cfg.o
+ccflags-y += -DNSS_DP_IPQ50XX
+endif
diff --git a/exports/nss_dp_api_if.h b/exports/nss_dp_api_if.h
index bf95190..2710b79 100644
--- a/exports/nss_dp_api_if.h
+++ b/exports/nss_dp_api_if.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -15,13 +15,24 @@
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
-/*
- * nss_dp_api_if.h
- * nss-dp exported structure/apis.
+
+/**
+ * @file nss_dp_api_if.h
+ * nss-dp exported structures/apis.
+ *
+ * This file declares all the public interfaces
+ * for NSS data-plane driver.
*/
-#ifndef __DP_API_IF_H
-#define __DP_API_IF_H
+#ifndef __NSS_DP_API_IF_H
+#define __NSS_DP_API_IF_H
+
+#include "nss_dp_arch.h"
+
+/**
+ * @addtogroup nss_dp_subsystem
+ * @{
+ */
/*
* NSS DP status
@@ -30,20 +41,41 @@
#define NSS_DP_FAILURE -1
/*
+ * NSS DP platform specific defines
+ */
+#define NSS_DP_START_IFNUM NSS_DP_HAL_START_IFNUM
+ /**< First GMAC interface number (0/1) depending on SoC. */
+#define NSS_DP_MAX_MTU_SIZE NSS_DP_HAL_MAX_MTU_SIZE
+#define NSS_DP_MAX_PACKET_LEN NSS_DP_HAL_MAX_PACKET_LEN
+#define NSS_DP_MAX_INTERFACES (NSS_DP_HAL_MAX_PORTS + NSS_DP_HAL_START_IFNUM)
+ /**< Last interface index for the SoC, to be used by qca-nss-drv. */
+
+/*
* NSS PTP service code
*/
#define NSS_PTP_EVENT_SERVICE_CODE 0x9
-/*
- * data plane context base class
+/**
+ * nss_dp_data_plane_ctx
+ * Data plane context base class.
*/
struct nss_dp_data_plane_ctx {
struct net_device *dev;
};
-/*
- * NSS data plane ops, default would be slowpath and can be overridden by
- * nss-drv
+/**
+ * nss_dp_gmac_stats
+ * The per-GMAC statistics structure.
+ */
+struct nss_dp_gmac_stats {
+ struct nss_dp_hal_gmac_stats stats;
+};
+
+/**
+ * nss_dp_data_plane_ops
+ * Per data-plane ops structure.
+ *
+ * Default would be slowpath and can be overridden by nss-drv
*/
struct nss_dp_data_plane_ops {
int (*init)(struct nss_dp_data_plane_ctx *dpc);
@@ -62,50 +94,126 @@
int (*vsi_unassign)(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi);
int (*rx_flow_steer)(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb,
uint32_t cpu, bool is_add);
+ void (*get_stats)(struct nss_dp_data_plane_ctx *dpc, struct nss_dp_gmac_stats *stats);
int (*deinit)(struct nss_dp_data_plane_ctx *dpc);
};
-/*
- * nss_dp_receive()
+/**
+ * nss_dp_receive
+ * Called by overlay drivers to deliver packets to nss-dp.
+ *
+ * @datatypes
+ * net_device
+ * sk_buff
+ * napi_struct
+ *
+ * @param[in] netdev Pointer to netdev structure on which packet is received.
+ * @param[in] skb Pointer to the received packet.
+ * @param[in] napi Pointer to napi context.
*/
void nss_dp_receive(struct net_device *netdev, struct sk_buff *skb,
struct napi_struct *napi);
-/*
- * nss_dp_is_in_open_state()
+/**
+ * nss_dp_is_in_open_state
+ * Returns if a data plane is opened or not.
+ *
+ * @datatypes
+ * net_device
+ *
+ * @param[in] netdev Pointer to netdev structure.
+ *
+ * @return
+ * bool
*/
bool nss_dp_is_in_open_state(struct net_device *netdev);
-/*
- * nss_dp_override_data_palne()
+/**
+ * nss_dp_override_data_plane
+ * API to allow overlay drivers to override the data plane.
+ *
+ * @datatypes
+ * net_device
+ * nss_dp_data_plane_ops
+ * nss_dp_data_plane_ctx
+ *
+ * @param[in] netdev Pointer to netdev structure.
+ * @param[in] dp_ops Pointer to respective data plane ops structure.
+ * @param[in] dpc Pointer to data plane context.
+ *
+ * @return
+ * int
*/
int nss_dp_override_data_plane(struct net_device *netdev,
struct nss_dp_data_plane_ops *dp_ops,
struct nss_dp_data_plane_ctx *dpc);
-/*
- * nss_dp_start_data_plane()
+/**
+ * nss_dp_start_data_plane
+ * Dataplane API to inform netdev when it is ready to start.
+ *
+ * @datatypes
+ * net_device
+ * nss_dp_data_plane_ctx
+ *
+ * @param[in] netdev Pointer to netdev structure.
+ * @param[in] dpc Pointer to data plane context.
*/
void nss_dp_start_data_plane(struct net_device *netdev,
struct nss_dp_data_plane_ctx *dpc);
-/*
- * nss_dp_restore_data_plane()
+/**
+ * nss_dp_restore_data_plane
+ * Called by overlay drivers to detach itself from nss-dp.
+ *
+ * @datatypes
+ * net_device
+ *
+ * @param[in] netdev Pointer to netdev structure.
*/
void nss_dp_restore_data_plane(struct net_device *netdev);
-/*
- * nss_dp_get_netdev_by_macid()
+/**
+ * nss_dp_get_netdev_by_nss_if_num
+ * Returns the net device of the corresponding id if it exists.
+ *
+ * @datatypes
+ * int
+ *
+ * @param[in] if_num ID of the physical mac port.
+ *
+ * @return
+ * Pointer to netdev structure.
*/
-struct net_device *nss_dp_get_netdev_by_macid(int macid);
+struct net_device *nss_dp_get_netdev_by_nss_if_num(int if_num);
-/*
- * nss_phy_tstamp_rx_buf()
+/**
+ * nss_phy_tstamp_rx_buf
+ * Receive timestamp packet.
+ *
+ * @datatypes
+ * sk_buff
+ *
+ * @param[in] app_data Pointer to the application context of the message.
+ * @param[in] skb Pointer to the packet.
*/
void nss_phy_tstamp_rx_buf(void *app_data, struct sk_buff *skb);
-/*
- * nss_phy_tstamp_tx_buf()
+/**
+ * nss_phy_tstamp_tx_buf
+ *	Transmit timestamp packet.
+ *
+ * @datatypes
+ * net_device
+ * sk_buff
+ *
+ * @param[in] ndev Pointer to netdev structure.
+ * @param[in] skb Pointer to the packet.
*/
void nss_phy_tstamp_tx_buf(struct net_device *ndev, struct sk_buff *skb);
-#endif /* __DP_API_IF_H */
+
+/**
+ *@}
+ */
+
+#endif /* __NSS_DP_API_IF_H */
diff --git a/hal/arch/ipq50xx/nss_ipq50xx.c b/hal/arch/ipq50xx/nss_ipq50xx.c
new file mode 100644
index 0000000..ba32ae8
--- /dev/null
+++ b/hal/arch/ipq50xx/nss_ipq50xx.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/ioport.h>
+#include <linux/qcom_scm.h>
+#include "nss_dp_hal.h"
+
+/*
+ * nss_dp_hal_tcsr_base_get
+ * Reads TCSR base address from DTS
+ */
+static uint32_t nss_dp_hal_tcsr_base_get(void)
+{
+ uint32_t tcsr_base_addr = 0;
+ struct device_node *dp_cmn;
+
+ /*
+ * Get reference to NSS dp common device node
+ */
+ dp_cmn = of_find_node_by_name(NULL, "nss-dp-common");
+ if (!dp_cmn) {
+ pr_info("%s: NSS DP common node not found\n", __func__);
+ return 0;
+ }
+
+ if (of_property_read_u32(dp_cmn, "qcom,tcsr-base", &tcsr_base_addr)) {
+ pr_err("%s: error reading TCSR base\n", __func__);
+ }
+ of_node_put(dp_cmn);
+
+ return tcsr_base_addr;
+}
+
+/*
+ * nss_dp_hal_tcsr_set
+ * Sets the TCSR axi cache override register
+ */
+static void nss_dp_hal_tcsr_set(void)
+{
+ void __iomem *tcsr_addr = NULL;
+ uint32_t tcsr_base;
+ int err;
+
+ tcsr_base = nss_dp_hal_tcsr_base_get();
+ if (!tcsr_base) {
+ pr_err("%s: Unable to get TCSR base address\n", __func__);
+ return;
+ }
+
+ /*
+ * Check if Trust Zone is enabled in the system.
+ * If yes, we need to go through SCM API call to program TCSR register.
+ * If TZ is not enabled, we can write to the register directly.
+ */
+ if (qcom_scm_is_available()) {
+ err = qcom_scm_tcsr_reg_write((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET),
+ TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE);
+ if (err) {
+ pr_err("%s: SCM TCSR write error: %d\n", __func__, err);
+ }
+ } else {
+ tcsr_addr = ioremap_nocache((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET),
+ TCSR_GMAC_AXI_CACHE_OVERRIDE_REG_SIZE);
+ if (!tcsr_addr) {
+ pr_err("%s: ioremap failed\n", __func__);
+ return;
+ }
+ writel(TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE, tcsr_addr);
+ iounmap(tcsr_addr);
+ }
+}
+
+/*
+ * nss_dp_hal_get_data_plane_ops
+ * Return the data plane ops for GMAC data plane.
+ */
+struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void)
+{
+ return &nss_dp_gmac_ops;
+}
+
+/*
+ * nss_dp_hal_clk_enable
+ * Function to enable GCC_SNOC_GMAC_AXI_CLK.
+ *
+ * These clocks are required for GMAC operations.
+ */
+void nss_dp_hal_clk_enable(struct nss_dp_dev *dp_priv)
+{
+ struct platform_device *pdev = dp_priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct clk *gmac_clk = NULL;
+ int err;
+
+ gmac_clk = devm_clk_get(dev, NSS_SNOC_GMAC_AXI_CLK);
+ if (IS_ERR(gmac_clk)) {
+ pr_err("%s: cannot get clock: %s\n", __func__,
+ NSS_SNOC_GMAC_AXI_CLK);
+ return;
+ }
+
+ err = clk_prepare_enable(gmac_clk);
+ if (err) {
+ pr_err("%s: cannot enable clock: %s, err: %d\n", __func__,
+ NSS_SNOC_GMAC_AXI_CLK, err);
+ return;
+ }
+}
+
+/*
+ * nss_dp_hal_init
+ * Sets the gmac ops based on the GMAC type.
+ */
+bool nss_dp_hal_init(void)
+{
+ nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_GMAC);
+
+ /*
+ * Program the global GMAC AXI Cache override register
+ * for optimized AXI DMA operation.
+ */
+ nss_dp_hal_tcsr_set();
+ return true;
+}
+
+/*
+ * nss_dp_hal_cleanup
+ * Sets the gmac ops to NULL.
+ */
+void nss_dp_hal_cleanup(void)
+{
+ nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_GMAC);
+}
diff --git a/hal/arch/ipq50xx/nss_ipq50xx.h b/hal/arch/ipq50xx/nss_ipq50xx.h
new file mode 100644
index 0000000..cae6407
--- /dev/null
+++ b/hal/arch/ipq50xx/nss_ipq50xx.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __NSS_DP_ARCH_H__
+#define __NSS_DP_ARCH_H__
+
+#define NSS_DP_HAL_MAX_PORTS 2
+#define NSS_DP_HAL_CPU_NUM 2
+#define NSS_DP_HAL_START_IFNUM 0
+#define NSS_DP_GMAC_NORMAL_FRAME_MTU 1500
+#define NSS_DP_GMAC_MINI_JUMBO_FRAME_MTU 1978
+#define NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU 9000
+#define NSS_DP_HAL_MAX_MTU_SIZE NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU
+#define NSS_DP_HAL_MAX_PACKET_LEN 65535
+
+/*
+ * TCSR_GMAC_AXI_CACHE_OVERRIDE register size
+ */
+#define TCSR_GMAC_AXI_CACHE_OVERRIDE_REG_SIZE 4
+
+/*
+ * TCSR_GMAC_AXI_CACHE_OVERRIDE Register offset
+ */
+#define TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET 0x6224
+
+/*
+ * Value for TCSR_GMAC_AXI_CACHE_OVERRIDE register
+ */
+#define TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE 0x05050505
+
+/*
+ * GCC_SNOC_GMAC_AXI_CLOCK
+ */
+#define NSS_SNOC_GMAC_AXI_CLK "nss-snoc-gmac-axi-clk"
+
+/**
+ * nss_dp_hal_gmac_stats
+ * The per-GMAC statistics structure.
+ */
+struct nss_dp_hal_gmac_stats {
+ uint64_t rx_bytes; /**< Number of RX bytes */
+ uint64_t rx_packets; /**< Number of RX packets */
+ uint64_t rx_errors; /**< Number of RX errors */
+ uint64_t rx_receive_errors; /**< Number of RX receive errors */
+ uint64_t rx_descriptor_errors; /**< Number of RX descriptor errors */
+ uint64_t rx_late_collision_errors;
+ /**< Number of RX late collision errors */
+ uint64_t rx_dribble_bit_errors; /**< Number of RX dribble bit errors */
+ uint64_t rx_length_errors; /**< Number of RX length errors */
+ uint64_t rx_ip_header_errors; /**< Number of RX IP header errors read from rxdec */
+ uint64_t rx_ip_payload_errors; /**< Number of RX IP payload errors */
+ uint64_t rx_no_buffer_errors; /**< Number of RX no-buffer errors */
+ uint64_t rx_transport_csum_bypassed;
+ /**< Number of RX packets where the transport checksum was bypassed */
+ uint64_t tx_bytes; /**< Number of TX bytes */
+ uint64_t tx_packets; /**< Number of TX packets */
+ uint64_t tx_collisions; /**< Number of TX collisions */
+ uint64_t tx_errors; /**< Number of TX errors */
+ uint64_t tx_jabber_timeout_errors;
+ /**< Number of TX jabber timeout errors */
+ uint64_t tx_frame_flushed_errors;
+ /**< Number of TX frame flushed errors */
+ uint64_t tx_loss_of_carrier_errors;
+ /**< Number of TX loss of carrier errors */
+ uint64_t tx_no_carrier_errors; /**< Number of TX no carrier errors */
+ uint64_t tx_late_collision_errors;
+ /**< Number of TX late collision errors */
+ uint64_t tx_excessive_collision_errors;
+ /**< Number of TX excessive collision errors */
+ uint64_t tx_excessive_deferral_errors;
+ /**< Number of TX excessive deferral errors */
+ uint64_t tx_underflow_errors; /**< Number of TX underflow errors */
+ uint64_t tx_ip_header_errors; /**< Number of TX IP header errors */
+ uint64_t tx_ip_payload_errors; /**< Number of TX IP payload errors */
+ uint64_t tx_dropped; /**< Number of TX dropped packets */
+ uint64_t hw_errs[10]; /**< GMAC DMA error counters */
+ uint64_t rx_missed; /**< Number of RX packets missed by the DMA */
+ uint64_t fifo_overflows; /**< Number of RX FIFO overflows signalled by the DMA */
+ uint64_t rx_scatter_errors; /**< Number of scattered frames received by the DMA */
+ uint64_t tx_ts_create_errors; /**< Number of tx timestamp creation errors */
+ uint64_t gmac_total_ticks; /**< Total clock ticks spend inside the GMAC */
+ uint64_t gmac_worst_case_ticks; /**< Worst case iteration of the GMAC in ticks */
+ uint64_t gmac_iterations; /**< Number of iterations around the GMAC */
+ uint64_t tx_pause_frames; /**< Number of pause frames sent by the GMAC */
+ uint64_t mmc_rx_overflow_errors;
+ /**< Number of RX overflow errors */
+ uint64_t mmc_rx_watchdog_timeout_errors;
+ /**< Number of RX watchdog timeout errors */
+ uint64_t mmc_rx_crc_errors; /**< Number of RX CRC errors */
+ uint64_t mmc_rx_ip_header_errors;
+ /**< Number of RX IP header errors read from MMC counter*/
+ uint64_t mmc_rx_octets_g;
+ /**< Number of good octets received */
+ uint64_t mmc_rx_ucast_frames; /**< Number of Unicast frames received */
+ uint64_t mmc_rx_bcast_frames; /**< Number of Bcast frames received */
+ uint64_t mmc_rx_mcast_frames; /**< Number of Mcast frames received */
+ uint64_t mmc_rx_undersize;
+ /**< Number of RX undersize frames */
+ uint64_t mmc_rx_oversize;
+ /**< Number of RX oversize frames */
+ uint64_t mmc_rx_jabber; /**< Number of jabber frames */
+ uint64_t mmc_rx_octets_gb;
+ /**< Number of good/bad octets */
+ uint64_t mmc_rx_frag_frames_g; /**< Number of good ipv4 frag frames */
+ uint64_t mmc_tx_octets_g; /**< Number of good octets sent */
+ uint64_t mmc_tx_ucast_frames; /**< Number of Unicast frames sent*/
+ uint64_t mmc_tx_bcast_frames; /**< Number of Broadcast frames sent */
+ uint64_t mmc_tx_mcast_frames; /**< Number of Multicast frames sent */
+ uint64_t mmc_tx_deferred; /**< Number of Deferred frames sent */
+ uint64_t mmc_tx_single_col; /**< Number of single collisions */
+ uint64_t mmc_tx_multiple_col; /**< Number of multiple collisions */
+ uint64_t mmc_tx_octets_gb; /**< Number of good/bad octets sent*/
+};
+
+extern struct nss_dp_data_plane_ops nss_dp_gmac_ops;
+
+#endif /* __NSS_DP_ARCH_H__ */
diff --git a/hal/arch/ipq60xx/nss_ipq60xx.c b/hal/arch/ipq60xx/nss_ipq60xx.c
new file mode 100644
index 0000000..dab4276
--- /dev/null
+++ b/hal/arch/ipq60xx/nss_ipq60xx.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "nss_dp_hal.h"
+#include "edma.h"
+
+/*
+ * nss_dp_hal_get_data_plane_ops()
+ * Return the data plane ops for edma data plane.
+ */
+struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void)
+{
+ return &nss_dp_edma_ops;
+}
+
+/*
+ * nss_dp_hal_init()
+ * Initialize EDMA and set gmac ops.
+ */
+bool nss_dp_hal_init(void)
+{
+ nss_dp_hal_set_gmac_ops(&qcom_hal_ops, GMAC_HAL_TYPE_QCOM);
+ nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_XGMAC);
+
+ if (edma_init()) {
+ return false;
+ }
+ return true;
+}
+
+/*
+ * nss_dp_hal_cleanup()
+ * Cleanup EDMA and set gmac ops to NULL.
+ */
+void nss_dp_hal_cleanup(void)
+{
+ nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_QCOM);
+ nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_XGMAC);
+ edma_cleanup(false);
+}
diff --git a/hal/arch/ipq60xx/nss_ipq60xx.h b/hal/arch/ipq60xx/nss_ipq60xx.h
new file mode 100644
index 0000000..26dc767
--- /dev/null
+++ b/hal/arch/ipq60xx/nss_ipq60xx.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __NSS_DP_ARCH_H__
+#define __NSS_DP_ARCH_H__
+
+#define NSS_DP_HAL_MAX_PORTS 5
+#define NSS_DP_HAL_CPU_NUM 4
+#define NSS_DP_HAL_START_IFNUM 1
+#define NSS_DP_HAL_MAX_MTU_SIZE 9216
+#define NSS_DP_HAL_MAX_PACKET_LEN 65535
+#define NSS_DP_PREHEADER_SIZE 32
+
+/**
+ * nss_dp_hal_gmac_stats
+ * The per-GMAC statistics structure.
+ */
+struct nss_dp_hal_gmac_stats {
+};
+
+#endif /* __NSS_DP_ARCH_H__ */
diff --git a/hal/arch/ipq807x/nss_ipq807x.c b/hal/arch/ipq807x/nss_ipq807x.c
new file mode 100644
index 0000000..dab4276
--- /dev/null
+++ b/hal/arch/ipq807x/nss_ipq807x.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "nss_dp_hal.h"
+#include "edma.h"
+
+/*
+ * nss_dp_hal_get_data_plane_ops()
+ * Return the data plane ops for edma data plane.
+ */
+struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void)
+{
+ return &nss_dp_edma_ops;
+}
+
+/*
+ * nss_dp_hal_init()
+ * Initialize EDMA and set gmac ops.
+ */
+bool nss_dp_hal_init(void)
+{
+ nss_dp_hal_set_gmac_ops(&qcom_hal_ops, GMAC_HAL_TYPE_QCOM);
+ nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_XGMAC);
+
+ if (edma_init()) {
+ return false;
+ }
+ return true;
+}
+
+/*
+ * nss_dp_hal_cleanup()
+ * Cleanup EDMA and set gmac ops to NULL.
+ */
+void nss_dp_hal_cleanup(void)
+{
+ nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_QCOM);
+ nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_XGMAC);
+ edma_cleanup(false);
+}
diff --git a/hal/arch/ipq807x/nss_ipq807x.h b/hal/arch/ipq807x/nss_ipq807x.h
new file mode 100644
index 0000000..6926e56
--- /dev/null
+++ b/hal/arch/ipq807x/nss_ipq807x.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __NSS_DP_ARCH_H__
+#define __NSS_DP_ARCH_H__
+
+#define NSS_DP_HAL_MAX_PORTS 6
+#define NSS_DP_HAL_CPU_NUM 4
+#define NSS_DP_HAL_START_IFNUM 1
+#define NSS_DP_HAL_MAX_MTU_SIZE 9216
+#define NSS_DP_HAL_MAX_PACKET_LEN 65535
+#define NSS_DP_PREHEADER_SIZE 32
+
+/**
+ * nss_dp_hal_gmac_stats
+ * The per-GMAC statistics structure.
+ */
+struct nss_dp_hal_gmac_stats {
+};
+
+#endif /* __NSS_DP_ARCH_H__ */
diff --git a/edma/edma_cfg.c b/hal/edma/edma_cfg.c
similarity index 99%
rename from edma/edma_cfg.c
rename to hal/edma/edma_cfg.c
index 0a04328..d2563c8 100644
--- a/edma/edma_cfg.c
+++ b/hal/edma/edma_cfg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/edma/edma_data_plane.c b/hal/edma/edma_data_plane.c
similarity index 98%
rename from edma/edma_data_plane.c
rename to hal/edma/edma_data_plane.c
index d91c40a..d51c7f0 100644
--- a/edma/edma_data_plane.c
+++ b/hal/edma/edma_data_plane.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -158,7 +158,7 @@
* Check for non-linear skb
*/
if (skb_is_nonlinear(skb)) {
- netdev_dbg(netdev, "cannot Tx non-linear skb:%p\n", skb);
+ netdev_dbg(netdev, "cannot Tx non-linear skb:%px\n", skb);
goto drop;
}
@@ -178,7 +178,7 @@
* Expand the skb. This also unclones a cloned skb.
*/
if (expand_skb && pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC)) {
- netdev_dbg(netdev, "cannot expand skb:%p\n", skb);
+ netdev_dbg(netdev, "cannot expand skb:%px\n", skb);
goto drop;
}
@@ -501,7 +501,7 @@
static int edma_register_netdevice(struct net_device *netdev, uint32_t macid)
{
if (!netdev) {
- pr_info("nss_dp_edma: Invalid netdev pointer %p\n", netdev);
+ pr_info("nss_dp_edma: Invalid netdev pointer %px\n", netdev);
return -EINVAL;
}
@@ -594,7 +594,6 @@
* edma_of_get_pdata()
* Read the device tree details for EDMA
*/
-
static int edma_of_get_pdata(struct resource *edma_res)
{
/*
@@ -613,7 +612,7 @@
*/
edma_hw.pdev = of_find_device_by_node(edma_hw.device_node);
if (!edma_hw.pdev) {
- pr_warn("Platform device for node %p(%s) not found\n",
+ pr_warn("Platform device for node %px(%s) not found\n",
edma_hw.device_node,
(edma_hw.device_node)->name);
return -EINVAL;
@@ -725,7 +724,7 @@
*/
if (edma_of_get_pdata(&res_edma) < 0) {
pr_warn("Unable to get EDMA DTS data.\n");
- return 0;
+ return -EINVAL;
}
/*
diff --git a/edma/edma_data_plane.h b/hal/edma/edma_data_plane.h
similarity index 97%
rename from edma/edma_data_plane.h
rename to hal/edma/edma_data_plane.h
index e17eb26..226c024 100644
--- a/edma/edma_data_plane.h
+++ b/hal/edma/edma_data_plane.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016, 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, 2018-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -28,8 +28,8 @@
#define EDMA_TX_PREHDR_SIZE (sizeof(struct edma_tx_preheader))
#define EDMA_RING_SIZE 128
#define EDMA_NAPI_WORK 100
-#define EDMA_START_GMACS NSS_DP_START_PHY_PORT
-#define EDMA_MAX_GMACS NSS_DP_MAX_PHY_PORTS
+#define EDMA_START_GMACS NSS_DP_START_IFNUM
+#define EDMA_MAX_GMACS NSS_DP_HAL_MAX_PORTS
#define EDMA_TX_PKT_MIN_SIZE 33
#if defined(NSS_DP_IPQ60XX)
#define EDMA_MAX_TXCMPL_RINGS 24 /* Max TxCmpl rings */
diff --git a/edma/edma_regs.h b/hal/edma/edma_regs.h
similarity index 99%
rename from edma/edma_regs.h
rename to hal/edma/edma_regs.h
index 4ebffc3..e724cc7 100644
--- a/edma/edma_regs.h
+++ b/hal/edma/edma_regs.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016,2019-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
diff --git a/edma/edma_tx_rx.c b/hal/edma/edma_tx_rx.c
similarity index 95%
rename from edma/edma_tx_rx.c
rename to hal/edma/edma_tx_rx.c
index 0443def..eeae567 100644
--- a/edma/edma_tx_rx.c
+++ b/hal/edma/edma_tx_rx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -14,7 +14,9 @@
* USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/version.h>
#include <linux/interrupt.h>
+#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/debugfs.h>
@@ -182,7 +184,7 @@
len = skb_headlen(skb);
daddr = (dma_addr_t)virt_to_phys(skb->data);
- pr_debug("skb:%p cons_idx:%d prod_idx:%d word1:0x%x\n",
+ pr_debug("skb:%px cons_idx:%d prod_idx:%d word1:0x%x\n",
skb, cons_idx, prod_idx, txcmpl->status);
dma_unmap_single(&pdev->dev, daddr,
@@ -324,7 +326,7 @@
src_port_num = rxph->src_info &
EDMA_PREHDR_PORTNUM_BITS;
} else {
- pr_warn("WARN: src_info_type:0x%x. Drop skb:%p\n",
+ pr_warn("WARN: src_info_type:0x%x. Drop skb:%px\n",
EDMA_RXPH_SRC_INFO_TYPE_GET(rxph), skb);
dev_kfree_skb_any(skb);
goto next_rx_desc;
@@ -335,9 +337,9 @@
*/
pkt_length = rxdesc_desc->status & EDMA_RXDESC_PACKET_LEN_MASK;
- if (unlikely((src_port_num < NSS_DP_START_PHY_PORT) ||
- (src_port_num > NSS_DP_MAX_PHY_PORTS))) {
- pr_warn("WARN: Port number error :%d. Drop skb:%p\n",
+ if (unlikely((src_port_num < NSS_DP_START_IFNUM) ||
+ (src_port_num > NSS_DP_HAL_MAX_PORTS))) {
+ pr_warn("WARN: Port number error :%d. Drop skb:%px\n",
src_port_num, skb);
dev_kfree_skb_any(skb);
goto next_rx_desc;
@@ -351,7 +353,7 @@
*/
ndev = ehw->netdev_arr[src_port_num - 1];
if (unlikely(!ndev)) {
- pr_warn("WARN: netdev Null src_info_type:0x%x. Drop skb:%p\n",
+ pr_warn("WARN: netdev Null src_info_type:0x%x. Drop skb:%px\n",
src_port_num, skb);
dev_kfree_skb_any(skb);
goto next_rx_desc;
@@ -375,12 +377,19 @@
skb_put(skb, pkt_length);
skb->protocol = eth_type_trans(skb, skb->dev);
#ifdef CONFIG_NET_SWITCHDEV
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
skb->offload_fwd_mark = ndev->offload_fwd_mark;
- pr_debug("skb:%p ring_idx:%u pktlen:%d proto:0x%x mark:%u\n",
+#else
+ /*
+ * TODO: Implement ndo_get_devlink_port()
+ */
+ skb->offload_fwd_mark = 0;
+#endif
+ pr_debug("skb:%px ring_idx:%u pktlen:%d proto:0x%x mark:%u\n",
skb, cons_idx, pkt_length, skb->protocol,
skb->offload_fwd_mark);
#else
- pr_debug("skb:%p ring_idx:%u pktlen:%d proto:0x%x\n",
+ pr_debug("skb:%px ring_idx:%u pktlen:%d proto:0x%x\n",
skb, cons_idx, pkt_length, skb->protocol);
#endif
/*
@@ -616,7 +625,7 @@
txdesc->word1 |= ((buf_len & EDMA_TXDESC_DATA_LENGTH_MASK)
<< EDMA_TXDESC_DATA_LENGTH_SHIFT);
- netdev_dbg(netdev, "skb:%p tx_ring:%u proto:0x%x\n",
+ netdev_dbg(netdev, "skb:%px tx_ring:%u proto:0x%x\n",
skb, txdesc_ring->id, ntohs(skb->protocol));
netdev_dbg(netdev, "port:%u prod_idx:%u cons_idx:%u\n",
dp_dev->macid, hw_next_to_use, hw_next_to_clean);
diff --git a/gmac_hal_ops/qcom/qcom_dev.h b/hal/gmac_hal_ops/qcom/qcom_dev.h
similarity index 99%
rename from gmac_hal_ops/qcom/qcom_dev.h
rename to hal/gmac_hal_ops/qcom/qcom_dev.h
index 3ed9d15..79da086 100644
--- a/gmac_hal_ops/qcom/qcom_dev.h
+++ b/hal/gmac_hal_ops/qcom/qcom_dev.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
diff --git a/gmac_hal_ops/qcom/qcom_if.c b/hal/gmac_hal_ops/qcom/qcom_if.c
similarity index 97%
rename from gmac_hal_ops/qcom/qcom_if.c
rename to hal/gmac_hal_ops/qcom/qcom_if.c
index 208c664..b9b5968 100644
--- a/gmac_hal_ops/qcom/qcom_if.c
+++ b/hal/gmac_hal_ops/qcom/qcom_if.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -162,7 +162,6 @@
qcom_clear_tx_flow_ctrl(nghd);
}
-
/*
* qcom_get_mib_stats()
*/
@@ -272,14 +271,14 @@
case ETH_SS_STATS:
for (i = 0; i < QCOM_STATS_LEN; i++) {
memcpy(data, qcom_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ strlen(qcom_gstrings_stats[i].stat_string));
data += ETH_GSTRING_LEN;
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < QCOM_PRIV_FLAGS_LEN; i++) {
memcpy(data, qcom_strings_priv_flags[i],
- ETH_GSTRING_LEN);
+ strlen(qcom_strings_priv_flags[i]));
data += ETH_GSTRING_LEN;
}
break;
@@ -343,7 +342,7 @@
qcom_tx_enable(nghd);
qcom_rx_enable(nghd);
- netdev_dbg(nghd->netdev, "%s: mac_base:0x%p mac_enable:0x%x\n",
+ netdev_dbg(nghd->netdev, "%s: mac_base:0x%px mac_enable:0x%x\n",
__func__, nghd->mac_base,
hal_read_reg(nghd->mac_base, QCOM_MAC_ENABLE));
@@ -358,7 +357,7 @@
qcom_tx_disable(nghd);
qcom_rx_disable(nghd);
- netdev_dbg(nghd->netdev, "%s: mac_base:0x%p mac_enable:0x%x\n",
+ netdev_dbg(nghd->netdev, "%s: mac_base:0x%px mac_enable:0x%x\n",
__func__, nghd->mac_base,
hal_read_reg(nghd->mac_base, QCOM_MAC_ENABLE));
return 0;
@@ -410,7 +409,7 @@
spin_lock_init(&qhd->nghd.slock);
- netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%p\n",
+ netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n",
gmacpdata->reg_len,
ndev->base_addr,
qhd->nghd.mac_base);
diff --git a/gmac_hal_ops/qcom/qcom_reg.h b/hal/gmac_hal_ops/qcom/qcom_reg.h
similarity index 98%
rename from gmac_hal_ops/qcom/qcom_reg.h
rename to hal/gmac_hal_ops/qcom/qcom_reg.h
index 98fef59..9210c2a 100644
--- a/gmac_hal_ops/qcom/qcom_reg.h
+++ b/hal/gmac_hal_ops/qcom/qcom_reg.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
diff --git a/hal/gmac_hal_ops/syn/gmac/syn_dev.h b/hal/gmac_hal_ops/syn/gmac/syn_dev.h
new file mode 100644
index 0000000..0bfec1b
--- /dev/null
+++ b/hal/gmac_hal_ops/syn/gmac/syn_dev.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __SYN_DEV_H__
+#define __SYN_DEV_H__
+
+#include <nss_dp_dev.h>
+
+/*
+ * Subclass for base nss_gmac_hal_dev
+ */
+struct syn_hal_dev {
+ struct nss_gmac_hal_dev nghd; /* Base class */
+ struct nss_dp_gmac_stats stats; /* Stats structure */
+};
+
+#endif /*__SYN_DEV_H__*/
diff --git a/hal/gmac_hal_ops/syn/gmac/syn_if.c b/hal/gmac_hal_ops/syn/gmac/syn_if.c
new file mode 100644
index 0000000..2601ff2
--- /dev/null
+++ b/hal/gmac_hal_ops/syn/gmac/syn_if.c
@@ -0,0 +1,959 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <fal/fal_mib.h>
+#include <fal/fal_port_ctrl.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <nss_dp_hal.h>
+#include "syn_dev.h"
+#include "syn_reg.h"
+
+#define SYN_STAT(m) offsetof(struct nss_dp_hal_gmac_stats, m)
+#define HW_ERR_SIZE sizeof(uint64_t)
+
+/*
+ * Array to store ethtool statistics
+ */
+struct syn_ethtool_stats {
+ uint8_t stat_string[ETH_GSTRING_LEN];
+ uint64_t stat_offset;
+};
+
+/*
+ * Array of strings describing statistics
+ */
+static const struct syn_ethtool_stats syn_gstrings_stats[] = {
+ {"rx_bytes", SYN_STAT(rx_bytes)},
+ {"rx_packets", SYN_STAT(rx_packets)},
+ {"rx_errors", SYN_STAT(rx_errors)},
+ {"rx_receive_errors", SYN_STAT(rx_receive_errors)},
+ {"rx_descriptor_errors", SYN_STAT(rx_descriptor_errors)},
+ {"rx_late_collision_errors", SYN_STAT(rx_late_collision_errors)},
+ {"rx_dribble_bit_errors", SYN_STAT(rx_dribble_bit_errors)},
+ {"rx_length_errors", SYN_STAT(rx_length_errors)},
+ {"rx_ip_header_errors", SYN_STAT(rx_ip_header_errors)},
+ {"rx_ip_payload_errors", SYN_STAT(rx_ip_payload_errors)},
+ {"rx_no_buffer_errors", SYN_STAT(rx_no_buffer_errors)},
+ {"rx_transport_csum_bypassed", SYN_STAT(rx_transport_csum_bypassed)},
+ {"tx_bytes", SYN_STAT(tx_bytes)},
+ {"tx_packets", SYN_STAT(tx_packets)},
+ {"tx_collisions", SYN_STAT(tx_collisions)},
+ {"tx_errors", SYN_STAT(tx_errors)},
+ {"tx_jabber_timeout_errors", SYN_STAT(tx_jabber_timeout_errors)},
+ {"tx_frame_flushed_errors", SYN_STAT(tx_frame_flushed_errors)},
+ {"tx_loss_of_carrier_errors", SYN_STAT(tx_loss_of_carrier_errors)},
+ {"tx_no_carrier_errors", SYN_STAT(tx_no_carrier_errors)},
+ {"tx_late_collision_errors", SYN_STAT(tx_late_collision_errors)},
+ {"tx_excessive_collision_errors", SYN_STAT(tx_excessive_collision_errors)},
+ {"tx_excessive_deferral_errors", SYN_STAT(tx_excessive_deferral_errors)},
+ {"tx_underflow_errors", SYN_STAT(tx_underflow_errors)},
+ {"tx_ip_header_errors", SYN_STAT(tx_ip_header_errors)},
+ {"tx_ip_payload_errors", SYN_STAT(tx_ip_payload_errors)},
+ {"tx_dropped", SYN_STAT(tx_dropped)},
+ {"rx_missed", SYN_STAT(rx_missed)},
+ {"fifo_overflows", SYN_STAT(fifo_overflows)},
+ {"rx_scatter_errors", SYN_STAT(rx_scatter_errors)},
+ {"tx_ts_create_errors", SYN_STAT(tx_ts_create_errors)},
+ {"pmt_interrupts", SYN_STAT(hw_errs[0])},
+ {"mmc_interrupts", SYN_STAT(hw_errs[0]) + (1 * HW_ERR_SIZE)},
+ {"line_interface_interrupts", SYN_STAT(hw_errs[0]) + (2 * HW_ERR_SIZE)},
+ {"fatal_bus_error_interrupts", SYN_STAT(hw_errs[0]) + (3 * HW_ERR_SIZE)},
+ {"rx_buffer_unavailable_interrupts", SYN_STAT(hw_errs[0]) + (4 * HW_ERR_SIZE)},
+ {"rx_process_stopped_interrupts", SYN_STAT(hw_errs[0]) + (5 * HW_ERR_SIZE)},
+ {"tx_underflow_interrupts", SYN_STAT(hw_errs[0]) + (6 * HW_ERR_SIZE)},
+ {"rx_overflow_interrupts", SYN_STAT(hw_errs[0]) + (7 * HW_ERR_SIZE)},
+ {"tx_jabber_timeout_interrutps", SYN_STAT(hw_errs[0]) + (8 * HW_ERR_SIZE)},
+ {"tx_process_stopped_interrutps", SYN_STAT(hw_errs[0]) + (9 * HW_ERR_SIZE)},
+ {"gmac_total_ticks", SYN_STAT(gmac_total_ticks)},
+ {"gmac_worst_case_ticks", SYN_STAT(gmac_worst_case_ticks)},
+ {"gmac_iterations", SYN_STAT(gmac_iterations)},
+ {"tx_pause_frames", SYN_STAT(tx_pause_frames)},
+ {"mmc_rx_overflow_errors", SYN_STAT(mmc_rx_overflow_errors)},
+ {"mmc_rx_watchdog_timeout_errors", SYN_STAT(mmc_rx_watchdog_timeout_errors)},
+ {"mmc_rx_crc_errors", SYN_STAT(mmc_rx_crc_errors)},
+ {"mmc_rx_ip_header_errors", SYN_STAT(mmc_rx_ip_header_errors)},
+ {"mmc_rx_octets_g", SYN_STAT(mmc_rx_octets_g)},
+ {"mmc_rx_ucast_frames", SYN_STAT(mmc_rx_ucast_frames)},
+ {"mmc_rx_bcast_frames", SYN_STAT(mmc_rx_bcast_frames)},
+ {"mmc_rx_mcast_frames", SYN_STAT(mmc_rx_mcast_frames)},
+ {"mmc_rx_undersize", SYN_STAT(mmc_rx_undersize)},
+ {"mmc_rx_oversize", SYN_STAT(mmc_rx_oversize)},
+ {"mmc_rx_jabber", SYN_STAT(mmc_rx_jabber)},
+ {"mmc_rx_octets_gb", SYN_STAT(mmc_rx_octets_gb)},
+ {"mmc_rx_frag_frames_g", SYN_STAT(mmc_rx_frag_frames_g)},
+ {"mmc_tx_octets_g", SYN_STAT(mmc_tx_octets_g)},
+ {"mmc_tx_ucast_frames", SYN_STAT(mmc_tx_ucast_frames)},
+ {"mmc_tx_bcast_frames", SYN_STAT(mmc_tx_bcast_frames)},
+ {"mmc_tx_mcast_frames", SYN_STAT(mmc_tx_mcast_frames)},
+ {"mmc_tx_deferred", SYN_STAT(mmc_tx_deferred)},
+ {"mmc_tx_single_col", SYN_STAT(mmc_tx_single_col)},
+ {"mmc_tx_multiple_col", SYN_STAT(mmc_tx_multiple_col)},
+ {"mmc_tx_octets_gb", SYN_STAT(mmc_tx_octets_gb)},
+};
+
+#define SYN_STATS_LEN ARRAY_SIZE(syn_gstrings_stats)
+
+/*
+ * syn_set_rx_flow_ctrl()
+ */
+static inline void syn_set_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
+{
+ hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
+ SYN_MAC_FC_RX_FLOW_CONTROL);
+}
+
+/*
+ * syn_clear_rx_flow_ctrl()
+ */
+static inline void syn_clear_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
+{
+ hal_clear_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
+ SYN_MAC_FC_RX_FLOW_CONTROL);
+
+}
+
+/*
+ * syn_set_tx_flow_ctrl()
+ */
+static inline void syn_set_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
+{
+ hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
+ SYN_MAC_FC_TX_FLOW_CONTROL);
+}
+
+/*
+ * syn_send_tx_pause_frame()
+ */
+static inline void syn_send_tx_pause_frame(struct nss_gmac_hal_dev *nghd)
+{
+ syn_set_tx_flow_ctrl(nghd);
+ hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
+ SYN_MAC_FC_SEND_PAUSE_FRAME);
+}
+
+/*
+ * syn_clear_tx_flow_ctrl()
+ */
+static inline void syn_clear_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd)
+{
+ hal_clear_reg_bits(nghd, SYN_MAC_FLOW_CONTROL,
+ SYN_MAC_FC_TX_FLOW_CONTROL);
+}
+
+/*
+ * syn_rx_enable()
+ */
+static inline void syn_rx_enable(struct nss_gmac_hal_dev *nghd)
+{
+ hal_set_reg_bits(nghd, SYN_MAC_CONFIGURATION, SYN_MAC_RX);
+ hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_FILTER_OFF);
+}
+
+/*
+ * syn_tx_enable()
+ */
+static inline void syn_tx_enable(struct nss_gmac_hal_dev *nghd)
+{
+ hal_set_reg_bits(nghd, SYN_MAC_CONFIGURATION, SYN_MAC_TX);
+}
+
+/************Ip checksum offloading APIs*************/
+
+/*
+ * syn_enable_rx_chksum_offload()
+ * Enable IPv4 header and IPv4/IPv6 TCP/UDP checksum calculation by GMAC.
+ */
+static inline void syn_enable_rx_chksum_offload(struct nss_gmac_hal_dev *nghd)
+{
+ hal_set_reg_bits(nghd,
+ SYN_MAC_CONFIGURATION, SYN_MAC_RX_IPC_OFFLOAD);
+}
+
+/*
+ * syn_disable_rx_chksum_offload()
+ * Disable the IP checksum offloading in receive path.
+ */
+static inline void syn_disable_rx_chksum_offload(struct nss_gmac_hal_dev *nghd)
+{
+ hal_clear_reg_bits(nghd,
+ SYN_MAC_CONFIGURATION, SYN_MAC_RX_IPC_OFFLOAD);
+}
+
+/*
+ * syn_rx_tcpip_chksum_drop_enable()
+ * Instruct the DMA to drop the packets that fail TCP/IP checksum.
+ *
+ * This is to instruct the receive DMA engine to drop the received
+ * packet if it fails the tcp/ip checksum in hardware. Valid only when
+ * full checksum offloading is enabled(type-2).
+ */
+static inline void syn_rx_tcpip_chksum_drop_enable(struct nss_gmac_hal_dev *nghd)
+{
+ hal_clear_reg_bits(nghd,
+ SYN_DMA_OPERATION_MODE, SYN_DMA_DISABLE_DROP_TCP_CS);
+}
+
+/*******************Ip checksum offloading APIs**********************/
+
+/*
+ * syn_ipc_offload_init()
+ * Initialize IPC Checksum offloading.
+ */
+static inline void syn_ipc_offload_init(struct nss_gmac_hal_dev *nghd)
+{
+ struct nss_dp_dev *dp_priv;
+ dp_priv = netdev_priv(nghd->netdev);
+
+ if (test_bit(__NSS_DP_RXCSUM, &dp_priv->flags)) {
+ /*
+ * Enable the offload engine in the receive path
+ */
+ syn_enable_rx_chksum_offload(nghd);
+
+ /*
+ * DMA drops the packets if error in encapsulated ethernet
+ * payload.
+ */
+ syn_rx_tcpip_chksum_drop_enable(nghd);
+ netdev_dbg(nghd->netdev, "%s: enable Rx checksum\n", __func__);
+ } else {
+ syn_disable_rx_chksum_offload(nghd);
+ netdev_dbg(nghd->netdev, "%s: disable Rx checksum\n", __func__);
+ }
+}
+
+/*
+ * syn_disable_mac_interrupt()
+ * Disable all the interrupts.
+ */
+static inline void syn_disable_mac_interrupt(struct nss_gmac_hal_dev *nghd)
+{
+ hal_write_reg(nghd->mac_base, SYN_INTERRUPT_MASK, 0xffffffff);
+}
+
+/*
+ * syn_disable_mmc_tx_interrupt()
+ * Disable the MMC Tx interrupt.
+ *
+ * The MMC tx interrupts are masked out as per the mask specified.
+ */
+static inline void syn_disable_mmc_tx_interrupt(struct nss_gmac_hal_dev *nghd,
+ uint32_t mask)
+{
+ hal_set_reg_bits(nghd, SYN_MMC_TX_INTERRUPT_MASK, mask);
+}
+
+/*
+ * syn_disable_mmc_rx_interrupt()
+ * Disable the MMC Rx interrupt.
+ *
+ * The MMC rx interrupts are masked out as per the mask specified.
+ */
+static inline void syn_disable_mmc_rx_interrupt(struct nss_gmac_hal_dev *nghd,
+ uint32_t mask)
+{
+ hal_set_reg_bits(nghd, SYN_MMC_RX_INTERRUPT_MASK, mask);
+}
+
+/*
+ * syn_disable_mmc_ipc_rx_interrupt()
+ * Disable the MMC ipc rx checksum offload interrupt.
+ *
+ * The MMC ipc rx checksum offload interrupts are masked out as
+ * per the mask specified.
+ */
+static inline void syn_disable_mmc_ipc_rx_interrupt(struct nss_gmac_hal_dev *nghd,
+ uint32_t mask)
+{
+ hal_set_reg_bits(nghd, SYN_MMC_IPC_RX_INTR_MASK, mask);
+}
+
+/*
+ * syn_disable_dma_interrupt()
+ * Disables all DMA interrupts.
+ */
+void syn_disable_dma_interrupt(struct nss_gmac_hal_dev *nghd)
+{
+ hal_write_reg(nghd->mac_base, SYN_DMA_INT_ENABLE, SYN_DMA_INT_DISABLE);
+}
+
+/*
+ * syn_enable_dma_interrupt()
+ * Enables all DMA interrupts.
+ */
+void syn_enable_dma_interrupt(struct nss_gmac_hal_dev *nghd)
+{
+ hal_write_reg(nghd->mac_base, SYN_DMA_INT_ENABLE, SYN_DMA_INT_EN);
+}
+
+/*
+ * syn_disable_interrupt_all()
+ * Disable all the interrupts.
+ */
+static inline void syn_disable_interrupt_all(struct nss_gmac_hal_dev *nghd)
+{
+ syn_disable_mac_interrupt(nghd);
+ syn_disable_dma_interrupt(nghd);
+ syn_disable_mmc_tx_interrupt(nghd, 0xFFFFFFFF);
+ syn_disable_mmc_rx_interrupt(nghd, 0xFFFFFFFF);
+ syn_disable_mmc_ipc_rx_interrupt(nghd, 0xFFFFFFFF);
+}
+
+/*
+ * syn_dma_bus_mode_init()
+ * Function to program DMA bus mode register.
+ */
+static inline void syn_dma_bus_mode_init(struct nss_gmac_hal_dev *nghd)
+{
+ hal_write_reg(nghd->mac_base, SYN_DMA_BUS_MODE, SYN_DMA_BUS_MODE_VAL);
+}
+
+/*
+ * syn_clear_dma_status()
+ * Clear all the pending dma interrupts.
+ */
+void syn_clear_dma_status(struct nss_gmac_hal_dev *nghd)
+{
+ uint32_t data;
+
+ data = hal_read_reg(nghd->mac_base, SYN_DMA_STATUS);
+ hal_write_reg(nghd->mac_base, SYN_DMA_STATUS, data);
+}
+
+/*
+ * syn_enable_dma_rx()
+ * Enable Rx GMAC operation
+ */
+void syn_enable_dma_rx(struct nss_gmac_hal_dev *nghd)
+{
+ uint32_t data;
+
+ data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
+ data |= SYN_DMA_RX_START;
+ hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
+}
+
+/*
+ * syn_disable_dma_rx()
+ * Disable Rx GMAC operation
+ */
+void syn_disable_dma_rx(struct nss_gmac_hal_dev *nghd)
+{
+ uint32_t data;
+
+ data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
+ data &= ~SYN_DMA_RX_START;
+ hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
+}
+
+/*
+ * syn_enable_dma_tx()
+ * Enable Rx GMAC operation
+ */
+void syn_enable_dma_tx(struct nss_gmac_hal_dev *nghd)
+{
+ uint32_t data;
+
+ data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
+ data |= SYN_DMA_TX_START;
+ hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
+}
+
+/*
+ * syn_disable_dma_tx()
+ * Disable Rx GMAC operation
+ */
+void syn_disable_dma_tx(struct nss_gmac_hal_dev *nghd)
+{
+ uint32_t data;
+
+ data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE);
+ data &= ~SYN_DMA_TX_START;
+ hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data);
+}
+
+/*
+ * syn_resume_dma_tx
+ * Resumes the DMA Transmission.
+ */
+void syn_resume_dma_tx(struct nss_gmac_hal_dev *nghd)
+{
+ hal_write_reg(nghd->mac_base, SYN_DMA_TX_POLL_DEMAND, 0);
+}
+
+/*
+ * syn_get_rx_missed
+ * Get Rx missed errors
+ */
+uint32_t syn_get_rx_missed(struct nss_gmac_hal_dev *nghd)
+{
+ uint32_t missed_frame_buff_overflow;
+ missed_frame_buff_overflow = hal_read_reg(nghd->mac_base, SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER);
+ return missed_frame_buff_overflow & 0xFFFF;
+}
+
+/*
+ * syn_get_fifo_overflows
+ * Get FIFO overflows
+ */
+uint32_t syn_get_fifo_overflows(struct nss_gmac_hal_dev *nghd)
+{
+ uint32_t missed_frame_buff_overflow;
+ missed_frame_buff_overflow = hal_read_reg(nghd->mac_base, SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER);
+ return (missed_frame_buff_overflow >> 17) & 0x7ff;
+}
+
+/*
+ * syn_init_tx_desc_base()
+ * Programs the Dma Tx Base address with the starting address of the descriptor ring or chain.
+ */
+void syn_init_tx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t tx_desc_dma)
+{
+ hal_write_reg(nghd->mac_base, SYN_DMA_TX_DESCRIPTOR_LIST_ADDRESS, tx_desc_dma);
+}
+
+/*
+ * syn_init_rx_desc_base()
+ * Programs the Dma Rx Base address with the starting address of the descriptor ring or chain.
+ */
+void syn_init_rx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t rx_desc_dma)
+{
+ hal_write_reg(nghd->mac_base, SYN_DMA_RX_DESCRIPTOR_LIST_ADDRESS, rx_desc_dma);
+}
+
+/*
+ * syn_dma_axi_bus_mode_init()
+ * Function to program DMA AXI bus mode register.
+ */
+static inline void syn_dma_axi_bus_mode_init(struct nss_gmac_hal_dev *nghd)
+{
+ hal_write_reg(nghd->mac_base, SYN_DMA_AXI_BUS_MODE,
+ SYN_DMA_AXI_BUS_MODE_VAL);
+}
+
+/*
+ * syn_dma_operation_mode_init()
+ * Function to program DMA Operation Mode register.
+ */
+static inline void syn_dma_operation_mode_init(struct nss_gmac_hal_dev *nghd)
+{
+ hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, SYN_DMA_OMR);
+}
+
+/*
+ * syn_broadcast_enable()
+ * Enables Broadcast frames.
+ *
+ * When enabled Address filtering module passes all incoming broadcast frames.
+ */
+static inline void syn_broadcast_enable(struct nss_gmac_hal_dev *nghd)
+{
+ hal_clear_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_BROADCAST);
+}
+
+/*
+ * syn_multicast_enable()
+ * Enables Multicast frames.
+ *
+ * When enabled all multicast frames are passed.
+ */
+static inline void syn_multicast_enable(struct nss_gmac_hal_dev *nghd)
+{
+ hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_MULTICAST_FILTER);
+}
+
+/*
+ * syn_promisc_enable()
+ * Enables promiscuous mode.
+ *
+ * When enabled Address filter modules pass all incoming frames
+ * regardless of their Destination and source addresses.
+ */
+static inline void syn_promisc_enable(struct nss_gmac_hal_dev *nghd)
+{
+ hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_FILTER_OFF);
+ hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER,
+ SYN_MAC_PROMISCUOUS_MODE_ON);
+}
+
+/*
+ * syn_get_stats()
+ */
+static int syn_get_stats(struct nss_gmac_hal_dev *nghd)
+{
+ struct nss_dp_dev *dp_priv;
+ struct syn_hal_dev *shd;
+ struct nss_dp_gmac_stats *stats;
+
+ BUG_ON(nghd == NULL);
+
+ shd = (struct syn_hal_dev *)nghd;
+ stats = &(shd->stats);
+
+ dp_priv = netdev_priv(nghd->netdev);
+ if (!dp_priv->data_plane_ops)
+ return -1;
+
+ dp_priv->data_plane_ops->get_stats(dp_priv->dpc, stats);
+
+ return 0;
+}
+
+/*
+ * syn_rx_flow_control()
+ */
+static void syn_rx_flow_control(struct nss_gmac_hal_dev *nghd,
+ bool enabled)
+{
+ BUG_ON(nghd == NULL);
+
+ if (enabled)
+ syn_set_rx_flow_ctrl(nghd);
+ else
+ syn_clear_rx_flow_ctrl(nghd);
+}
+
+/*
+ * syn_tx_flow_control()
+ */
+static void syn_tx_flow_control(struct nss_gmac_hal_dev *nghd,
+ bool enabled)
+{
+ BUG_ON(nghd == NULL);
+
+ if (enabled)
+ syn_set_tx_flow_ctrl(nghd);
+ else
+ syn_clear_tx_flow_ctrl(nghd);
+}
+
+/*
+ * syn_get_max_frame_size()
+ */
+static int32_t syn_get_max_frame_size(struct nss_gmac_hal_dev *nghd)
+{
+ int ret;
+ uint32_t mtu;
+
+ BUG_ON(nghd == NULL);
+
+ ret = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu);
+
+ if (!ret)
+ return mtu;
+
+ return ret;
+}
+
+/*
+ * syn_set_max_frame_size()
+ */
+static int32_t syn_set_max_frame_size(struct nss_gmac_hal_dev *nghd,
+ uint32_t val)
+{
+ BUG_ON(nghd == NULL);
+
+ return fal_port_max_frame_size_set(0, nghd->mac_id, val);
+}
+
+/*
+ * syn_set_mac_speed()
+ */
+static int32_t syn_set_mac_speed(struct nss_gmac_hal_dev *nghd,
+ uint32_t mac_speed)
+{
+ struct net_device *netdev;
+ BUG_ON(nghd == NULL);
+
+ netdev = nghd->netdev;
+
+ netdev_warn(netdev, "API deprecated\n");
+ return 0;
+}
+
+/*
+ * syn_get_mac_speed()
+ */
+static uint32_t syn_get_mac_speed(struct nss_gmac_hal_dev *nghd)
+{
+ struct net_device *netdev;
+ BUG_ON(nghd == NULL);
+
+ netdev = nghd->netdev;
+
+ netdev_warn(netdev, "API deprecated\n");
+ return 0;
+}
+
+/*
+ * syn_set_duplex_mode()
+ */
+static void syn_set_duplex_mode(struct nss_gmac_hal_dev *nghd,
+ uint8_t duplex_mode)
+{
+ struct net_device *netdev;
+ BUG_ON(nghd == NULL);
+
+ netdev = nghd->netdev;
+
+ netdev_warn(netdev, "API deprecated\n");
+}
+
+/*
+ * syn_get_duplex_mode()
+ */
+static uint8_t syn_get_duplex_mode(struct nss_gmac_hal_dev *nghd)
+{
+ struct net_device *netdev;
+ BUG_ON(nghd == NULL);
+
+ netdev = nghd->netdev;
+
+ netdev_warn(netdev, "API deprecated\n");
+ return 0;
+}
+
+/*
+ * syn_get_netdev_stats()
+ */
+static int syn_get_netdev_stats(struct nss_gmac_hal_dev *nghd,
+ struct rtnl_link_stats64 *stats)
+{
+ struct syn_hal_dev *shd;
+ struct nss_dp_hal_gmac_stats *ndo_stats;
+
+ BUG_ON(nghd == NULL);
+
+ shd = (struct syn_hal_dev *)nghd;
+ ndo_stats = &(shd->stats.stats);
+
+ /*
+ * Read stats from the registered dataplane.
+ */
+ if (syn_get_stats(nghd))
+ return -1;
+
+ stats->rx_packets = ndo_stats->rx_packets;
+ stats->rx_bytes = ndo_stats->rx_bytes;
+ stats->rx_errors = ndo_stats->rx_errors;
+ stats->rx_dropped = ndo_stats->rx_errors;
+ stats->rx_length_errors = ndo_stats->rx_length_errors;
+ stats->rx_over_errors = ndo_stats->mmc_rx_overflow_errors;
+ stats->rx_crc_errors = ndo_stats->mmc_rx_crc_errors;
+ stats->rx_frame_errors = ndo_stats->rx_dribble_bit_errors;
+ stats->rx_fifo_errors = ndo_stats->fifo_overflows;
+ stats->rx_missed_errors = ndo_stats->rx_missed;
+ stats->collisions = ndo_stats->tx_collisions + ndo_stats->rx_late_collision_errors;
+ stats->tx_packets = ndo_stats->tx_packets;
+ stats->tx_bytes = ndo_stats->tx_bytes;
+ stats->tx_errors = ndo_stats->tx_errors;
+ stats->tx_dropped = ndo_stats->tx_dropped;
+ stats->tx_carrier_errors = ndo_stats->tx_loss_of_carrier_errors + ndo_stats->tx_no_carrier_errors;
+ stats->tx_fifo_errors = ndo_stats->tx_underflow_errors;
+ stats->tx_window_errors = ndo_stats->tx_late_collision_errors;
+
+ return 0;
+}
+
+/*
+ * syn_get_eth_stats()
+ */
+static int32_t syn_get_eth_stats(struct nss_gmac_hal_dev *nghd,
+ uint64_t *data)
+{
+ struct syn_hal_dev *shd;
+ struct nss_dp_gmac_stats *stats;
+ uint8_t *p = NULL;
+ int i;
+
+ BUG_ON(nghd == NULL);
+
+ shd = (struct syn_hal_dev *)nghd;
+ stats = &(shd->stats);
+
+ /*
+ * Read stats from the registered dataplane.
+ */
+ if (syn_get_stats(nghd))
+ return -1;
+
+ for (i = 0; i < SYN_STATS_LEN; i++) {
+ p = ((uint8_t *)(stats) +
+ syn_gstrings_stats[i].stat_offset);
+ data[i] = *(uint32_t *)p;
+ }
+
+ return 0;
+}
+
+/*
+ * syn_get_strset_count()
+ */
+static int32_t syn_get_strset_count(struct nss_gmac_hal_dev *nghd,
+ int32_t sset)
+{
+ struct net_device *netdev;
+ BUG_ON(nghd == NULL);
+
+ netdev = nghd->netdev;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return SYN_STATS_LEN;
+ }
+
+ netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
+ return -EPERM;
+}
+
+/*
+ * syn_get_strings()
+ */
+static int32_t syn_get_strings(struct nss_gmac_hal_dev *nghd,
+ int32_t stringset, uint8_t *data)
+{
+ struct net_device *netdev;
+ int i;
+
+ BUG_ON(nghd == NULL);
+
+ netdev = nghd->netdev;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < SYN_STATS_LEN; i++) {
+ memcpy(data, syn_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+
+ default:
+ netdev_dbg(netdev, "%s: Invalid string set\n", __func__);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/*
+ * syn_send_pause_frame()
+ */
+static void syn_send_pause_frame(struct nss_gmac_hal_dev *nghd)
+{
+ BUG_ON(nghd == NULL);
+
+ syn_send_tx_pause_frame(nghd);
+}
+
+/*
+ * syn_set_mac_address()
+ */
+static void syn_set_mac_address(struct nss_gmac_hal_dev *nghd,
+ uint8_t *macaddr)
+{
+ uint32_t data;
+
+ BUG_ON(nghd == NULL);
+
+ if (!macaddr) {
+ netdev_warn(nghd->netdev, "macaddr is not valid.\n");
+ return;
+ }
+
+ data = (macaddr[5] << 8) | macaddr[4] | SYN_MAC_ADDR_HIGH_AE;
+ hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH, data);
+ data = (macaddr[3] << 24) | (macaddr[2] << 16) | (macaddr[1] << 8)
+ | macaddr[0];
+ hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW, data);
+}
+
+/*
+ * syn_get_mac_address()
+ */
+static void syn_get_mac_address(struct nss_gmac_hal_dev *nghd,
+ uint8_t *macaddr)
+{
+ uint32_t data;
+
+ BUG_ON(nghd == NULL);
+
+ if (!macaddr) {
+ netdev_warn(nghd->netdev, "macaddr is not valid.\n");
+ return;
+ }
+
+ data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH);
+ macaddr[5] = (data >> 8) & 0xff;
+ macaddr[4] = (data) & 0xff;
+
+ data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW);
+ macaddr[3] = (data >> 24) & 0xff;
+ macaddr[2] = (data >> 16) & 0xff;
+ macaddr[1] = (data >> 8) & 0xff;
+ macaddr[0] = (data) & 0xff;
+}
+
+/*
+ * syn_dma_init()
+ * Initialize settings for GMAC DMA and AXI bus.
+ */
+static void syn_dma_init(struct nss_gmac_hal_dev *nghd)
+{
+ struct net_device *ndev = nghd->netdev;
+ struct nss_dp_dev *dp_priv = netdev_priv(ndev);
+
+ /*
+ * Enable SoC specific GMAC clocks.
+ */
+ nss_dp_hal_clk_enable(dp_priv);
+
+ /*
+ * Configure DMA registers.
+ */
+ syn_dma_bus_mode_init(nghd);
+ syn_dma_axi_bus_mode_init(nghd);
+ syn_dma_operation_mode_init(nghd);
+}
+
+/*
+ * syn_init()
+ */
+static void *syn_init(struct gmac_hal_platform_data *gmacpdata)
+{
+ struct syn_hal_dev *shd = NULL;
+ struct net_device *ndev = NULL;
+ struct nss_dp_dev *dp_priv = NULL;
+ struct resource *res;
+
+ ndev = gmacpdata->netdev;
+ dp_priv = netdev_priv(ndev);
+
+ res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ netdev_dbg(ndev, "Resource get failed.\n");
+ return NULL;
+ }
+
+ shd = (struct syn_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev,
+ sizeof(struct syn_hal_dev),
+ GFP_KERNEL);
+ if (!shd) {
+ netdev_dbg(ndev, "kzalloc failed. Returning...\n");
+ return NULL;
+ }
+
+ shd->nghd.mac_reg_len = resource_size(res);
+ shd->nghd.memres = devm_request_mem_region(&dp_priv->pdev->dev,
+ res->start,
+ resource_size(res),
+ ndev->name);
+ if (!shd->nghd.memres) {
+ netdev_dbg(ndev, "Request mem region failed. Returning...\n");
+ devm_kfree(&dp_priv->pdev->dev, shd);
+ return NULL;
+ }
+
+ /*
+ * Save netdev context in syn HAL context
+ */
+ shd->nghd.netdev = gmacpdata->netdev;
+ shd->nghd.mac_id = gmacpdata->macid;
+ shd->nghd.duplex_mode = DUPLEX_FULL;
+
+ set_bit(__NSS_DP_RXCSUM, &dp_priv->flags);
+
+ /*
+ * Populate the mac base addresses
+ */
+ shd->nghd.mac_base =
+ devm_ioremap_nocache(&dp_priv->pdev->dev, res->start,
+ resource_size(res));
+ if (!shd->nghd.mac_base) {
+ netdev_dbg(ndev, "ioremap fail.\n");
+ devm_kfree(&dp_priv->pdev->dev, shd);
+ return NULL;
+ }
+
+ spin_lock_init(&shd->nghd.slock);
+
+ netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n",
+ gmacpdata->reg_len,
+ ndev->base_addr,
+ shd->nghd.mac_base);
+
+ syn_disable_interrupt_all(&shd->nghd);
+ syn_dma_init(&shd->nghd);
+ syn_ipc_offload_init(&shd->nghd);
+ syn_promisc_enable(&shd->nghd);
+ syn_broadcast_enable(&shd->nghd);
+ syn_multicast_enable(&shd->nghd);
+ syn_rx_enable(&shd->nghd);
+ syn_tx_enable(&shd->nghd);
+
+ /*
+ * Reset MIB Stats
+ */
+ if (fal_mib_port_flush_counters(0, shd->nghd.mac_id)) {
+ netdev_dbg(ndev, "MIB stats Reset fail.\n");
+ }
+
+ return (struct nss_gmac_hal_dev *)shd;
+}
+
+/*
+ * syn_exit()
+ */
+static void syn_exit(struct nss_gmac_hal_dev *nghd)
+{
+ struct nss_dp_dev *dp_priv = NULL;
+
+ dp_priv = netdev_priv(nghd->netdev);
+ devm_iounmap(&dp_priv->pdev->dev,
+ (void *)nghd->mac_base);
+ devm_release_mem_region(&dp_priv->pdev->dev,
+ (nghd->memres)->start,
+ nghd->mac_reg_len);
+
+ nghd->memres = NULL;
+ nghd->mac_base = NULL;
+}
+
+struct nss_gmac_hal_ops syn_hal_ops = {
+ .init = &syn_init,
+ .start = NULL,
+ .stop = NULL,
+ .exit = &syn_exit,
+ .setmacaddr = &syn_set_mac_address,
+ .getmacaddr = &syn_get_mac_address,
+ .rxflowcontrol = &syn_rx_flow_control,
+ .txflowcontrol = &syn_tx_flow_control,
+ .setspeed = &syn_set_mac_speed,
+ .getspeed = &syn_get_mac_speed,
+ .setduplex = &syn_set_duplex_mode,
+ .getduplex = &syn_get_duplex_mode,
+ .setmaxframe = &syn_set_max_frame_size,
+ .getmaxframe = &syn_get_max_frame_size,
+ .getndostats = &syn_get_netdev_stats,
+ .getssetcount = &syn_get_strset_count,
+ .getstrings = &syn_get_strings,
+ .getethtoolstats = &syn_get_eth_stats,
+ .sendpause = &syn_send_pause_frame,
+};
diff --git a/hal/gmac_hal_ops/syn/gmac/syn_reg.h b/hal/gmac_hal_ops/syn/gmac/syn_reg.h
new file mode 100644
index 0000000..aba916e
--- /dev/null
+++ b/hal/gmac_hal_ops/syn/gmac/syn_reg.h
@@ -0,0 +1,531 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __SYN_REG_H__
+#define __SYN_REG_H__
+
+/*
+ * MAC register offset
+ */
+#define SYN_MAC_CONFIGURATION 0x0000
+#define SYN_MAC_FRAME_FILTER 0x0004
+#define SYN_MAC_FLOW_CONTROL 0x0018
+#define SYN_VLAN_TAG 0x001C
+#define SYN_VERSION 0x0020
+#define SYN_DEBUG 0x0024
+#define SYN_REMOTE_WAKE_UP_FRAME_FILTER 0x0028
+#define SYN_PMT_CONTROL_STATUS 0x002C
+#define SYN_LPI_CONTROL_STATUS 0x0030
+#define SYN_LPI_TIMERS_CONTROL 0x0034
+#define SYN_INTERRUPT_STATUS 0x0038
+#define SYN_INTERRUPT_MASK 0x003C
+
+/*
+ * MAC address register offset
+ */
+#define SYN_MAC_ADDR0_HIGH 0x0040
+#define SYN_MAC_ADDR0_LOW 0x0044
+#define SYN_MAC_ADDR1_HIGH 0x0048
+#define SYN_MAC_ADDR1_LOW 0x004C
+#define SYN_MAC_ADDR2_HIGH 0x0050
+#define SYN_MAC_ADDR2_LOW 0x0054
+#define SYN_MAC_ADDR3_HIGH 0x0058
+#define SYN_MAC_ADDR3_LOW 0x005C
+#define SYN_MAC_ADDR4_HIGH 0x0060
+#define SYN_MAC_ADDR4_LOW 0x0064
+
+/*
+ * Watchdog timeout register
+ */
+#define SYN_WDOG_TIMEOUT 0x00DC
+
+/*
+ * Mac Management Counters (MMC) register offset
+ */
+#define SYN_MMC_CONTROL 0x0100
+#define SYN_MMC_RX_INTERRUPT 0x0104
+#define SYN_MMC_TX_INTERRUPT 0x0108
+#define SYN_MMC_RX_INTERRUPT_MASK 0x010C
+#define SYN_MMC_TX_INTERRUPT_MASK 0x0110
+#define SYN_MMC_IPC_RX_INTR_MASK 0x0200
+
+/*
+ * DMA Register offset
+ */
+#define SYN_DMA_BUS_MODE 0x1000
+#define SYN_DMA_TX_POLL_DEMAND 0x1004
+#define SYN_DMA_RX_POLL_DEMAND 0x1008
+#define SYN_DMA_RX_DESCRIPTOR_LIST_ADDRESS 0x100C
+#define SYN_DMA_TX_DESCRIPTOR_LIST_ADDRESS 0x1010
+#define SYN_DMA_STATUS 0x1014
+#define SYN_DMA_OPERATION_MODE 0x1018
+#define SYN_DMA_INT_ENABLE 0x101C
+#define SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER 0x1020
+#define SYN_DMA_RX_INTERRUPT_WATCHDOG_TIMER 0x1024
+#define SYN_DMA_AXI_BUS_MODE 0x1028
+#define SYN_DMA_AHB_OR_AXI_STATUS 0x102C
+#define SYN_DMA_CURRENT_HOST_TX_DESCRIPTOR 0x1048
+#define SYN_DMA_CURRENT_HOST_RX_DESCRIPTOR 0x104C
+#define SYN_DMA_CURRENT_HOST_TX_BUFFER_ADDRESS 0x1050
+#define SYN_DMA_CURRENT_HOST_RX_BUFFER_ADDRESS 0x1054
+
+/*
+ * Optional HW feature register
+ */
+#define SYN_HW_FEATURE 0x1058
+
+/*
+ * Register Bit Definitions
+ */
+
+/*
+ * SYN_MAC_CONFIGURATION = 0x0000, MAC config Register Layout
+ */
+enum syn_mac_config_reg {
+ SYN_MAC_TWOKPE = 0x08000000, /* Support for 2K packets */
+ SYN_MAC_TWOKPE_ENABLE = 0x08000000,
+ SYN_MAC_TWOKPE_DISABLE = 0x00000000,
+ SYN_MAC_CST = 0x02000000, /* (CST) CRC Stripping for Type Frames */
+ SYN_MAC_CST_ENABLE = 0x02000000,
+ SYN_MAC_CST_DISABLE = 0x02000000,
+ SYN_MAC_TC = 0x01000000, /* (TC) Transmit configuration */
+ SYN_MAC_WATCHDOG = 0x00800000,
+ SYN_MAC_WATCHDOG_ENABLE = 0x00000000, /* Enable watchdog timer */
+ SYN_MAC_WATCHDOG_DISABLE = 0x00800000, /* (WD)Disable watchdog timer on Rx */
+ SYN_MAC_JABBER = 0x00400000,
+ SYN_MAC_JABBER_ENABLE = 0x00000000, /* Enable jabber timer */
+ SYN_MAC_JABBER_DISABLE = 0x00400000, /* (JD)Disable jabber timer on Tx */
+ SYN_MAC_FRAME_BURST = 0x00200000,
+ SYN_MAC_FRAME_BURST_ENABLE = 0x00200000, /* (BE)Enable frame bursting
+ during Tx */
+ SYN_MAC_FRAME_BURST_DISABLE = 0x00000000, /* Disable frame bursting */
+ SYN_MAC_JUMBO_FRAME = 0x00100000,
+ SYN_MAC_JUMBO_FRAME_ENABLE = 0x00100000, /* (JE)Enable jumbo frame for Rx */
+ SYN_MAC_JUMBO_FRAME_DISABLE = 0x00000000, /* Disable jumbo frame */
+ SYN_MAC_INTER_FRAME_GAP7 = 0x000E0000, /* (IFG) Config7 - 40bit times */
+ SYN_MAC_INTER_FRAME_GAP6 = 0x000C0000, /* (IFG) Config6 - 48bit times */
+ SYN_MAC_INTER_FRAME_GAP5 = 0x000A0000, /* (IFG) Config5 - 56bit times */
+ SYN_MAC_INTER_FRAME_GAP4 = 0x00080000, /* (IFG) Config4 - 64bit times */
+ SYN_MAC_INTER_FRAME_GAP3 = 0x00060000, /* (IFG) Config3 - 72bit times */
+ SYN_MAC_INTER_FRAME_GAP2 = 0x00040000, /* (IFG) Config2 - 80bit times */
+ SYN_MAC_INTER_FRAME_GAP1 = 0x00020000, /* (IFG) Config1 - 88bit times */
+ SYN_MAC_INTER_FRAME_GAP0 = 0x00000000, /* (IFG) Config0 - 96bit times */
+ SYN_MAC_DISABLE_CRS = 0x00010000, /* (DCRS) Disable Carrier Sense During Transmission */
+ SYN_MAC_MII_GMII = 0x00008000,
+ SYN_MAC_SELECT_MII = 0x00008000, /* (PS)Port Select-MII mode */
+ SYN_MAC_SELECT_GMII = 0x00000000, /* GMII mode */
+ SYN_MAC_FE_SPEED100 = 0x00004000, /* (FES)Fast Ethernet speed 100Mbps */
+ SYN_MAC_FE_SPEED = 0x00004000, /* (FES)Fast Ethernet speed 100Mbps */
+ SYN_MAC_FE_SPEED10 = 0x00000000, /* (FES)Fast Ethernet speed 10Mbps */
+ SYN_MAC_RX_OWN = 0x00002000,
+ SYN_MAC_DISABLE_RX_OWN = 0x00002000, /* (DO)Disable receive own packets */
+ SYN_MAC_ENABLE_RX_OWN = 0x00000000, /* Enable receive own packets */
+ SYN_MAC_LOOPBACK = 0x00001000,
+ SYN_MAC_LOOPBACK_ON = 0x00001000, /* (LM)Loopback mode for GMII/MII */
+ SYN_MAC_LOOPBACK_OFF = 0x00000000, /* Normal mode */
+ SYN_MAC_DUPLEX = 0x00000800,
+ SYN_MAC_FULL_DUPLEX = 0x00000800, /* (DM)Full duplex mode */
+ SYN_MAC_HALF_DUPLEX = 0x00000000, /* Half duplex mode */
+ SYN_MAC_RX_IPC_OFFLOAD = 0x00000400, /* IPC checksum offload */
+ SYN_MAC_RX_IPC_OFFLOAD_ENABLE = 0x00000400,
+ SYN_MAC_RX_IPC_OFFLOAD_DISABLE = 0x00000000,
+ SYN_MAC_RETRY = 0x00000200,
+ SYN_MAC_RETRY_DISABLE = 0x00000200, /* (DR)Disable Retry */
+ SYN_MAC_RETRY_ENABLE = 0x00000000, /* Enable retransmission as per BL */
+ SYN_MAC_LINK_UP = 0x00000100, /* (LUD)Link UP */
+ SYN_MAC_LINK_DOWN = 0x00000100, /* Link Down */
+ SYN_MAC_PAD_CRC_STRIP = 0x00000080,
+ SYN_MAC_PAD_CRC_STRIP_ENABLE = 0x00000080, /* (ACS) Automatic Pad/Crc strip enable */
+ SYN_MAC_PAD_CRC_STRIP_DISABLE = 0x00000000, /* Automatic Pad/Crc stripping disable */
+ SYN_MAC_BACKOFF_LIMIT = 0x00000060,
+ SYN_MAC_BACKOFF_LIMIT3 = 0x00000060, /* (BL)Back-off limit in HD mode */
+ SYN_MAC_BACKOFF_LIMIT2 = 0x00000040,
+ SYN_MAC_BACKOFF_LIMIT1 = 0x00000020,
+ SYN_MAC_BACKOFF_LIMIT0 = 0x00000000,
+ SYN_MAC_DEFERRAL_CHECK = 0x00000010,
+ SYN_MAC_DEFERRAL_CHECK_ENABLE = 0x00000010, /* (DC)Deferral check enable in HD mode */
+ SYN_MAC_DEFERRAL_CHECK_DISABLE = 0x00000000, /* Deferral check disable */
+ SYN_MAC_TX = 0x00000008,
+ SYN_MAC_TX_ENABLE = 0x00000008, /* (TE)Transmitter enable */
+ SYN_MAC_TX_DISABLE = 0x00000000, /* Transmitter disable */
+ SYN_MAC_RX = 0x00000004,
+ SYN_MAC_RX_ENABLE = 0x00000004, /* (RE)Receiver enable */
+ SYN_MAC_RX_DISABLE = 0x00000000, /* Receiver disable */
+ SYN_MAC_PRELEN_RESERVED = 0x00000003, /* Preamble Length for Transmit Frames */
+ SYN_MAC_PRELEN_3B = 0x00000002,
+ SYN_MAC_PRELEN_5B = 0x00000001,
+ SYN_MAC_PRELEN_7B = 0x00000000,
+};
+
+/*
+ * SYN_MAC_FRAME_FILTER = 0x0004, Mac frame filtering controls Register
+ */
+enum syn_mac_frame_filter_reg {
+ SYN_MAC_FILTER = 0x80000000,
+ SYN_MAC_FILTER_OFF = 0x80000000, /* (RA)Receive all incoming packets */
+ SYN_MAC_FILTER_ON = 0x00000000, /* Receive filtered pkts only */
+ SYN_MAC_HASH_PERFECT_FILTER = 0x00000400, /* Hash or Perfect Filter enable */
+ SYN_MAC_SRC_ADDR_FILTER = 0x00000200,
+ SYN_MAC_SRC_ADDR_FILTER_ENABLE = 0x00000200, /* (SAF)Source Address Filter enable */
+ SYN_MAC_SRC_ADDR_FILTER_DISABLE = 0x00000000,
+ SYN_MAC_SRC_INVA_ADDR_FILTER = 0x00000100,
+ SYN_MAC_SRC_INV_ADDR_FILTER_EN = 0x00000100, /* (SAIF)Inv Src Addr Filter enable */
+ SYN_MAC_SRC_INV_ADDR_FILTER_DIS = 0x00000000,
+ SYN_MAC_PASS_CONTROL = 0x000000C0,
+ SYN_MAC_PASS_CONTROL3 = 0x000000C0, /* (PCF)Forwards ctrl frames that pass AF */
+ SYN_MAC_PASS_CONTROL2 = 0x00000080, /* Forwards all control frames
+ even if they fail the AF */
+ SYN_MAC_PASS_CONTROL1 = 0x00000040, /* Forwards all control frames except
+ PAUSE control frames to application
+ even if they fail the AF */
+ SYN_MAC_PASS_CONTROL0 = 0x00000000, /* Don't pass control frames */
+ SYN_MAC_BROADCAST = 0x00000020,
+ SYN_MAC_BROADCAST_DISABLE = 0x00000020, /* (DBF)Disable Rx of broadcast frames */
+ SYN_MAC_BROADCAST_ENABLE = 0x00000000, /* Enable broadcast frames */
+ SYN_MAC_MULTICAST_FILTER = 0x00000010,
+ SYN_MAC_MULTICAST_FILTER_OFF = 0x00000010, /* (PM) Pass all multicast packets */
+ SYN_MAC_MULTICAST_FILTER_ON = 0x00000000, /* Pass filtered multicast packets */
+ SYN_MAC_DEST_ADDR_FILTER = 0x00000008,
+ SYN_MAC_DEST_ADDR_FILTER_INV = 0x00000008, /* (DAIF)Inverse filtering for DA */
+ SYN_MAC_DEST_ADDR_FILTER_NOR = 0x00000000, /* Normal filtering for DA */
+ SYN_MAC_MCAST_HASH_FILTER = 0x00000004,
+	SYN_MAC_MCAST_HASH_FILTER_ON = 0x00000004,	/* (HMC)perform multicast hash filtering */
+ SYN_MAC_MCAST_HASH_FILTER_OFF = 0x00000000, /* perfect filtering only */
+ SYN_MAC_UCAST_HASH_FILTER = 0x00000002,
+ SYN_MAC_UCAST_HASH_FILTER_ON = 0x00000002, /* (HUC)Unicast Hash filtering only */
+ SYN_MAC_UCAST_HASH_FILTER_OFF = 0x00000000, /* perfect filtering only */
+ SYN_MAC_PROMISCUOUS_MODE = 0x00000001,
+ SYN_MAC_PROMISCUOUS_MODE_ON = 0x00000001, /* Receive all frames */
+ SYN_MAC_PROMISCUOUS_MODE_OFF = 0x00000000, /* Receive filtered packets only */
+};
+
+/*
+ * SYN_MAC_FLOW_CONTROL = 0x0018, Flow control Register Layout
+ */
+enum syn_mac_flow_control_reg {
+ SYN_MAC_FC_PAUSE_TIME_MASK = 0xFFFF0000, /* (PT) PAUSE TIME field
+ in the control frame */
+ SYN_MAC_FC_PAUSE_TIME_SHIFT = 16,
+ SYN_MAC_FC_PAUSE_LOW_THRESH = 0x00000030,
+ SYN_MAC_FC_PAUSE_LOW_THRESH3 = 0x00000030, /* (PLT)thresh for pause
+ tmr 256 slot time */
+ SYN_MAC_FC_PAUSE_LOW_THRESH2 = 0x00000020, /* 144 slot time */
+ SYN_MAC_FC_PAUSE_LOW_THRESH1 = 0x00000010, /* 28 slot time */
+ SYN_MAC_FC_PAUSE_LOW_THRESH0 = 0x00000000, /* 4 slot time */
+ SYN_MAC_FC_UNICAST_PAUSE_FRAME = 0x00000008,
+ SYN_MAC_FC_UNICAST_PAUSE_FRAME_ON = 0x00000008, /* (UP)Detect pause frame
+ with unicast addr. */
+ SYN_MAC_FC_UNICAST_PAUSE_FRAME_OFF = 0x00000000,/* Detect only pause frame
+ with multicast addr. */
+ SYN_MAC_FC_RX_FLOW_CONTROL = 0x00000004,
+ SYN_MAC_FC_RX_FLOW_CONTROL_ENABLE = 0x00000004, /* (RFE)Enable Rx flow control */
+ SYN_MAC_FC_RX_FLOW_CONTROL_DISABLE = 0x00000000,/* Disable Rx flow control */
+ SYN_MAC_FC_TX_FLOW_CONTROL = 0x00000002,
+ SYN_MAC_FC_TX_FLOW_CONTROL_ENABLE = 0x00000002, /* (TFE)Enable Tx flow control */
+ SYN_MAC_FC_TX_FLOW_CONTROL_DISABLE = 0x00000000,/* Disable flow control */
+ SYN_MAC_FC_FLOW_CONTROL_BACK_PRESSURE = 0x00000001,
+ SYN_MAC_FC_SEND_PAUSE_FRAME = 0x00000001, /* (FCB/PBA)send pause frm/Apply
+ back pressure */
+};
+
+/*
+ * SYN_MAC_ADDR_HIGH Register
+ */
+enum syn_mac_addr_high {
+ SYN_MAC_ADDR_HIGH_AE = 0x80000000,
+};
+
+/*
+ * SYN_DMA_BUS_MODE = 0x0000, CSR0 - Bus Mode
+ */
+enum syn_dma_bus_mode_reg {
+ SYN_DMA_FIXED_BURST_ENABLE = 0x00010000, /* (FB)Fixed Burst SINGLE, INCR4,
+ INCR8 or INCR16 */
+ SYN_DMA_FIXED_BURST_DISABLE = 0x00000000, /* SINGLE, INCR */
+ SYN_DMA_TX_PRIORITY_RATIO11 = 0x00000000, /* (PR)TX:RX DMA priority ratio 1:1 */
+ SYN_DMA_TX_PRIORITY_RATIO21 = 0x00004000, /* (PR)TX:RX DMA priority ratio 2:1 */
+ SYN_DMA_TX_PRIORITY_RATIO31 = 0x00008000, /* (PR)TX:RX DMA priority ratio 3:1 */
+ SYN_DMA_TX_PRIORITY_RATIO41 = 0x0000C000, /* (PR)TX:RX DMA priority ratio 4:1 */
+ SYN_DMA_ADDRESS_ALIGNED_BEATS = 0x02000000, /* Address Aligned beats */
+	SYN_DMA_BURST_LENGTHX8 = 0x01000000,	/* When set mutiplies the PBL by 8 */
+ SYN_DMA_BURST_LENGTH256 = 0x01002000, /* (dma_burst_lengthx8 |
+ dma_burst_length32) = 256 */
+ SYN_DMA_BURST_LENGTH128 = 0x01001000, /* (dma_burst_lengthx8 |
+ dma_burst_length16) = 128 */
+ SYN_DMA_BURST_LENGTH64 = 0x01000800, /* (dma_burst_lengthx8 |
+ dma_burst_length8) = 64 */
+ /* (PBL) programmable burst length */
+ SYN_DMA_BURST_LENGTH32 = 0x00002000, /* Dma burst length = 32 */
+ SYN_DMA_BURST_LENGTH16 = 0x00001000, /* Dma burst length = 16 */
+ SYN_DMA_BURST_LENGTH8 = 0x00000800, /* Dma burst length = 8 */
+ SYN_DMA_BURST_LENGTH4 = 0x00000400, /* Dma burst length = 4 */
+ SYN_DMA_BURST_LENGTH2 = 0x00000200, /* Dma burst length = 2 */
+ SYN_DMA_BURST_LENGTH1 = 0x00000100, /* Dma burst length = 1 */
+ SYN_DMA_BURST_LENGTH0 = 0x00000000, /* Dma burst length = 0 */
+
+ SYN_DMA_DESCRIPTOR8_WORDS = 0x00000080, /* Enh Descriptor works 1=>
+ 8 word descriptor */
+ SYN_DMA_DESCRIPTOR4_WORDS = 0x00000000, /* Enh Descriptor works 0=>
+ 4 word descriptor */
+ SYN_DMA_DESCRIPTOR_SKIP16 = 0x00000040, /* (DSL)Descriptor skip length (no.of dwords) */
+ SYN_DMA_DESCRIPTOR_SKIP8 = 0x00000020, /* between two unchained descriptors */
+ SYN_DMA_DESCRIPTOR_SKIP4 = 0x00000010,
+ SYN_DMA_DESCRIPTOR_SKIP2 = 0x00000008,
+ SYN_DMA_DESCRIPTOR_SKIP1 = 0x00000004,
+ SYN_DMA_DESCRIPTOR_SKIP0 = 0x00000000,
+ SYN_DMA_ARBIT_RR = 0x00000000, /* (DA) DMA RR arbitration */
+ SYN_DMA_ARBIT_PR = 0x00000002, /* Rx has priority over Tx */
+ SYN_DMA_RESET_ON = 0x00000001, /* (SWR)Software Reset DMA engine */
+ SYN_DMA_RESET_OFF = 0x00000000,
+};
+
+/*
+ * SYN_DMA_STATUS = 0x0014, CSR5 - Dma status Register
+ */
+enum syn_dma_status_reg {
+ SYN_DMA_GMAC_PMT_INTR = 0x10000000, /* (GPI)Gmac subsystem interrupt */
+ SYN_DMA_GMAC_MMC_INTR = 0x08000000, /* (GMI)Gmac MMC subsystem interrupt */
+ SYN_DMA_GMAC_LINE_INTF_INTR = 0x04000000, /* Line interface interrupt */
+ SYN_DMA_ERROR_BIT2 = 0x02000000, /* (EB)Error bits 0-data buffer, 1-desc access */
+	SYN_DMA_ERROR_BIT1 = 0x01000000,	/* (EB)Error bits 0-write transfer, 1-read transfer */
+ SYN_DMA_ERROR_BIT0 = 0x00800000, /* (EB)Error bits 0-Rx DMA, 1-Tx DMA */
+ SYN_DMA_TX_STATE = 0x00700000, /* (TS)Transmit process state */
+ SYN_DMA_TX_STOPPED = 0x00000000, /* Stopped - Reset or Stop Tx Command issued */
+ SYN_DMA_TX_FETCHING = 0x00100000, /* Running - fetching the Tx descriptor */
+ SYN_DMA_TX_WAITING = 0x00200000, /* Running - waiting for status */
+ SYN_DMA_TX_READING = 0x00300000, /* Running - reading the data from host memory */
+	SYN_DMA_TX_SUSPENDED = 0x00600000,	/* Suspended - Tx Descriptor unavailable */
+	SYN_DMA_TX_CLOSING = 0x00700000,	/* Running - closing Tx descriptor */
+ SYN_DMA_RX_STATE = 0x000E0000, /* (RS)Receive process state */
+ SYN_DMA_RX_STOPPED = 0x00000000, /* Stopped - Reset or Stop Rx Command issued */
+ SYN_DMA_RX_FETCHING = 0x00020000, /* Running - fetching the Rx descriptor */
+ SYN_DMA_RX_WAITING = 0x00060000, /* Running - waiting for packet */
+ SYN_DMA_RX_SUSPENDED = 0x00080000, /* Suspended - Rx Descriptor unavailable */
+ SYN_DMA_RX_CLOSING = 0x000A0000, /* Running - closing descriptor */
+ SYN_DMA_RX_QUEUING = 0x000E0000, /* Running - queuing the receive frame into host memory */
+ SYN_DMA_INT_NORMAL = 0x00010000, /* (NIS)Normal interrupt summary */
+ SYN_DMA_INT_ABNORMAL = 0x00008000, /* (AIS)Abnormal interrupt summary */
+ SYN_DMA_INT_EARLY_RX = 0x00004000, /* Early receive interrupt (Normal) */
+ SYN_DMA_INT_BUS_ERROR = 0x00002000, /* Fatal bus error (Abnormal) */
+ SYN_DMA_INT_EARLY_TX = 0x00000400, /* Early transmit interrupt (Abnormal) */
+ SYN_DMA_INT_RX_WDOG_TO = 0x00000200, /* Receive Watchdog Timeout (Abnormal) */
+ SYN_DMA_INT_RX_STOPPED = 0x00000100, /* Receive process stopped (Abnormal) */
+ SYN_DMA_INT_RX_NO_BUFFER = 0x00000080, /* RX buffer unavailable (Abnormal) */
+ SYN_DMA_INT_RX_COMPLETED = 0x00000040, /* Completion of frame RX (Normal) */
+ SYN_DMA_INT_TX_UNDERFLOW = 0x00000020, /* Transmit underflow (Abnormal) */
+ SYN_DMA_INT_RCV_OVERFLOW = 0x00000010, /* RX Buffer overflow interrupt */
+ SYN_DMA_INT_TX_JABBER_TO = 0x00000008, /* TX Jabber Timeout (Abnormal) */
+ SYN_DMA_INT_TX_NO_BUFFER = 0x00000004, /* TX buffer unavailable (Normal) */
+ SYN_DMA_INT_TX_STOPPED = 0x00000002, /* TX process stopped (Abnormal) */
+ SYN_DMA_INT_TX_COMPLETED = 0x00000001, /* Transmit completed (Normal) */
+};
+
+/*
+ * SYN_DMA_OPERATION_MODE = 0x0018, CSR6 - Dma Operation Mode Register
+ */
+enum syn_dma_operation_mode_reg {
+ SYN_DMA_DISABLE_DROP_TCP_CS = 0x04000000, /* (DT) Dis. drop. of tcp/ip
+ CS error frames */
+ SYN_DMA_RX_STORE_AND_FORWARD = 0x02000000, /* Rx (SF)Store and forward */
+ SYN_DMA_RX_FRAME_FLUSH = 0x01000000, /* Disable Receive Frame Flush*/
+ SYN_DMA_TX_STORE_AND_FORWARD = 0x00200000, /* Tx (SF)Store and forward */
+ SYN_DMA_FLUSH_TX_FIFO = 0x00100000, /* (FTF)Tx FIFO controller
+ is reset to default */
+	SYN_DMA_TX_THRESH_CTRL = 0x0001C000,	/* (TTC)Controls the Thresh of
						   MTL tx Fifo */
+	SYN_DMA_TX_THRESH_CTRL16 = 0x0001C000,	/* (TTC)Controls the Thresh of
						   MTL tx Fifo 16 */
+	SYN_DMA_TX_THRESH_CTRL24 = 0x00018000,	/* (TTC)Controls the Thresh of
						   MTL tx Fifo 24 */
+	SYN_DMA_TX_THRESH_CTRL32 = 0x00014000,	/* (TTC)Controls the Thresh of
						   MTL tx Fifo 32 */
+	SYN_DMA_TX_THRESH_CTRL40 = 0x00010000,	/* (TTC)Controls the Thresh of
						   MTL tx Fifo 40 */
+	SYN_DMA_TX_THRESH_CTRL256 = 0x0000c000,	/* (TTC)Controls the Thresh of
						   MTL tx Fifo 256 */
+	SYN_DMA_TX_THRESH_CTRL192 = 0x00008000,	/* (TTC)Controls the Thresh of
						   MTL tx Fifo 192 */
+	SYN_DMA_TX_THRESH_CTRL128 = 0x00004000,	/* (TTC)Controls the Thresh of
						   MTL tx Fifo 128 */
+	SYN_DMA_TX_THRESH_CTRL64 = 0x00000000,	/* (TTC)Controls the Thresh of
						   MTL tx Fifo 64 */
+ SYN_DMA_TX_START = 0x00002000, /* (ST)Start/Stop transmission*/
+ SYN_DMA_RX_FLOW_CTRL_DEACT = 0x00401800, /* (RFD)Rx flow control
+ deact. Threshold */
+ SYN_DMA_RX_FLOW_CTRL_DEACT1K = 0x00000000, /* (RFD)Rx flow control
+ deact. Threshold (1kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_DEACT2K = 0x00000800, /* (RFD)Rx flow control
+ deact. Threshold (2kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_DEACT3K = 0x00001000, /* (RFD)Rx flow control
+ deact. Threshold (3kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_DEACT4K = 0x00001800, /* (RFD)Rx flow control
+ deact. Threshold (4kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_DEACT5K = 0x00400000, /* (RFD)Rx flow control
						   deact. Threshold (5kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_DEACT6K = 0x00400800, /* (RFD)Rx flow control
						   deact. Threshold (6kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_DEACT7K = 0x00401000, /* (RFD)Rx flow control
						   deact. Threshold (7kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_ACT = 0x00800600, /* (RFA)Rx flow control
+ Act. Threshold */
+ SYN_DMA_RX_FLOW_CTRL_ACT1K = 0x00000000, /* (RFA)Rx flow control
+ Act. Threshold (1kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_ACT2K = 0x00000200, /* (RFA)Rx flow control
+ Act. Threshold (2kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_ACT3K = 0x00000400, /* (RFA)Rx flow control
+ Act. Threshold (3kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_ACT4K = 0x00000600, /* (RFA)Rx flow control
+ Act. Threshold (4kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_ACT5K = 0x00800000, /* (RFA)Rx flow control
+ Act. Threshold (5kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_ACT6K = 0x00800200, /* (RFA)Rx flow control
+ Act. Threshold (6kbytes) */
+ SYN_DMA_RX_FLOW_CTRL_ACT7K = 0x00800400, /* (RFA)Rx flow control
+ Act. Threshold (7kbytes) */
+	SYN_DMA_RX_THRESH_CTRL = 0x00000018,	/* (RTC)Controls the
						   Thresh of MTL rx Fifo */
+	SYN_DMA_RX_THRESH_CTRL64 = 0x00000000,	/* (RTC)Controls the
						   Thresh of MTL rx Fifo 64 */
+	SYN_DMA_RX_THRESH_CTRL32 = 0x00000008,	/* (RTC)Controls the
						   Thresh of MTL rx Fifo 32 */
+	SYN_DMA_RX_THRESH_CTRL96 = 0x00000010,	/* (RTC)Controls the
						   Thresh of MTL rx Fifo 96 */
+	SYN_DMA_RX_THRESH_CTRL128 = 0x00000018,	/* (RTC)Controls the
						   Thresh of MTL rx Fifo 128 */
+ SYN_DMA_EN_HW_FLOW_CTRL = 0x00000100, /* (EFC)Enable HW flow control*/
+ SYN_DMA_DIS_HW_FLOW_CTRL = 0x00000000, /* Disable HW flow control */
+ SYN_DMA_FWD_ERROR_FRAMES = 0x00000080, /* (FEF)Forward error frames */
+ SYN_DMA_FWD_UNDER_SZ_FRAMES = 0x00000040, /* (FUF)Forward undersize
+ frames */
+ SYN_DMA_TX_SECOND_FRAME = 0x00000004, /* (OSF)Operate on 2nd frame */
+ SYN_DMA_RX_START = 0x00000002, /* (SR)Start/Stop reception */
+};
+
+/*
+ * SYN_DMA_INT_ENABLE = 0x101C, CSR7 - Interrupt enable Register Layout
+ */
+enum syn_dma_interrupt_reg {
+ SYN_DMA_IE_NORMAL = SYN_DMA_INT_NORMAL, /* Normal interrupt enable */
+ SYN_DMA_IE_ABNORMAL = SYN_DMA_INT_ABNORMAL, /* Abnormal interrupt enable */
+ SYN_DMA_IE_EARLY_RX = SYN_DMA_INT_EARLY_RX, /* Early RX interrupt enable */
+ SYN_DMA_IE_BUS_ERROR = SYN_DMA_INT_BUS_ERROR, /* Fatal bus error enable */
+ SYN_DMA_IE_EARLY_TX = SYN_DMA_INT_EARLY_TX, /* Early TX interrupt enable */
+ SYN_DMA_IE_RX_WDOG_TO = SYN_DMA_INT_RX_WDOG_TO, /* RX Watchdog Timeout enable */
+ SYN_DMA_IE_RX_STOPPED = SYN_DMA_INT_RX_STOPPED, /* RX process stopped enable */
+ SYN_DMA_IE_RX_NO_BUFFER = SYN_DMA_INT_RX_NO_BUFFER,
+ /* Receive buffer unavailable enable */
+ SYN_DMA_IE_RX_COMPLETED = SYN_DMA_INT_RX_COMPLETED,
+ /* Completion of frame reception enable */
+ SYN_DMA_IE_TX_UNDERFLOW = SYN_DMA_INT_TX_UNDERFLOW,
+ /* TX underflow enable */
+ SYN_DMA_IE_RX_OVERFLOW = SYN_DMA_INT_RCV_OVERFLOW,
+ /* RX Buffer overflow interrupt */
+ SYN_DMA_IE_TX_JABBER_TO = SYN_DMA_INT_TX_JABBER_TO,
+ /* TX Jabber Timeout enable */
+ SYN_DMA_IE_TX_NO_BUFFER = SYN_DMA_INT_TX_NO_BUFFER,
+ /* TX buffer unavailable enable */
+ SYN_DMA_IE_TX_STOPPED = SYN_DMA_INT_TX_STOPPED,
+ /* TX process stopped enable */
+ SYN_DMA_IE_TX_COMPLETED = SYN_DMA_INT_TX_COMPLETED,
+ /* TX completed enable */
+};
+
+/*
+ * SYN_DMA_AXI_BUS_MODE = 0x1028
+ */
+enum syn_dma_axi_bus_mode_reg {
+ SYN_DMA_EN_LPI = 0x80000000,
+ SYN_DMA_LPI_XIT_FRM = 0x40000000,
+ SYN_DMA_WR_OSR_NUM_REQS16 = 0x00F00000,
+ SYN_DMA_WR_OSR_NUM_REQS8 = 0x00700000,
+ SYN_DMA_WR_OSR_NUM_REQS4 = 0x00300000,
+ SYN_DMA_WR_OSR_NUM_REQS2 = 0x00100000,
+ SYN_DMA_WR_OSR_NUM_REQS1 = 0x00000000,
+ SYN_DMA_RD_OSR_NUM_REQS16 = 0x000F0000,
+ SYN_DMA_RD_OSR_NUM_REQS8 = 0x00070000,
+ SYN_DMA_RD_OSR_NUM_REQS4 = 0x00030000,
+ SYN_DMA_RD_OSR_NUM_REQS2 = 0x00010000,
+ SYN_DMA_RD_OSR_NUM_REQS1 = 0x00000000,
+ SYN_DMA_ONEKBBE = 0x00002000,
+ SYN_DMA_AXI_AAL = 0x00001000,
+ SYN_DMA_AXI_BLEN256 = 0x00000080,
+ SYN_DMA_AXI_BLEN128 = 0x00000040,
+ SYN_DMA_AXI_BLEN64 = 0x00000020,
+ SYN_DMA_AXI_BLEN32 = 0x00000010,
+ SYN_DMA_AXI_BLEN16 = 0x00000008,
+ SYN_DMA_AXI_BLEN8 = 0x00000004,
+ SYN_DMA_AXI_BLEN4 = 0x00000002,
+ SYN_DMA_UNDEFINED = 0x00000001,
+};
+
+/*
+ * Values to initialize DMA registers
+ */
+enum syn_dma_init_values {
+ /*
+ * Interrupt groups
+ */
+ SYN_DMA_INT_ERROR_MASK = SYN_DMA_INT_BUS_ERROR, /* Error */
+ SYN_DMA_INT_RX_ABN_MASK = SYN_DMA_INT_RX_NO_BUFFER, /* RX abnormal intr */
+ SYN_DMA_INT_RX_NORM_MASK = SYN_DMA_INT_RX_COMPLETED, /* RXnormal intr */
+ SYN_DMA_INT_RX_STOPPED_MASK = SYN_DMA_INT_RX_STOPPED, /* RXstopped */
+ SYN_DMA_INT_TX_ABN_MASK = SYN_DMA_INT_TX_UNDERFLOW, /* TX abnormal intr */
+ SYN_DMA_INT_TX_NORM_MASK = SYN_DMA_INT_TX_COMPLETED, /* TX normal intr */
+ SYN_DMA_INT_TX_STOPPED_MASK = SYN_DMA_INT_TX_STOPPED, /* TX stopped */
+
+ SYN_DMA_BUS_MODE_INIT = SYN_DMA_FIXED_BURST_ENABLE | SYN_DMA_BURST_LENGTH8
+ | SYN_DMA_DESCRIPTOR_SKIP2 | SYN_DMA_RESET_OFF,
+
+ SYN_DMA_BUS_MODE_VAL = SYN_DMA_BURST_LENGTH32
+ | SYN_DMA_BURST_LENGTHX8 | SYN_DMA_DESCRIPTOR_SKIP0
+ | SYN_DMA_DESCRIPTOR8_WORDS | SYN_DMA_ARBIT_PR | SYN_DMA_ADDRESS_ALIGNED_BEATS,
+
+ SYN_DMA_OMR = SYN_DMA_TX_STORE_AND_FORWARD | SYN_DMA_RX_STORE_AND_FORWARD
+ | SYN_DMA_RX_THRESH_CTRL128 | SYN_DMA_TX_SECOND_FRAME,
+
+ SYN_DMA_INT_EN = SYN_DMA_IE_NORMAL | SYN_DMA_IE_ABNORMAL | SYN_DMA_INT_ERROR_MASK
+ | SYN_DMA_INT_RX_ABN_MASK | SYN_DMA_INT_RX_NORM_MASK
+ | SYN_DMA_INT_RX_STOPPED_MASK | SYN_DMA_INT_TX_ABN_MASK
+ | SYN_DMA_INT_TX_NORM_MASK | SYN_DMA_INT_TX_STOPPED_MASK,
+ SYN_DMA_INT_DISABLE = 0,
+ SYN_DMA_AXI_BUS_MODE_VAL = SYN_DMA_AXI_BLEN16 | SYN_DMA_RD_OSR_NUM_REQS8
+ | SYN_DMA_WR_OSR_NUM_REQS8,
+};
+
+/*
+ * desc_mode
+ * GMAC descriptors mode
+ */
+enum desc_mode {
+ RINGMODE = 0x00000001,
+ CHAINMODE = 0x00000002,
+};
+
+extern void syn_disable_dma_interrupt(struct nss_gmac_hal_dev *nghd);
+extern void syn_enable_dma_interrupt(struct nss_gmac_hal_dev *nghd);
+extern void syn_enable_dma_rx(struct nss_gmac_hal_dev *nghd);
+extern void syn_disable_dma_rx(struct nss_gmac_hal_dev *nghd);
+extern void syn_enable_dma_tx(struct nss_gmac_hal_dev *nghd);
+extern void syn_disable_dma_tx(struct nss_gmac_hal_dev *nghd);
+extern void syn_clear_dma_status(struct nss_gmac_hal_dev *nghd);
+extern void syn_resume_dma_tx(struct nss_gmac_hal_dev *nghd);
+extern uint32_t syn_get_rx_missed(struct nss_gmac_hal_dev *nghd);
+extern uint32_t syn_get_fifo_overflows(struct nss_gmac_hal_dev *nghd);
+
+extern void syn_init_tx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t tx_desc_dma);
+extern void syn_init_rx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t rx_desc_dma);
+
+#endif /*__SYN_REG_H__*/
diff --git a/gmac_hal_ops/syn/syn_dev.h b/hal/gmac_hal_ops/syn/xgmac/syn_dev.h
similarity index 97%
rename from gmac_hal_ops/syn/syn_dev.h
rename to hal/gmac_hal_ops/syn/xgmac/syn_dev.h
index 6301a95..bdccd09 100644
--- a/gmac_hal_ops/syn/syn_dev.h
+++ b/hal/gmac_hal_ops/syn/xgmac/syn_dev.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
diff --git a/gmac_hal_ops/syn/syn_if.c b/hal/gmac_hal_ops/syn/xgmac/syn_if.c
similarity index 97%
rename from gmac_hal_ops/syn/syn_if.c
rename to hal/gmac_hal_ops/syn/xgmac/syn_if.c
index 222e9f3..1ab621a 100644
--- a/gmac_hal_ops/syn/syn_if.c
+++ b/hal/gmac_hal_ops/syn/xgmac/syn_if.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -94,7 +94,6 @@
#define SYN_STATS_LEN ARRAY_SIZE(syn_gstrings_stats)
#define SYN_PRIV_FLAGS_LEN ARRAY_SIZE(syn_strings_priv_flags)
-
/*
* syn_rx_flow_control()
*/
@@ -312,7 +311,7 @@
case ETH_SS_STATS:
for (i = 0; i < SYN_STATS_LEN; i++) {
memcpy(data, syn_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
+ strlen(syn_gstrings_stats[i].stat_string));
data += ETH_GSTRING_LEN;
}
break;
@@ -320,7 +319,7 @@
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < SYN_PRIV_FLAGS_LEN; i++) {
memcpy(data, syn_strings_priv_flags[i],
- ETH_GSTRING_LEN);
+ strlen(syn_strings_priv_flags[i]));
data += ETH_GSTRING_LEN;
}
@@ -357,7 +356,7 @@
return -1;
netdev_dbg(nghd->netdev,
- "%s: mac_base:0x%p tx_enable:0x%x rx_enable:0x%x\n",
+ "%s: mac_base:0x%px tx_enable:0x%x rx_enable:0x%x\n",
__func__,
nghd->mac_base,
hal_read_reg(nghd->mac_base,
@@ -378,7 +377,7 @@
syn_tx_disable(nghd);
syn_rx_disable(nghd);
- netdev_dbg(nghd->netdev, "%s: Stopping mac_base:0x%p\n", __func__,
+ netdev_dbg(nghd->netdev, "%s: Stopping mac_base:0x%px\n", __func__,
nghd->mac_base);
return 0;
@@ -432,7 +431,7 @@
spin_lock_init(&shd->nghd.slock);
- netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%p\n",
+ netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n",
gmacpdata->reg_len,
ndev->base_addr,
shd->nghd.mac_base);
diff --git a/gmac_hal_ops/syn/syn_reg.h b/hal/gmac_hal_ops/syn/xgmac/syn_reg.h
similarity index 98%
rename from gmac_hal_ops/syn/syn_reg.h
rename to hal/gmac_hal_ops/syn/xgmac/syn_reg.h
index 9a446a9..f76fce1 100644
--- a/gmac_hal_ops/syn/syn_reg.h
+++ b/hal/gmac_hal_ops/syn/xgmac/syn_reg.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
diff --git a/include/edma.h b/hal/include/edma.h
similarity index 87%
rename from include/edma.h
rename to hal/include/edma.h
index 606a98b..9ed0c38 100644
--- a/include/edma.h
+++ b/hal/include/edma.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016, 2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016, 2019-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -26,7 +26,6 @@
extern int edma_init(void);
extern void edma_cleanup(bool is_dp_override);
-extern int32_t edma_register_netdevice(struct net_device *netdev,
- uint32_t macid);
+extern struct nss_dp_data_plane_ops nss_dp_edma_ops;
#endif /*__NSS_DP_EDMA__ */
diff --git a/hal/include/nss_dp_hal.h b/hal/include/nss_dp_hal.h
new file mode 100644
index 0000000..89cdb1a
--- /dev/null
+++ b/hal/include/nss_dp_hal.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __NSS_DP_HAL_H__
+#define __NSS_DP_HAL_H__
+
+#include "nss_dp_dev.h"
+
+/*
+ * nss_dp_hal_get_gmac_ops()
+ * Returns gmac hal ops based on the GMAC type.
+ */
+static inline struct nss_gmac_hal_ops *nss_dp_hal_get_gmac_ops(uint32_t gmac_type)
+{
+ return dp_global_ctx.gmac_hal_ops[gmac_type];
+}
+
+/*
+ * nss_dp_hal_set_gmac_ops()
+ * Sets dp global gmac hal ops based on the GMAC type.
+ */
+static inline void nss_dp_hal_set_gmac_ops(struct nss_gmac_hal_ops *hal_ops, uint32_t gmac_type)
+{
+ dp_global_ctx.gmac_hal_ops[gmac_type] = hal_ops;
+}
+
+/*
+ * HAL functions implemented by SoC specific source files.
+ */
+extern bool nss_dp_hal_init(void);
+extern void nss_dp_hal_cleanup(void);
+extern void nss_dp_hal_clk_enable(struct nss_dp_dev *dp_priv);
+extern struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void);
+
+#endif /* __NSS_DP_HAL_H__ */
diff --git a/include/nss_dp_hal_if.h b/hal/include/nss_dp_hal_if.h
similarity index 91%
rename from include/nss_dp_hal_if.h
rename to hal/include/nss_dp_hal_if.h
index 42f9ea8..68fc2da 100644
--- a/include/nss_dp_hal_if.h
+++ b/hal/include/nss_dp_hal_if.h
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017,2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -24,8 +24,9 @@
#include <uapi/linux/if_link.h>
enum gmac_device_type {
- GMAC_HAL_TYPE_QCOM = 0,
- GMAC_HAL_TYPE_10G,
+ GMAC_HAL_TYPE_QCOM = 0, /* 1G GMAC type */
+ GMAC_HAL_TYPE_SYN_XGMAC,/* Synopsys XGMAC type */
+ GMAC_HAL_TYPE_SYN_GMAC, /* Synopsys 1G GMAC type */
GMAC_HAL_TYPE_MAX
};
@@ -35,7 +36,7 @@
struct gmac_hal_platform_data {
struct net_device *netdev; /* Net device */
uint32_t reg_len; /* Register space length */
- uint32_t mactype; /* Mac chip type */
+ uint32_t mactype; /* MAC chip type */
uint32_t macid; /* MAC sequence id on the Chip */
};
@@ -43,8 +44,8 @@
* NSS GMAC HAL device data
*/
struct nss_gmac_hal_dev {
- void __iomem *mac_base; /* base address of MAC registers */
- uint32_t version; /* Gmac Revision version */
+ void __iomem *mac_base; /* Base address of MAC registers */
+ uint32_t version; /* GMAC Revision version */
uint32_t drv_flags; /* Driver specific feature flags */
/*
@@ -58,7 +59,7 @@
struct net_device *netdev;
struct resource *memres;
- uint32_t mac_reg_len; /* Mac Register block length */
+ uint32_t mac_reg_len; /* MAC Register block length */
uint32_t mac_id; /* MAC sequence id on the Chip */
spinlock_t slock; /* lock to protect concurrent reg access */
};
diff --git a/hal/syn_gmac_dp/syn_data_plane.c b/hal/syn_gmac_dp/syn_data_plane.c
new file mode 100644
index 0000000..0b9bdce
--- /dev/null
+++ b/hal/syn_gmac_dp/syn_data_plane.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "syn_data_plane.h"
+#include "syn_reg.h"
+
+#define SYN_DP_NAPI_BUDGET 64
+
+/*
+ * GMAC Ring info
+ */
+struct syn_dp_info dp_info[NSS_DP_HAL_MAX_PORTS];
+
+/*
+ * syn_dp_napi_poll()
+ * Scheduled by napi to process RX and TX complete
+ */
+static int syn_dp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct nss_dp_dev *gmac_dev = container_of(napi, struct nss_dp_dev, napi);
+ struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
+ int work_done;
+
+ /*
+ * Update GMAC stats
+ */
+	spin_lock_bh(&dev_info->stats_lock);
+	dev_info->stats.stats.rx_missed += syn_get_rx_missed(gmac_dev->gmac_hal_ctx);
+	dev_info->stats.stats.rx_missed += syn_get_fifo_overflows(gmac_dev->gmac_hal_ctx);
+	spin_unlock_bh(&dev_info->stats_lock);
+
+ syn_dp_process_tx_complete(gmac_dev, dev_info);
+ work_done = syn_dp_rx(gmac_dev, dev_info, budget);
+ syn_dp_rx_refill(gmac_dev, dev_info);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ syn_enable_dma_interrupt(gmac_dev->gmac_hal_ctx);
+ }
+
+ return work_done;
+}
+
+/*
+ * syn_dp_handle_irq()
+ * Process IRQ and schedule napi
+ */
+static irqreturn_t syn_dp_handle_irq(int irq, void *ctx)
+{
+ struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)ctx;
+ struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
+
+ syn_clear_dma_status(nghd);
+ syn_disable_dma_interrupt(nghd);
+
+ /*
+ * Schedule NAPI
+ */
+ napi_schedule(&gmac_dev->napi);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * syn_dp_if_init()
+ * Initialize the GMAC data plane operations
+ */
+static int syn_dp_if_init(struct nss_dp_data_plane_ctx *dpc)
+{
+ struct net_device *netdev = dpc->dev;
+ struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
+ uint32_t macid = gmac_dev->macid;
+ struct syn_dp_info *dev_info = &dp_info[macid - 1];
+ struct device *dev = &gmac_dev->pdev->dev;
+ int err;
+
+ if (!netdev) {
+ netdev_dbg(netdev, "nss_dp_gmac: Invalid netdev pointer %px\n", netdev);
+ return NSS_DP_FAILURE;
+ }
+
+ netdev_info(netdev, "nss_dp_gmac: Registering netdev %s(qcom-id:%d) with GMAC\n", netdev->name, macid);
+
+ if (!dev_info->napi_added) {
+ netif_napi_add(netdev, &gmac_dev->napi, syn_dp_napi_poll, SYN_DP_NAPI_BUDGET);
+
+ /*
+ * Requesting irq
+ */
+ netdev->irq = platform_get_irq(gmac_dev->pdev, 0);
+ err = request_irq(netdev->irq, syn_dp_handle_irq, 0, "nss-dp-gmac", gmac_dev);
+ if (err) {
+ netdev_dbg(netdev, "err_code:%d, Mac %d IRQ %d request failed\n", err,
+ gmac_dev->macid, netdev->irq);
+ return NSS_DP_FAILURE;
+ }
+
+ gmac_dev->drv_flags |= NSS_DP_PRIV_FLAG(IRQ_REQUESTED);
+ dev_info->napi_added = 1;
+ }
+
+ /*
+ * Forcing the kernel to use 32-bit DMA addressing
+ */
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+ /*
+ * Initialize the Tx/Rx ring
+ */
+ if (syn_dp_setup_rings(gmac_dev, netdev, dev, dev_info)) {
+ netdev_dbg(netdev, "nss_dp_gmac: Error initializing GMAC rings %px\n", netdev);
+ return NSS_DP_FAILURE;
+ }
+
+ spin_lock_init(&dev_info->data_lock);
+ spin_lock_init(&dev_info->stats_lock);
+
+ netdev_dbg(netdev,"Synopsys GMAC dataplane initialized\n");
+
+ return NSS_DP_SUCCESS;
+}
+
+/*
+ * syn_dp_if_open()
+ * Open the GMAC data plane operations
+ */
+static int syn_dp_if_open(struct nss_dp_data_plane_ctx *dpc, uint32_t tx_desc_ring,
+ uint32_t rx_desc_ring, uint32_t mode)
+{
+ struct net_device *netdev = dpc->dev;
+ struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
+ struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
+
+ syn_enable_dma_rx(nghd);
+ syn_enable_dma_tx(nghd);
+
+ napi_enable(&gmac_dev->napi);
+ syn_enable_dma_interrupt(nghd);
+
+ netdev_dbg(netdev, "Synopsys GMAC dataplane opened\n");
+
+ return NSS_DP_SUCCESS;
+}
+
+/*
+ * syn_dp_if_close()
+ * Close the GMAC data plane operations
+ */
+static int syn_dp_if_close(struct nss_dp_data_plane_ctx *dpc)
+{
+ struct net_device *netdev = dpc->dev;
+ struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
+ struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
+
+ syn_disable_dma_rx(nghd);
+ syn_disable_dma_tx(nghd);
+
+ syn_disable_dma_interrupt(nghd);
+ napi_disable(&gmac_dev->napi);
+
+ netdev_dbg(netdev, "Synopsys GMAC dataplane closed\n");
+
+ return NSS_DP_SUCCESS;
+}
+
+/*
+ * syn_dp_if_link_state()
+ * Change of link for the dataplane
+ */
+static int syn_dp_if_link_state(struct nss_dp_data_plane_ctx *dpc, uint32_t link_state)
+{
+ struct net_device *netdev = dpc->dev;
+
+ /*
+ * Switch interrupt based on the link state
+ */
+ if (link_state) {
+ netdev_dbg(netdev, "Data plane link up\n");
+ } else {
+ netdev_dbg(netdev, "Data plane link down\n");
+ }
+
+ return NSS_DP_SUCCESS;
+}
+
+/*
+ * syn_dp_if_mac_addr()
+ */
+static int syn_dp_if_mac_addr(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr)
+{
+ return NSS_DP_SUCCESS;
+}
+
+/*
+ * syn_dp_if_change_mtu()
+ */
+static int syn_dp_if_change_mtu(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu)
+{
+ /*
+ * TODO: Work on MTU fix along with register update for frame length
+ */
+ return NSS_DP_SUCCESS;
+}
+
+/*
+ * syn_dp_if_set_features()
+ * Set the supported net_device features
+ */
+static void syn_dp_if_set_features(struct nss_dp_data_plane_ctx *dpc)
+{
+ struct net_device *netdev = dpc->dev;
+
+ netdev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ netdev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ netdev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ netdev->wanted_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+}
+
+/*
+ * syn_dp_if_xmit()
+ * Dataplane method to transmit the packet
+ */
+static netdev_tx_t syn_dp_if_xmit(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb)
+{
+ struct net_device *netdev = dpc->dev;
+ struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
+ struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
+ int nfrags = skb_shinfo(skb)->nr_frags;
+
+ /*
+ * Most likely, it is not a fragmented pkt, optimize for that
+ */
+ if (likely(nfrags == 0)) {
+ if (syn_dp_tx(gmac_dev, dev_info, skb)) {
+ goto drop;
+ }
+
+ return NETDEV_TX_OK;
+ }
+
+drop:
+ dev_kfree_skb_any(skb);
+ dev_info->stats.stats.tx_dropped++;
+
+	return NETDEV_TX_OK;
+}
+
+/*
+ * syn_dp_if_pause_on_off()
+ */
+static int syn_dp_if_pause_on_off(struct nss_dp_data_plane_ctx *dpc, uint32_t pause_on)
+{
+ return NSS_DP_SUCCESS;
+}
+
+/*
+ * syn_dp_if_get_stats
+ * Get Synopsys GMAC data plane stats
+ */
+static void syn_dp_if_get_stats(struct nss_dp_data_plane_ctx *dpc, struct nss_dp_gmac_stats *stats)
+{
+ struct net_device *netdev = dpc->dev;
+ struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
+ struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
+
+ spin_lock_bh(&dev_info->stats_lock);
+ netdev_dbg(netdev, "GETTING stats: rx_packets:%llu rx_bytes:%llu mmc_rx_crc_errors:%llu", dev_info->stats.stats.rx_packets,
+ dev_info->stats.stats.rx_bytes, dev_info->stats.stats.mmc_rx_crc_errors);
+ memcpy(stats, &dev_info->stats, sizeof(*stats));
+ spin_unlock_bh(&dev_info->stats_lock);
+}
+
+/*
+ * syn_dp_if_deinit()
+ * Free all the Synopsys GMAC resources
+ */
+static int syn_dp_if_deinit(struct nss_dp_data_plane_ctx *dpc)
+{
+ struct net_device *netdev = dpc->dev;
+ struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev);
+ struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1];
+
+ if (dev_info->napi_added) {
+ /*
+ * Remove interrupt handlers and NAPI
+ */
+ if (gmac_dev->drv_flags & NSS_DP_PRIV_FLAG(IRQ_REQUESTED)) {
+ netdev_dbg(netdev, "Freeing IRQ %d for Mac %d\n", netdev->irq, gmac_dev->macid);
+ synchronize_irq(netdev->irq);
+ free_irq(netdev->irq, gmac_dev);
+ gmac_dev->drv_flags &= ~NSS_DP_PRIV_FLAG(IRQ_REQUESTED);
+ }
+
+ netif_napi_del(&gmac_dev->napi);
+ dev_info->napi_added = 0;
+ }
+
+ /*
+ * Cleanup and free the rings
+ */
+ syn_dp_cleanup_rings(gmac_dev, netdev, dev_info);
+
+ return NSS_DP_SUCCESS;
+}
+
+/*
+ * nss_dp_gmac_ops
+ * Data plane operations for Synopsys GMAC
+ */
+struct nss_dp_data_plane_ops nss_dp_gmac_ops = {
+ .init = syn_dp_if_init,
+ .open = syn_dp_if_open,
+ .close = syn_dp_if_close,
+ .link_state = syn_dp_if_link_state,
+ .mac_addr = syn_dp_if_mac_addr,
+ .change_mtu = syn_dp_if_change_mtu,
+ .xmit = syn_dp_if_xmit,
+ .set_features = syn_dp_if_set_features,
+ .pause_on_off = syn_dp_if_pause_on_off,
+ .get_stats = syn_dp_if_get_stats,
+ .deinit = syn_dp_if_deinit,
+};
diff --git a/hal/syn_gmac_dp/syn_data_plane.h b/hal/syn_gmac_dp/syn_data_plane.h
new file mode 100644
index 0000000..c963095
--- /dev/null
+++ b/hal/syn_gmac_dp/syn_data_plane.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef __NSS_DP_SYN_DATAPLANE__
+#define __NSS_DP_SYN_DATAPLANE__
+
+#include "nss_dp_dev.h"
+#include "syn_dma_desc.h"
+
+#define SYN_DP_TX_DESC_SIZE 128 /* Tx Descriptors needed in the descriptor pool/queue */
+#define SYN_DP_RX_DESC_SIZE 128 /* Rx Descriptors needed in the descriptor pool/queue */
+#define SYN_DP_MINI_JUMBO_FRAME_MTU 1978
+#define SYN_DP_MAX_DESC_BUFF		0x1FFF	/* Max size of buffer that can be programmed into one field of desc */
+
+/*
+ * syn_dp_info
+ *	Synopsys GMAC Dataplane information
+ */
+struct syn_dp_info {
+ struct nss_dp_gmac_stats stats; /* GMAC driver stats */
+
+ struct sk_buff *rx_skb_list[SYN_DP_RX_DESC_SIZE]; /* Rx skb pool helping RX DMA descriptors*/
+
+	dma_addr_t rx_desc_dma;		/* DMA-able address of first rx descriptor
+ either in ring or chain mode, this is
+ used by the GMAC device */
+
+ struct dma_desc *rx_desc; /* start address of RX descriptors ring or
+ chain, this is used by the driver */
+
+ uint32_t busy_rx_desc; /* Number of Rx Descriptors owned by
+ DMA at any given time */
+
+	uint32_t rx_desc_count;		/* number of rx descriptors in the
+					   rx descriptor queue/pool */
+
+ uint32_t rx_busy; /* index of the rx descriptor owned by DMA,
+ obtained by nss_gmac_get_rx_qptr() */
+
+ uint32_t rx_next; /* index of the rx descriptor next available
+ with driver, given to DMA by
+ nss_gmac_set_rx_qptr()*/
+
+ struct dma_desc *rx_busy_desc; /* Rx Descriptor address corresponding
+ to the index tx_busy */
+
+ struct dma_desc *rx_next_desc; /* Rx Descriptor address corresponding
+ to the index rx_next */
+
+	struct sk_buff *tx_skb_list[SYN_DP_TX_DESC_SIZE];	/* Tx skb pool helping TX DMA descriptors*/
+
+ dma_addr_t tx_desc_dma; /* Dma-able address of first tx descriptor
+ either in ring or chain mode, this is used
+ by the GMAC device */
+
+ struct dma_desc *tx_desc; /* start address of TX descriptors ring or
+ chain, this is used by the driver */
+
+ uint32_t busy_tx_desc; /* Number of Tx Descriptors owned by
+ DMA at any given time */
+
+	uint32_t tx_desc_count;		/* number of tx descriptors in the
+					   tx descriptor queue/pool */
+
+ uint32_t tx_busy; /* index of the tx descriptor owned by DMA,
+ is obtained by nss_gmac_get_tx_qptr() */
+
+ uint32_t tx_next; /* index of the tx descriptor next available
+ with driver, given to DMA by
+ nss_gmac_set_tx_qptr() */
+
+ struct dma_desc *tx_busy_desc; /* Tx Descriptor address corresponding
+ to the index tx_busy */
+
+ struct dma_desc *tx_next_desc; /* Tx Descriptor address corresponding
+ to the index tx_next */
+
+ spinlock_t data_lock; /* Lock to protect datapath */
+	spinlock_t stats_lock;		/* Lock to protect stats */
+ int napi_added; /* flag to indicate napi add status */
+};
+
+/*
+ * GMAC Tx/Rx APIs
+ */
+int syn_dp_setup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info);
+int syn_dp_cleanup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct syn_dp_info *dev_info);
+
+int syn_dp_rx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, int budget);
+void syn_dp_rx_refill(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info);
+
+int syn_dp_tx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb);
+void syn_dp_process_tx_complete(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info);
+
+#endif /* __NSS_DP_SYN_DATAPLANE__ */
\ No newline at end of file
diff --git a/hal/syn_gmac_dp/syn_dma_desc.h b/hal/syn_gmac_dp/syn_dma_desc.h
new file mode 100644
index 0000000..5b50d38
--- /dev/null
+++ b/hal/syn_gmac_dp/syn_dma_desc.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __SYN_DESC__
+#define __SYN_DESC__
+
+/**********************************************************
+ * DMA Engine descriptors
+ **********************************************************/
+/*
+******Enhanced Descriptor structure to support 8K buffer per buffer *******
+
+dma_rx_base_addr = 0x000C, CSR3 - Receive Descriptor list base address
+dma_rx_base_addr is the pointer to the first Rx Descriptors.
+The Descriptor format in Little endian with a 32 bit Data bus is as shown below.
+
+Similarly
+dma_tx_base_addr = 0x0010, CSR4 - Transmit Descriptor list base address
+dma_tx_base_addr is the pointer to the first Tx Descriptors.
+The Descriptor format in Little endian with a 32 bit Data bus is as shown below.
+ -------------------------------------------------------------------------
+ RDES0 |OWN (31)| Status |
+ -------------------------------------------------------------------------
+ RDES1 | Ctrl | Res | Byte Count Buffer 2 | Ctrl | Res | Byte Count Buffer 1 |
+ -------------------------------------------------------------------------
+ RDES2 | Buffer 1 Address |
+ -------------------------------------------------------------------------
+ RDES3 | Buffer 2 Address / Next Descriptor Address |
+ -------------------------------------------------------------------------
+ RDES4 | Extended Status |
+ -------------------------------------------------------------------------
+ RDES5 | Reserved |
+ -------------------------------------------------------------------------
+ RDES6 | Receive Timestamp Low |
+ -------------------------------------------------------------------------
+ RDES7 | Receive Timestamp High |
+ -------------------------------------------------------------------------
+
+ ------------------------------------------------------------------------
+ TDES0 |OWN (31)| Ctrl | Res | Ctrl | Res | Status |
+ ------------------------------------------------------------------------
+ TDES1 | Res | Byte Count Buffer 2 | Res | Byte Count Buffer 1 |
+ ------------------------------------------------------------------------
+ TDES2 | Buffer 1 Address |
+ ------------------------------------------------------------------------
+ TDES3 | Buffer 2 Address / Next Descriptor Address |
+ ------------------------------------------------------------------------
+ TDES4 | Reserved |
+ ------------------------------------------------------------------------
+ TDES5 | Reserved |
+ ------------------------------------------------------------------------
+ TDES6 | Transmit Timestamp Low |
+ ------------------------------------------------------------------------
+ TDES7 | Transmit Timestamp Higher |
+ ------------------------------------------------------------------------
+*/
+
+/*
+ * dma_descriptor_status
+ * status word of DMA descriptor
+ */
+enum dma_descriptor_status {
+ desc_own_by_dma = 0x80000000, /* (OWN)Descriptor is
+ owned by DMA engine */
+ desc_rx_da_filter_fail = 0x40000000, /* (AFM)Rx - DA Filter
+ Fail for the rx frame */
+ desc_rx_frame_length_mask = 0x3FFF0000, /* (FL)Receive descriptor
+ frame length */
+ desc_rx_frame_length_shift = 16,
+ desc_rx_error = 0x00008000, /* (ES)Error summary bit
+ - OR of the following bits:
+ DE || OE || IPC || GF || LC || RWT
+ || RE || CE */
+ desc_rx_truncated = 0x00004000, /* (DE)Rx - no more descriptors
+ for receive frame */
+ desc_sa_filter_fail = 0x00002000, /* (SAF)Rx - SA Filter Fail for
+ the received frame */
+ desc_rx_length_error = 0x00001000, /* (LE)Rx - frm size not
+ matching with len field */
+ desc_rx_overflow = 0x00000800, /* (OE)Rx - frm was damaged due
+ to buffer overflow */
+ desc_rx_vlan_tag = 0x00000400, /* (VLAN)Rx - received frame
+ is a VLAN frame */
+ desc_rx_first = 0x00000200, /* (FS)Rx - first
+ descriptor of the frame */
+ desc_rx_last = 0x00000100, /* (LS)Rx - last
+ descriptor of the frame */
+ desc_rx_long_frame = 0x00000080, /* (Giant Frame)Rx - frame is
+ longer than 1518/1522 */
+ desc_rx_collision = 0x00000040, /* (LC)Rx - late collision
+ occurred during reception */
+ desc_rx_frame_ether = 0x00000020, /* (FT)Rx - Frame type - Ether,
+ otherwise 802.3 */
+ desc_rx_watchdog = 0x00000010, /* (RWT)Rx - watchdog timer
+ expired during reception */
+ desc_rx_mii_error = 0x00000008, /* (RE)Rx - error reported
+ by MII interface */
+ desc_rx_dribbling = 0x00000004, /* (DE)Rx - frame contains non
+ int multiple of 8 bits */
+ desc_rx_crc = 0x00000002, /* (CE)Rx - CRC error */
+ desc_rx_ext_sts = 0x00000001, /* Extended Status Available
+ in RDES4 */
+ desc_tx_error = 0x00008000, /* (ES)Error summary Bits */
+ desc_tx_int_enable = 0x40000000, /* (IC)Tx - interrupt on
+ completion */
+ desc_tx_last = 0x20000000, /* (LS)Tx - Last segment of the
+ frame */
+ desc_tx_first = 0x10000000, /* (FS)Tx - First segment of the
+ frame */
+ desc_tx_disable_crc = 0x08000000, /* (DC)Tx - Add CRC disabled
+ (first segment only) */
+ desc_tx_disable_padd = 0x04000000, /* (DP)disable padding,
+ added by - reyaz */
+ desc_tx_cis_mask = 0x00c00000, /* Tx checksum offloading
+ control mask */
+ desc_tx_cis_bypass = 0x00000000, /* Checksum bypass */
+ desc_tx_cis_ipv4_hdr_cs = 0x00400000, /* IPv4 header checksum */
+ desc_tx_cis_tcp_only_cs = 0x00800000, /* TCP/UDP/ICMP checksum.
+ Pseudo header checksum
+ is assumed to be present */
+ desc_tx_cis_tcp_pseudo_cs = 0x00c00000, /* TCP/UDP/ICMP checksum fully
+ in hardware including
+ pseudo header */
+ desc_tx_desc_end_of_ring = 0x00200000, /* (TER)End of descriptor ring*/
+ desc_tx_desc_chain = 0x00100000, /* (TCH)Second buffer address
+ is chain address */
+ desc_rx_chk_bit0 = 0x00000001, /* Rx Payload Checksum Error */
+ desc_rx_chk_bit7 = 0x00000080, /* (IPC CS ERROR)Rx - Ipv4
+ header checksum error */
+ desc_rx_chk_bit5 = 0x00000020, /* (FT)Rx - Frame type - Ether,
+ otherwise 802.3 */
+ desc_rx_ts_avail = 0x00000080, /* Time stamp available */
+ desc_rx_frame_type = 0x00000020, /* (FT)Rx - Frame type - Ether,
+ otherwise 802.3 */
+ desc_tx_ipv4_chk_error = 0x00010000, /* (IHE) Tx Ip header error */
+ desc_tx_timeout = 0x00004000, /* (JT)Tx - Transmit
+ jabber timeout */
+ desc_tx_frame_flushed = 0x00002000, /* (FF)Tx - DMA/MTL flushed
+ the frame due to SW flush */
+ desc_tx_pay_chk_error = 0x00001000, /* (PCE) Tx Payload checksum
+ Error */
+ desc_tx_lost_carrier = 0x00000800, /* (LC)Tx - carrier lost
+						   during transmission */
+ desc_tx_no_carrier = 0x00000400, /* (NC)Tx - no carrier signal
+						   from the transceiver */
+ desc_tx_late_collision = 0x00000200, /* (LC)Tx - transmission aborted
+ due to collision */
+ desc_tx_exc_collisions = 0x00000100, /* (EC)Tx - transmission aborted
+ after 16 collisions */
+ desc_tx_vlan_frame = 0x00000080, /* (VF)Tx - VLAN-type frame */
+ desc_tx_coll_mask = 0x00000078, /* (CC)Tx - Collision count */
+ desc_tx_coll_shift = 3,
+ desc_tx_exc_deferral = 0x00000004, /* (ED)Tx - excessive deferral */
+ desc_tx_underflow = 0x00000002, /* (UF)Tx - late data arrival
+ from the memory */
+ desc_tx_deferred = 0x00000001, /* (DB)Tx - frame
+						   transmission deferred */
+
+ /*
+ * This explains the RDES1/TDES1 bits layout
+ * ------------------------------------------------------
+ * RDES1/TDES1 | Control Bits | Byte Count Buf 2 | Byte Count Buf 1 |
+ * ------------------------------------------------------
+ */
+
+ /* dma_descriptor_length */ /* length word of DMA descriptor */
+ desc_rx_dis_int_compl = 0x80000000, /* (Disable Rx int on completion) */
+ desc_rx_desc_end_of_ring = 0x00008000, /* (RER)End of descriptor ring */
+ desc_rx_desc_chain = 0x00004000, /* (RCH)Second buffer address
+ is chain address */
+ desc_size2_mask = 0x1FFF0000, /* (RBS2/TBS2) Buffer 2 size */
+ desc_size2_shift = 16,
+ desc_size1_mask = 0x00001FFF, /* (RBS1/TBS1) Buffer 1 size */
+ desc_size1_shift = 0,
+
+ /*
+ * This explains the RDES4 Extended Status bits layout
+ * --------------------------------------------------------
+ * RDES4 | Extended Status |
+ * --------------------------------------------------------
+ */
+ desc_rx_ts_dropped = 0x00004000, /* PTP snapshot available */
+ desc_rx_ptp_ver = 0x00002000, /* When set indicates IEEE1584
+ Version 2 (else Ver1) */
+ desc_rx_ptp_frame_type = 0x00001000, /* PTP frame type Indicates PTP
+ sent over ethernet */
+ desc_rx_ptp_message_type = 0x00000F00, /* Message Type */
+ desc_rx_ptp_no = 0x00000000, /* 0000 => No PTP message rcvd */
+ desc_rx_ptp_sync = 0x00000100, /* 0001 => Sync (all clock
+ types) received */
+ desc_rx_ptp_follow_up = 0x00000200, /* 0010 => Follow_Up (all clock
+ types) received */
+ desc_rx_ptp_delay_req = 0x00000300, /* 0011 => Delay_Req (all clock
+ types) received */
+ desc_rx_ptp_delay_resp = 0x00000400, /* 0100 => Delay_Resp (all clock
+ types) received */
+ desc_rx_ptp_pdelay_req = 0x00000500, /* 0101 => Pdelay_Req (in P
+ to P tras clk) or Announce
+ in Ord and Bound clk */
+ desc_rx_ptp_pdelay_resp = 0x00000600, /* 0110 => Pdealy_Resp(in P to
+ P trans clk) or Management in
+ Ord and Bound clk */
+ desc_rx_ptp_pdelay_resp_fp = 0x00000700,/* 0111 => Pdelay_Resp_Follow_Up
+ (in P to P trans clk) or
+ Signaling in Ord and Bound
+ clk */
+ desc_rx_ptp_ipv6 = 0x00000080, /* Received Packet is in IPV6 */
+ desc_rx_ptp_ipv4 = 0x00000040, /* Received Packet is in IPV4 */
+ desc_rx_chk_sum_bypass = 0x00000020, /* When set indicates checksum
+ offload engine is bypassed */
+ desc_rx_ip_payload_error = 0x00000010, /* When set indicates 16bit IP
+ payload CS is in error */
+ desc_rx_ip_header_error = 0x00000008, /* When set indicates 16bit IPV4
+ hdr CS is err or IP datagram
+ version is not consistent
+ with Ethernet type value */
+ desc_rx_ip_payload_type = 0x00000007, /* Indicate the type of payload
+ encapsulated in IPdatagram
+ processed by COE (Rx) */
+	desc_rx_ip_payload_unknown = 0x00000000,/* Unknown or did not process
+ IP payload */
+ desc_rx_ip_payload_udp = 0x00000001, /* UDP */
+ desc_rx_ip_payload_tcp = 0x00000002, /* TCP */
+ desc_rx_ip_payload_icmp = 0x00000003, /* ICMP */
+};
+
+/*
+ * dma_desc
+ * DMA Descriptor Structure
+ *
+ * The structure is common for both receive and transmit descriptors.
+ */
+struct dma_desc {
+ uint32_t status; /* Status */
+ uint32_t length; /* Buffer 1 and Buffer 2 length */
+ uint32_t buffer1; /* Network Buffer 1 pointer (DMA-able)*/
+ uint32_t data1; /* This holds virtual address of
+ buffer1, not used by DMA */
+
+ /* This data below is used only by driver */
+ uint32_t extstatus; /* Extended status of a Rx Descriptor */
+ uint32_t reserved1; /* Reserved word */
+ uint32_t timestamplow; /* Lower 32 bits of the 64
+ bit timestamp value */
+ uint32_t timestamphigh; /* Higher 32 bits of the 64
+ bit timestamp value */
+};
+
+/*
+ * syn_dp_gmac_tx_checksum_offload_tcp_pseudo
+ * The checksum offload engine is enabled to do complete checksum computation.
+ */
+static inline void syn_dp_gmac_tx_checksum_offload_tcp_pseudo(struct dma_desc *desc)
+{
+ desc->status = ((desc->status & (~desc_tx_cis_mask)) | desc_tx_cis_tcp_pseudo_cs);
+}
+
+/*
+ * syn_dp_gmac_tx_desc_init_ring
+ * Initialize the tx descriptors for ring or chain mode operation.
+ */
+static inline void syn_dp_gmac_tx_desc_init_ring(struct dma_desc *desc, uint32_t no_of_desc)
+{
+ struct dma_desc *last_desc = desc + no_of_desc - 1;
+ memset(desc, 0, no_of_desc * sizeof(struct dma_desc));
+ last_desc->status = desc_tx_desc_end_of_ring;
+}
+
+/*
+ * syn_dp_gmac_rx_desc_init_ring
+ * Initialize the rx descriptors for ring or chain mode operation.
+ */
+static inline void syn_dp_gmac_rx_desc_init_ring(struct dma_desc *desc, uint32_t no_of_desc)
+{
+ struct dma_desc *last_desc = desc + no_of_desc - 1;
+ memset(desc, 0, no_of_desc * sizeof(struct dma_desc));
+ last_desc->length = desc_rx_desc_end_of_ring;
+}
+
+/*
+ * syn_dp_gmac_is_rx_desc_valid
+ * Checks whether the rx descriptor is valid.
+ */
+static inline bool syn_dp_gmac_is_rx_desc_valid(uint32_t status)
+{
+ return (status & (desc_rx_error | desc_rx_first | desc_rx_last)) ==
+ (desc_rx_first | desc_rx_last);
+}
+
+/*
+ * syn_dp_gmac_get_rx_desc_frame_length
+ * Returns the byte length of received frame including CRC.
+ */
+static inline uint32_t syn_dp_gmac_get_rx_desc_frame_length(uint32_t status)
+{
+ return (status & desc_rx_frame_length_mask) >> desc_rx_frame_length_shift;
+}
+
+/*
+ * syn_dp_gmac_is_desc_owned_by_dma
+ * Checks whether the descriptor is owned by DMA.
+ */
+static inline bool syn_dp_gmac_is_desc_owned_by_dma(struct dma_desc *desc)
+{
+ return (desc->status & desc_own_by_dma) == desc_own_by_dma;
+}
+
+/*
+ * syn_dp_gmac_is_desc_empty
+ * Checks whether the descriptor is empty.
+ */
+static inline bool syn_dp_gmac_is_desc_empty(struct dma_desc *desc)
+{
+	/*
+	 * If length of buffer1 is zero then desc is empty
+	 */
+ return (desc->length & desc_size1_mask) == 0;
+}
+
+/*
+ * syn_dp_gmac_get_tx_collision_count
+ * Gives the transmission collision count.
+ */
+static inline uint32_t syn_dp_gmac_get_tx_collision_count(uint32_t status)
+{
+ return (status & desc_tx_coll_mask) >> desc_tx_coll_shift;
+}
+
+#endif /* __SYN_DESC__ */
\ No newline at end of file
diff --git a/hal/syn_gmac_dp/syn_dp_cfg.c b/hal/syn_gmac_dp/syn_dp_cfg.c
new file mode 100644
index 0000000..ff18699
--- /dev/null
+++ b/hal/syn_gmac_dp/syn_dp_cfg.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "nss_dp_dev.h"
+#include "syn_data_plane.h"
+#include "syn_reg.h"
+
+/*
+ * syn_dp_setup_rx_desc_queue
+ * This sets up the receive Descriptor queue in ring or chain mode.
+ */
+static int syn_dp_setup_rx_desc_queue(struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info,
+ uint32_t no_of_desc, uint32_t desc_mode)
+{
+ struct dma_desc *first_desc = NULL;
+ dma_addr_t dma_addr;
+
+ dev_info->rx_desc_count = 0;
+
+ BUG_ON(desc_mode != RINGMODE);
+ BUG_ON((no_of_desc & (no_of_desc - 1)) != 0);
+
+ netdev_dbg(netdev, "total size of memory required for Rx Descriptors in Ring Mode = %u\n", (uint32_t)((sizeof(struct dma_desc) * no_of_desc)));
+
+ first_desc = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc, &dma_addr, GFP_KERNEL);
+ if (first_desc == NULL) {
+ netdev_dbg(netdev, "Error in Rx Descriptor Memory allocation in Ring mode\n");
+ return -ENOMEM;
+ }
+
+ dev_info->rx_desc_count = no_of_desc;
+ dev_info->rx_desc = first_desc;
+ dev_info->rx_desc_dma = dma_addr;
+
+ netdev_dbg(netdev, "Rx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%px dma = 0x%px\n",
+ no_of_desc, first_desc, (void *)dma_addr);
+
+ syn_dp_gmac_rx_desc_init_ring(dev_info->rx_desc, no_of_desc);
+
+ dev_info->rx_next = 0;
+ dev_info->rx_busy = 0;
+ dev_info->rx_next_desc = first_desc;
+ dev_info->rx_busy_desc = first_desc;
+ dev_info->busy_rx_desc = 0;
+
+ return 0;
+}
+
+/*
+ * syn_dp_setup_tx_desc_queue
+ * This sets up the transmit Descriptor queue in ring or chain mode.
+ */
+static int syn_dp_setup_tx_desc_queue(struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info,
+ uint32_t no_of_desc, uint32_t desc_mode)
+{
+ struct dma_desc *first_desc = NULL;
+ dma_addr_t dma_addr;
+
+ dev_info->tx_desc_count = 0;
+
+ BUG_ON(desc_mode != RINGMODE);
+ BUG_ON((no_of_desc & (no_of_desc - 1)) != 0);
+
+ netdev_dbg(netdev, "Total size of memory required for Tx Descriptors in Ring Mode = %u\n", (uint32_t)((sizeof(struct dma_desc) * no_of_desc)));
+
+ first_desc = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc, &dma_addr, GFP_KERNEL);
+ if (first_desc == NULL) {
+ netdev_dbg(netdev, "Error in Tx Descriptors memory allocation\n");
+ return -ENOMEM;
+ }
+
+ dev_info->tx_desc_count = no_of_desc;
+ dev_info->tx_desc = first_desc;
+ dev_info->tx_desc_dma = dma_addr;
+ netdev_dbg(netdev, "Tx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%px dma = 0x%px\n"
+ , no_of_desc, first_desc, (void *)dma_addr);
+
+ syn_dp_gmac_tx_desc_init_ring(dev_info->tx_desc, dev_info->tx_desc_count);
+
+ dev_info->tx_next = 0;
+ dev_info->tx_busy = 0;
+ dev_info->tx_next_desc = first_desc;
+ dev_info->tx_busy_desc = first_desc;
+ dev_info->busy_tx_desc = 0;
+
+ return 0;
+}
+
+/*
+ * syn_dp_setup_rings
+ * Perform initial setup of Tx/Rx rings
+ */
+int syn_dp_setup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info)
+{
+ struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
+ int err;
+
+ err = syn_dp_setup_rx_desc_queue(netdev, dev, dev_info, SYN_DP_RX_DESC_SIZE, RINGMODE);
+ if (err) {
+ netdev_dbg(netdev, "nss_dp_gmac: rx descriptor setup unsuccessfull, err code: %d", err);
+ return NSS_DP_FAILURE;
+ }
+
+ err = syn_dp_setup_tx_desc_queue(netdev, dev, dev_info, SYN_DP_TX_DESC_SIZE, RINGMODE);
+ if (err) {
+ netdev_dbg(netdev, "nss_dp_gmac: tx descriptor setup unsuccessfull, err code: %d", err);
+ return NSS_DP_FAILURE;
+ }
+
+ syn_dp_rx_refill(gmac_dev, dev_info);
+
+ syn_init_tx_desc_base(nghd, dev_info->tx_desc_dma);
+ syn_init_rx_desc_base(nghd, dev_info->rx_desc_dma);
+
+ return NSS_DP_SUCCESS;
+}
+
+/*
+ * syn_dp_cleanup_rings
+ * Cleanup Synopsys GMAC rings
+ */
+int syn_dp_cleanup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct syn_dp_info *dev_info)
+{
+ uint32_t rx_skb_index;
+ struct dma_desc *rxdesc;
+
+ uint32_t tx_skb_index;
+ struct dma_desc *txdesc;
+ int i;
+ struct sk_buff *skb;
+
+ /*
+ * Rx Ring cleaning
+ * We are assuming that the NAPI poll was already completed.
+ * No need of a lock here since the NAPI and interrupts have been disabled now
+ */
+ rx_skb_index = dev_info->rx_busy;
+ for (i = 0; i < dev_info->busy_rx_desc; i++) {
+ rx_skb_index = rx_skb_index & (dev_info->rx_desc_count - 1);
+ rxdesc = dev_info->rx_busy_desc;
+
+ dma_unmap_single(&(gmac_dev->netdev->dev), rxdesc->buffer1,
+ SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
+
+ skb = dev_info->rx_skb_list[rx_skb_index];
+ if (unlikely(skb != NULL)) {
+ dev_kfree_skb(skb);
+ dev_info->rx_skb_list[rx_skb_index] = NULL;
+ }
+ }
+
+ dma_free_coherent(&(gmac_dev->netdev->dev), (sizeof(struct dma_desc) * SYN_DP_RX_DESC_SIZE),
+ dev_info->rx_desc, dev_info->rx_desc_dma);
+
+ /*
+ * Tx Ring cleaning
+ */
+ spin_lock_bh(&dev_info->data_lock);
+
+ tx_skb_index = dev_info->tx_busy;
+ for (i = 0; i < dev_info->busy_tx_desc; i++) {
+ tx_skb_index = tx_skb_index & (dev_info->tx_desc_count - 1);
+ txdesc = dev_info->tx_busy_desc;
+
+ dma_unmap_single(&(gmac_dev->netdev->dev), txdesc->buffer1,
+ SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
+
+ skb = dev_info->tx_skb_list[tx_skb_index];
+ if (unlikely(skb != NULL)) {
+ dev_kfree_skb(skb);
+ dev_info->tx_skb_list[tx_skb_index] = NULL;
+ }
+ }
+
+ spin_unlock_bh(&dev_info->data_lock);
+
+ dma_free_coherent(&(gmac_dev->netdev->dev), (sizeof(struct dma_desc) * SYN_DP_TX_DESC_SIZE),
+ dev_info->tx_desc, dev_info->tx_desc_dma);
+
+ return 0;
+}
\ No newline at end of file
diff --git a/hal/syn_gmac_dp/syn_dp_tx_rx.c b/hal/syn_gmac_dp/syn_dp_tx_rx.c
new file mode 100644
index 0000000..ea01884
--- /dev/null
+++ b/hal/syn_gmac_dp/syn_dp_tx_rx.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+
+#include "syn_data_plane.h"
+#include "syn_reg.h"
+
+/*
+ * syn_dp_reset_rx_qptr
+ * Reset the descriptor after Rx is over.
+ */
+static inline void syn_dp_reset_rx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
+{
+
+ /* Index of descriptor the DMA just completed.
+ * May be useful when data is spread over multiple buffers/descriptors
+ */
+ uint32_t rxnext = dev_info->rx_busy;
+ struct dma_desc *rxdesc = dev_info->rx_busy_desc;
+
+ BUG_ON(rxdesc != (dev_info->rx_desc + rxnext));
+ dev_info->rx_busy = (rxnext + 1) & (dev_info->rx_desc_count - 1);
+ dev_info->rx_busy_desc = dev_info->rx_desc + dev_info->rx_busy;
+
+ dev_info->rx_skb_list[rxnext] = NULL;
+ rxdesc->status = 0;
+ rxdesc->length &= desc_rx_desc_end_of_ring;
+ rxdesc->buffer1 = 0;
+ rxdesc->data1 = 0;
+ rxdesc->reserved1 = 0;
+
+ /*
+ * This returns one descriptor to processor. So busy count will be decremented by one.
+ */
+ dev_info->busy_rx_desc--;
+}
+
+/*
+ * syn_dp_set_rx_qptr
+ * Prepares the descriptor to receive packets.
+ */
+static inline int32_t syn_dp_set_rx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info,
+ uint32_t Buffer1, uint32_t Length1, struct sk_buff *skb)
+{
+ uint32_t rxnext = dev_info->rx_next;
+ struct dma_desc *rxdesc = dev_info->rx_next_desc;
+ uint32_t rx_skb_index = rxnext;
+
+ BUG_ON(dev_info->busy_rx_desc >= dev_info->rx_desc_count);
+ BUG_ON(rxdesc != (dev_info->rx_desc + rxnext));
+ BUG_ON(!syn_dp_gmac_is_desc_empty(rxdesc));
+ BUG_ON(syn_dp_gmac_is_desc_owned_by_dma(rxdesc));
+
+ if (Length1 > SYN_DP_MAX_DESC_BUFF) {
+ rxdesc->length |= (SYN_DP_MAX_DESC_BUFF << desc_size1_shift) & desc_size1_mask;
+ rxdesc->length |= ((Length1 - SYN_DP_MAX_DESC_BUFF) << desc_size2_shift) & desc_size2_mask;
+ } else {
+ rxdesc->length |= ((Length1 << desc_size1_shift) & desc_size1_mask);
+ }
+
+ rxdesc->buffer1 = Buffer1;
+ dev_info->rx_skb_list[rx_skb_index] = skb;
+
+ /* Program second buffer address if using two buffers. */
+ if (Length1 > SYN_DP_MAX_DESC_BUFF)
+ rxdesc->data1 = Buffer1 + SYN_DP_MAX_DESC_BUFF;
+ else
+ rxdesc->data1 = 0;
+
+ rxdesc->extstatus = 0;
+ rxdesc->timestamplow = 0;
+ rxdesc->timestamphigh = 0;
+
+ /*
+ * Ensure all write completed before setting own by dma bit so when gmac
+ * HW takeover this descriptor, all the fields are filled correctly
+ */
+ wmb();
+ rxdesc->status = desc_own_by_dma;
+
+ dev_info->rx_next = (rxnext + 1) & (dev_info->rx_desc_count - 1);
+ dev_info->rx_next_desc = dev_info->rx_desc + dev_info->rx_next;
+
+ /*
+ * 1 descriptor will be given to HW. So busy count incremented by 1.
+ */
+ dev_info->busy_rx_desc++;
+
+ return rxnext;
+}
+
+/*
+ * syn_dp_rx_refill
+ *	Refill the RX descriptor
+ */
+void syn_dp_rx_refill(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
+{
+ struct net_device *netdev = gmac_dev->netdev;
+ struct device *dev = &gmac_dev->pdev->dev;
+ int empty_count = SYN_DP_RX_DESC_SIZE - dev_info->busy_rx_desc;
+
+ dma_addr_t dma_addr;
+ int i;
+ struct sk_buff *skb;
+
+ for (i = 0; i < empty_count; i++) {
+ skb = __netdev_alloc_skb(netdev, SYN_DP_MINI_JUMBO_FRAME_MTU, GFP_ATOMIC);
+ if (unlikely(skb == NULL)) {
+ netdev_dbg(netdev, "Unable to allocate skb, will try next time\n");
+ break;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ dma_addr = dma_map_single(dev, skb->data, SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
+ dev_kfree_skb(skb);
+ netdev_dbg(netdev, "DMA mapping failed for empty buffer\n");
+ break;
+ }
+
+ syn_dp_set_rx_qptr(gmac_dev, dev_info, dma_addr, SYN_DP_MINI_JUMBO_FRAME_MTU, skb);
+ }
+}
+
+/*
+ * syn_dp_rx()
+ * Process RX packets
+ */
+int syn_dp_rx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, int budget)
+{
+ struct dma_desc *desc = NULL;
+ int frame_length, busy;
+ uint32_t status;
+ struct sk_buff *rx_skb;
+ uint32_t rx_skb_index;
+
+ if (!dev_info->busy_rx_desc) {
+ /* no desc are held by gmac dma, we are done */
+ return 0;
+ }
+
+ busy = dev_info->busy_rx_desc;
+ if (busy > budget)
+ busy = budget;
+
+ do {
+ desc = dev_info->rx_busy_desc;
+ if (syn_dp_gmac_is_desc_owned_by_dma(desc)) {
+			/* desc still held by gmac dma, so we are done */
+ break;
+ }
+
+ status = desc->status;
+
+ rx_skb_index = dev_info->rx_busy;
+ rx_skb = dev_info->rx_skb_list[rx_skb_index];
+
+ dma_unmap_single(&(gmac_dev->netdev->dev), desc->buffer1,
+ SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE);
+
+ spin_lock_bh(&dev_info->stats_lock);
+ if (likely(syn_dp_gmac_is_rx_desc_valid(status))) {
+ /* We have a pkt to process get the frame length */
+ frame_length = syn_dp_gmac_get_rx_desc_frame_length(status);
+ /* Get rid of FCS: 4 */
+ frame_length -= ETH_FCS_LEN;
+
+ /* Valid packet, collect stats */
+ dev_info->stats.stats.rx_packets++;
+ dev_info->stats.stats.rx_bytes += frame_length;
+
+ /* type_trans and deliver to linux */
+ skb_put(rx_skb, frame_length);
+ rx_skb->protocol = eth_type_trans(rx_skb, gmac_dev->netdev);
+ rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ napi_gro_receive(&gmac_dev->napi, rx_skb);
+
+ } else {
+ dev_info->stats.stats.rx_errors++;
+ dev_kfree_skb(rx_skb);
+
+ if (status & (desc_rx_crc | desc_rx_collision |
+ desc_rx_overflow | desc_rx_dribbling |
+ desc_rx_length_error)) {
+ dev_info->stats.stats.mmc_rx_crc_errors += (status & desc_rx_crc) ? 1 : 0;
+ dev_info->stats.stats.rx_late_collision_errors += (status & desc_rx_collision) ? 1 : 0;
+ dev_info->stats.stats.mmc_rx_overflow_errors += (status & desc_rx_overflow) ? 1 : 0;
+ dev_info->stats.stats.rx_dribble_bit_errors += (status & desc_rx_dribbling) ? 1 : 0;
+ dev_info->stats.stats.rx_length_errors += (status & desc_rx_length_error) ? 1 : 0;
+ }
+ }
+
+ spin_unlock_bh(&dev_info->stats_lock);
+
+ syn_dp_reset_rx_qptr(gmac_dev, dev_info);
+ busy--;
+ } while (busy > 0);
+ return budget - busy;
+}
+
+/*
+ * syn_dp_reset_tx_qptr
+ * Reset the descriptor after Tx is over.
+ */
+static inline void syn_dp_reset_tx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
+{
+ uint32_t txover = dev_info->tx_busy;
+ struct dma_desc *txdesc = dev_info->tx_busy_desc;
+
+ BUG_ON(txdesc != (dev_info->tx_desc + txover));
+ dev_info->tx_busy = (txover + 1) & (dev_info->tx_desc_count - 1);
+ dev_info->tx_busy_desc = dev_info->tx_desc + dev_info->tx_busy;
+
+ dev_info->tx_skb_list[txover] = NULL;
+ txdesc->status &= desc_tx_desc_end_of_ring;
+ txdesc->length = 0;
+ txdesc->buffer1 = 0;
+ txdesc->data1 = 0;
+ txdesc->reserved1 = 0;
+
+ /*
+ * Busy tx descriptor is reduced by one as
+ * it will be handed over to Processor now.
+ */
+ dev_info->busy_tx_desc--;
+}
+
+/*
+ * syn_dp_set_tx_qptr
+ * Populate the tx desc structure with the buffer address.
+ */
+static inline struct dma_desc *syn_dp_set_tx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info,
+ uint32_t Buffer1, uint32_t Length1, struct sk_buff *skb, uint32_t offload_needed,
+ uint32_t tx_cntl, uint32_t set_dma)
+{
+ uint32_t txnext = dev_info->tx_next;
+ struct dma_desc *txdesc = dev_info->tx_next_desc;
+ uint32_t tx_skb_index = txnext;
+
+ BUG_ON(dev_info->busy_tx_desc > dev_info->tx_desc_count);
+ BUG_ON(txdesc != (dev_info->tx_desc + txnext));
+ BUG_ON(!syn_dp_gmac_is_desc_empty(txdesc));
+ BUG_ON(syn_dp_gmac_is_desc_owned_by_dma(txdesc));
+
+ if (Length1 > SYN_DP_MAX_DESC_BUFF) {
+ txdesc->length |= (SYN_DP_MAX_DESC_BUFF << desc_size1_shift) & desc_size1_mask;
+ txdesc->length |=
+ ((Length1 - SYN_DP_MAX_DESC_BUFF) << desc_size2_shift) & desc_size2_mask;
+ } else {
+ txdesc->length |= ((Length1 << desc_size1_shift) & desc_size1_mask);
+ }
+
+ txdesc->status |= tx_cntl;
+ txdesc->buffer1 = Buffer1;
+
+ dev_info->tx_skb_list[tx_skb_index] = skb;
+
+ /* Program second buffer address if using two buffers. */
+ if (Length1 > SYN_DP_MAX_DESC_BUFF)
+ txdesc->data1 = Buffer1 + SYN_DP_MAX_DESC_BUFF;
+ else
+ txdesc->data1 = 0;
+
+ if (likely(offload_needed)) {
+ syn_dp_gmac_tx_checksum_offload_tcp_pseudo(txdesc);
+ }
+
+ /*
+ * Ensure all write completed before setting own by dma bit so when gmac
+ * HW takeover this descriptor, all the fields are filled correctly
+ */
+ wmb();
+ txdesc->status |= set_dma;
+
+ dev_info->tx_next = (txnext + 1) & (dev_info->tx_desc_count - 1);
+ dev_info->tx_next_desc = dev_info->tx_desc + dev_info->tx_next;
+
+ return txdesc;
+}
+
+/*
+ * syn_dp_tx_queue_desc
+ * Queue TX descriptor to the TX ring
+ */
+static void syn_dp_tx_desc_queue(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb, dma_addr_t dma_addr)
+{
+ unsigned int len = skb->len;
+
+ spin_lock_bh(&dev_info->data_lock);
+
+ syn_dp_set_tx_qptr(gmac_dev, dev_info, dma_addr, len, skb, (skb->ip_summed == CHECKSUM_PARTIAL),
+ (desc_tx_last | desc_tx_first | desc_tx_int_enable), desc_own_by_dma);
+ dev_info->busy_tx_desc++;
+
+ spin_unlock_bh(&dev_info->data_lock);
+}
+
+/*
+ * syn_dp_process_tx_complete
+ * Xmit complete, clear descriptor and free the skb
+ */
+void syn_dp_process_tx_complete(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info)
+{
+ int busy, len;
+ uint32_t status;
+ struct dma_desc *desc = NULL;
+ struct sk_buff *skb;
+ uint32_t tx_skb_index;
+
+ spin_lock_bh(&dev_info->data_lock);
+ busy = dev_info->busy_tx_desc;
+
+ if (!busy) {
+		/* No desc are held by gmac dma, we are done */
+ spin_unlock_bh(&dev_info->data_lock);
+ return;
+ }
+
+ do {
+ desc = dev_info->tx_busy_desc;
+ if (syn_dp_gmac_is_desc_owned_by_dma(desc)) {
+			/* desc still held by gmac dma, so we are done */
+ break;
+ }
+
+ len = (desc->length & desc_size1_mask) >> desc_size1_shift;
+ dma_unmap_single(&(gmac_dev->pdev->dev), desc->buffer1, len, DMA_TO_DEVICE);
+
+ status = desc->status;
+ if (status & desc_tx_last) {
+ /* TX is done for this whole skb, we can free it */
+ /* Get the skb from the tx skb pool */
+ tx_skb_index = dev_info->tx_busy;
+ skb = dev_info->tx_skb_list[tx_skb_index];
+
+ BUG_ON(!skb);
+ dev_kfree_skb(skb);
+
+ spin_lock_bh(&dev_info->stats_lock);
+
+ if (unlikely(status & desc_tx_error)) {
+ /* Some error happen, collect statistics */
+ dev_info->stats.stats.tx_errors++;
+ dev_info->stats.stats.tx_jabber_timeout_errors += (status & desc_tx_timeout) ? 1 : 0;
+ dev_info->stats.stats.tx_frame_flushed_errors += (status & desc_tx_frame_flushed) ? 1 : 0;
+ dev_info->stats.stats.tx_loss_of_carrier_errors += (status & desc_tx_lost_carrier) ? 1 : 0;
+ dev_info->stats.stats.tx_no_carrier_errors += (status & desc_tx_no_carrier) ? 1 : 0;
+ dev_info->stats.stats.tx_late_collision_errors += (status & desc_tx_late_collision) ? 1 : 0;
+ dev_info->stats.stats.tx_excessive_collision_errors += (status & desc_tx_exc_collisions) ? 1 : 0;
+ dev_info->stats.stats.tx_excessive_deferral_errors += (status & desc_tx_exc_deferral) ? 1 : 0;
+ dev_info->stats.stats.tx_underflow_errors += (status & desc_tx_underflow) ? 1 : 0;
+ dev_info->stats.stats.tx_ip_header_errors += (status & desc_tx_ipv4_chk_error) ? 1 : 0;
+ dev_info->stats.stats.tx_ip_payload_errors += (status & desc_tx_pay_chk_error) ? 1 : 0;
+ } else {
+				/* No error, record tx pkts/bytes and
+ * collision
+ */
+ dev_info->stats.stats.tx_packets++;
+ dev_info->stats.stats.tx_collisions += syn_dp_gmac_get_tx_collision_count(status);
+ dev_info->stats.stats.tx_bytes += len;
+ }
+
+ spin_unlock_bh(&dev_info->stats_lock);
+ }
+ syn_dp_reset_tx_qptr(gmac_dev, dev_info);
+ busy--;
+ } while (busy > 0);
+
+ spin_unlock_bh(&dev_info->data_lock);
+}
+
+/*
+ * syn_dp_tx
+ * TX routine for Synopsys GMAC
+ */
+int syn_dp_tx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb)
+{
+ struct net_device *netdev = gmac_dev->netdev;
+ struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx;
+ unsigned len = skb->len;
+ dma_addr_t dma_addr;
+
+ /*
+ * If we don't have enough tx descriptor for this pkt, return busy.
+ */
+ if ((SYN_DP_TX_DESC_SIZE - dev_info->busy_tx_desc) < 1) {
+ netdev_dbg(netdev, "Not enough descriptors available");
+ return -1;
+ }
+
+ dma_addr = dma_map_single(&gmac_dev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&gmac_dev->pdev->dev, dma_addr))) {
+ netdev_dbg(netdev, "DMA mapping failed for empty buffer\n");
+ return -1;
+ }
+
+ /*
+ * Queue packet to the GMAC rings
+ */
+ syn_dp_tx_desc_queue(gmac_dev, dev_info, skb, dma_addr);
+
+ syn_resume_dma_tx(nghd);
+
+ return 0;
+}
\ No newline at end of file
diff --git a/include/nss_dp_dev.h b/include/nss_dp_dev.h
index 4435d8e..4397773 100644
--- a/include/nss_dp_dev.h
+++ b/include/nss_dp_dev.h
@@ -19,23 +19,17 @@
#ifndef __NSS_DP_DEV_H__
#define __NSS_DP_DEV_H__
+#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
+#include <linux/switch.h>
#include "nss_dp_api_if.h"
#include "nss_dp_hal_if.h"
-#define NSS_DP_START_PHY_PORT 1
-#if defined(NSS_DP_IPQ60XX)
-#define NSS_DP_MAX_PHY_PORTS 5
-#else
-#define NSS_DP_MAX_PHY_PORTS 6
-#endif
-#define NSS_DP_ETH_HLEN_CRC (ETH_HLEN + ETH_FCS_LEN + 2*(VLAN_HLEN))
-
#define NSS_DP_ACL_DEV_ID 0
struct nss_dp_global_ctx;
@@ -62,17 +56,15 @@
struct net_device *netdev;
struct platform_device *pdev;
-
struct napi_struct napi;
- struct rtnl_link_stats64 stats; /* statistics counters */
struct nss_dp_data_plane_ctx *dpc;
/* context when NSS owns GMACs */
struct nss_dp_data_plane_ops *data_plane_ops;
- /* ops for each data plane*/
+ /* ops for each data plane */
struct nss_dp_global_ctx *ctx; /* Global NSS DP context */
struct nss_gmac_hal_dev *gmac_hal_ctx; /* context of gmac hal */
- struct nss_gmac_hal_ops *gmac_hal_ops; /* GMAC HAL OPS*/
+ struct nss_gmac_hal_ops *gmac_hal_ops; /* GMAC HAL OPS */
/* switchdev related attributes */
#ifdef CONFIG_NET_SWITCHDEV
@@ -85,14 +77,16 @@
* nss data plane global context
*/
struct nss_dp_global_ctx {
- struct nss_dp_dev *nss_dp[NSS_DP_MAX_PHY_PORTS];
+ struct nss_dp_dev *nss_dp[NSS_DP_HAL_MAX_PORTS];
+ struct nss_gmac_hal_ops *gmac_hal_ops[GMAC_HAL_TYPE_MAX];
+ /* GMAC HAL OPS */
bool common_init_done; /* Flag to hold common init state */
uint8_t slowproto_acl_bm; /* Port bitmap to allow slow protocol packets */
};
/* Global data */
extern struct nss_dp_global_ctx dp_global_ctx;
-extern struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_MAX_PHY_PORTS];
+extern struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_HAL_MAX_PORTS];
/*
* nss data plane link state
@@ -122,8 +116,6 @@
};
#define NSS_DP_PRIV_FLAG(x) (1 << __NSS_DP_PRIV_FLAG_ ## x)
-extern struct nss_dp_data_plane_ops nss_dp_edma_ops;
-
/*
* nss_dp_set_ethtool_ops()
*/
@@ -134,6 +126,7 @@
*/
#ifdef CONFIG_NET_SWITCHDEV
void nss_dp_switchdev_setup(struct net_device *dev);
+bool nss_dp_is_phy_dev(struct net_device *dev);
#endif
#endif /* __NSS_DP_DEV_H__ */
diff --git a/nss_dp_attach.c b/nss_dp_attach.c
index dd34481..94e8f69 100644
--- a/nss_dp_attach.c
+++ b/nss_dp_attach.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -16,8 +16,8 @@
**************************************************************************
*/
-#include "nss_dp_dev.h"
-#include <nss_dp_api_if.h>
+#include <linux/version.h>
+#include "nss_dp_hal.h"
/*
* nss_dp_reset_netdev_features()
@@ -46,7 +46,14 @@
dp_dev->macid, skb->len, skb->ip_summed);
#ifdef CONFIG_NET_SWITCHDEV
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
skb->offload_fwd_mark = netdev->offload_fwd_mark;
+#else
+ /*
+ * TODO: Implement ndo_get_devlink_port()
+ */
+ skb->offload_fwd_mark = 0;
+#endif
#endif
napi_gro_receive(napi, skb);
@@ -96,7 +103,14 @@
/*
* Free up the resources used by the data plane
*/
- dp_dev->data_plane_ops->deinit(dpc);
+ if (dp_dev->drv_flags & NSS_DP_PRIV_FLAG(INIT_DONE)) {
+ if (dp_dev->data_plane_ops->deinit(dpc)) {
+ netdev_dbg(netdev, "Data plane init failed\n");
+ return -ENOMEM;
+ }
+
+ dp_dev->drv_flags &= ~NSS_DP_PRIV_FLAG(INIT_DONE);
+ }
/*
* Override the data_plane_ctx, data_plane_ops
@@ -123,7 +137,7 @@
}
if (dp_dev->dpc != dpc) {
- netdev_dbg(netdev, "Cookie %p does not match, reject\n", dpc);
+ netdev_dbg(netdev, "Cookie %px does not match, reject\n", dpc);
return;
}
@@ -148,8 +162,8 @@
nss_dp_reset_netdev_features(netdev);
}
- dp_dev->data_plane_ops = &nss_dp_edma_ops;
- dp_dev->dpc = &dp_global_data_plane_ctx[dp_dev->macid-1];
+ dp_dev->data_plane_ops = nss_dp_hal_get_data_plane_ops();
+ dp_dev->dpc = &dp_global_data_plane_ctx[dp_dev->macid - NSS_DP_START_IFNUM];
/*
* TODO: Re-initialize EDMA dataplane
@@ -158,21 +172,21 @@
EXPORT_SYMBOL(nss_dp_restore_data_plane);
/*
- * nss_dp_get_netdev_by_macid()
+ * nss_dp_get_netdev_by_nss_if_num()
* return the net device of the corrsponding id if exist
*/
-struct net_device *nss_dp_get_netdev_by_macid(int macid)
+struct net_device *nss_dp_get_netdev_by_nss_if_num(int if_num)
{
struct nss_dp_dev *dp_dev;
- if (macid > NSS_DP_MAX_PHY_PORTS || macid <= 0) {
- pr_err("Invalid macid %d\n", macid);
+ if ((if_num > NSS_DP_HAL_MAX_PORTS) || (if_num < NSS_DP_START_IFNUM)) {
+ pr_err("Invalid if_num %d\n", if_num);
return NULL;
}
- dp_dev = dp_global_ctx.nss_dp[macid - 1];
+ dp_dev = dp_global_ctx.nss_dp[if_num - NSS_DP_START_IFNUM];
if (!dp_dev)
return NULL;
return dp_dev->netdev;
}
-EXPORT_SYMBOL(nss_dp_get_netdev_by_macid);
+EXPORT_SYMBOL(nss_dp_get_netdev_by_nss_if_num);
diff --git a/nss_dp_ethtools.c b/nss_dp_ethtools.c
index a40dd5c..289bf87 100644
--- a/nss_dp_ethtools.c
+++ b/nss_dp_ethtools.c
@@ -1,6 +1,6 @@
/*
**************************************************************************
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -16,7 +16,10 @@
**************************************************************************
*/
+#include <linux/version.h>
#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/mii.h>
#include "nss_dp_dev.h"
#include "fal/fal_port_ctrl.h"
@@ -53,6 +56,7 @@
data);
}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
/*
* nss_dp_get_settings()
*/
@@ -83,6 +87,7 @@
return phy_ethtool_sset(dp_priv->phydev, cmd);
}
+#endif
/*
* nss_dp_get_pauseparam()
@@ -100,10 +105,44 @@
/*
* nss_dp_set_pauseparam()
*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+static int32_t nss_dp_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
+
+ /* set flow control settings */
+ dp_priv->pause = 0;
+ if (pause->rx_pause)
+ dp_priv->pause |= FLOW_CTRL_RX;
+
+ if (pause->tx_pause)
+ dp_priv->pause |= FLOW_CTRL_TX;
+
+ if (!dp_priv->phydev)
+ return 0;
+
+	/* Update flow control advertisement */
+ dp_priv->phydev->advertising &=
+ ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ if (pause->rx_pause)
+ dp_priv->phydev->advertising |=
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+
+ if (pause->tx_pause)
+ dp_priv->phydev->advertising |= ADVERTISED_Asym_Pause;
+
+ genphy_config_aneg(dp_priv->phydev);
+
+ return 0;
+}
+#else
static int32_t nss_dp_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
/* set flow control settings */
dp_priv->pause = 0;
@@ -117,20 +156,25 @@
return 0;
/* Update flow control advertisment */
- dp_priv->phydev->advertising &=
- ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ linkmode_copy(advertising, dp_priv->phydev->advertising);
- if (pause->rx_pause)
- dp_priv->phydev->advertising |=
- (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
+
+ if (pause->rx_pause) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
+ }
if (pause->tx_pause)
- dp_priv->phydev->advertising |= ADVERTISED_Asym_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising);
+ linkmode_copy(dp_priv->phydev->advertising, advertising);
genphy_config_aneg(dp_priv->phydev);
return 0;
}
+#endif
/*
* nss_dp_fal_to_ethtool_linkmode_xlate()
@@ -311,8 +355,13 @@
.get_sset_count = &nss_dp_get_strset_count,
.get_ethtool_stats = &nss_dp_get_ethtool_stats,
.get_link = ðtool_op_get_link,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
.get_settings = &nss_dp_get_settings,
.set_settings = &nss_dp_set_settings,
+#else
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+#endif
.get_pauseparam = &nss_dp_get_pauseparam,
.set_pauseparam = &nss_dp_set_pauseparam,
.get_eee = &nss_dp_get_eee,
diff --git a/nss_dp_main.c b/nss_dp_main.c
index 6e02059..5580b13 100644
--- a/nss_dp_main.c
+++ b/nss_dp_main.c
@@ -24,24 +24,20 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
#if defined(NSS_DP_PPE_SUPPORT)
#include <ref/ref_vsi.h>
#endif
#include <net/switchdev.h>
-#include "nss_dp_dev.h"
-#include "edma.h"
-
-/*
- * Number of host CPU cores
- */
-#define NSS_DP_HOST_CPU_NUM 4
+#include "nss_dp_hal.h"
/*
* Number of TX/RX queue supported is based on the number of host CPU
*/
-#define NSS_DP_NETDEV_TX_QUEUE_NUM NSS_DP_HOST_CPU_NUM
-#define NSS_DP_NETDEV_RX_QUEUE_NUM NSS_DP_HOST_CPU_NUM
+#define NSS_DP_NETDEV_TX_QUEUE_NUM NSS_DP_HAL_CPU_NUM
+#define NSS_DP_NETDEV_RX_QUEUE_NUM NSS_DP_HAL_CPU_NUM
/* ipq40xx_mdio_data */
struct ipq40xx_mdio_data {
@@ -52,7 +48,7 @@
/* Global data */
struct nss_dp_global_ctx dp_global_ctx;
-struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_MAX_PHY_PORTS];
+struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_HAL_MAX_PORTS];
/*
* nss_dp_do_ioctl()
@@ -137,6 +133,7 @@
/*
* nss_dp_get_stats64()
*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
static struct rtnl_link_stats64 *nss_dp_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -151,6 +148,20 @@
return stats;
}
+#else
+static void nss_dp_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct nss_dp_dev *dp_priv;
+
+ if (!netdev)
+ return;
+
+ dp_priv = (struct nss_dp_dev *)netdev_priv(netdev);
+
+ dp_priv->gmac_hal_ops->getndostats(dp_priv->gmac_hal_ctx, stats);
+}
+#endif
/*
* nss_dp_xmit()
@@ -199,7 +210,9 @@
}
#endif
- /* Notify data plane to close */
+ /*
+ * Notify data plane to close
+ */
if (dp_priv->data_plane_ops->close(dp_priv->dpc)) {
netdev_dbg(netdev, "Data plane close failed\n");
return -EAGAIN;
@@ -371,8 +384,13 @@
* nss_dp_select_queue()
* Select tx queue
*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
static u16 nss_dp_select_queue(struct net_device *netdev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
+#else
+static u16 nss_dp_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+#endif
{
int cpu = get_cpu();
put_cpu();
@@ -396,13 +414,17 @@
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = nss_dp_change_mtu,
.ndo_do_ioctl = nss_dp_do_ioctl,
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
+#endif
+ .ndo_select_queue = nss_dp_select_queue,
+
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = nss_dp_rx_flow_steer,
#endif
- .ndo_select_queue = nss_dp_select_queue,
};
/*
@@ -423,7 +445,7 @@
return -EFAULT;
}
- if (dp_priv->macid > NSS_DP_MAX_PHY_PORTS || !dp_priv->macid) {
+ if (dp_priv->macid > NSS_DP_HAL_MAX_PORTS || !dp_priv->macid) {
pr_err("%s: invalid macid %d\n", np->name, dp_priv->macid);
return -EFAULT;
}
@@ -454,11 +476,17 @@
of_property_read_u32(np, "qcom,forced-duplex", &dp_priv->forced_duplex);
maddr = (uint8_t *)of_get_mac_address(np);
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 4, 0))
+ if (IS_ERR((void *)maddr)) {
+ maddr = NULL;
+ }
+#endif
+
if (maddr && is_valid_ether_addr(maddr)) {
ether_addr_copy(netdev->dev_addr, maddr);
} else {
random_ether_addr(netdev->dev_addr);
- pr_info("GMAC%d(%p) Invalid MAC@ - using %pM\n", dp_priv->macid,
+ pr_info("GMAC%d(%px) Invalid MAC@ - using %pM\n", dp_priv->macid,
dp_priv, netdev->dev_addr);
}
@@ -474,6 +502,14 @@
struct platform_device *mdio_plat;
struct ipq40xx_mdio_data *mdio_data;
+ /*
+ * Find mii_bus using "mdio-bus" handle.
+ */
+ mdio_node = of_parse_phandle(pdev->dev.of_node, "mdio-bus", 0);
+ if (mdio_node) {
+ return of_mdio_find_bus(mdio_node);
+ }
+
mdio_node = of_find_compatible_node(NULL, NULL, "qcom,ipq40xx-mdio");
if (!mdio_node) {
dev_err(&pdev->dev, "cannot find mdio node by phandle\n");
@@ -497,6 +533,17 @@
return mdio_data->mii_bus;
}
+#ifdef CONFIG_NET_SWITCHDEV
+/*
+ * nss_dp_is_phy_dev()
+ * Check if it is dp device
+ */
+bool nss_dp_is_phy_dev(struct net_device *dev)
+{
+ return (dev->netdev_ops == &nss_dp_netdev_ops);
+}
+#endif
+
/*
* nss_dp_adjust_link()
*/
@@ -559,6 +606,11 @@
return -ENOMEM;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+ /* max_mtu is set to 1500 in ether_setup() */
+ netdev->max_mtu = ETH_MAX_MTU;
+#endif
+
dp_priv = netdev_priv(netdev);
memset((void *)dp_priv, 0, sizeof(struct nss_dp_dev));
@@ -576,8 +628,13 @@
goto fail;
}
- /* Use EDMA data plane as default */
- dp_priv->data_plane_ops = &nss_dp_edma_ops;
+ /* Use data plane ops as per the configured SoC */
+ dp_priv->data_plane_ops = nss_dp_hal_get_data_plane_ops();
+ if (!dp_priv->data_plane_ops) {
+ netdev_dbg(netdev, "Dataplane ops not found.\n");
+ goto fail;
+ }
+
dp_priv->dpc = &dp_global_data_plane_ctx[dp_priv->macid-1];
dp_priv->dpc->dev = netdev;
dp_priv->ctx = &dp_global_ctx;
@@ -590,14 +647,9 @@
* The subsequent hal_ops calls expect the DP to pass the HAL
* context pointer as an argument
*/
- if (gmac_hal_pdata.mactype == GMAC_HAL_TYPE_QCOM)
- dp_priv->gmac_hal_ops = &qcom_hal_ops;
- else if (gmac_hal_pdata.mactype == GMAC_HAL_TYPE_10G)
- dp_priv->gmac_hal_ops = &syn_hal_ops;
-
+ dp_priv->gmac_hal_ops = nss_dp_hal_get_gmac_ops(gmac_hal_pdata.mactype);
if (!dp_priv->gmac_hal_ops) {
- netdev_dbg(netdev, "Unsupported Mac type: %d\n",
- gmac_hal_pdata.mactype);
+ netdev_dbg(netdev, "Unsupported Mac type: %d\n", gmac_hal_pdata.mactype);
goto fail;
}
@@ -614,19 +666,30 @@
goto fail;
}
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
- dp_priv->miibus->id, dp_priv->phy_mdio_addr);
+ dp_priv->miibus->id, dp_priv->phy_mdio_addr);
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
dp_priv->phydev = phy_connect(netdev, phy_id,
- &nss_dp_adjust_link,
- dp_priv->phy_mii_type);
+ &nss_dp_adjust_link,
+ dp_priv->phy_mii_type);
if (IS_ERR(dp_priv->phydev)) {
netdev_dbg(netdev, "failed to connect to phy device\n");
goto fail;
}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
dp_priv->phydev->advertising |=
- (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
dp_priv->phydev->supported |=
- (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+#else
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, dp_priv->phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dp_priv->phydev->advertising);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, dp_priv->phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dp_priv->phydev->supported);
+#endif
}
#if defined(NSS_DP_PPE_SUPPORT)
@@ -654,8 +717,7 @@
dp_global_ctx.nss_dp[dp_priv->macid - 1] = dp_priv;
dp_global_ctx.slowproto_acl_bm = 0;
- netdev_dbg(netdev, "Init NSS DP GMAC%d (base = 0x%lx)\n",
- dp_priv->macid, netdev->base_addr);
+ netdev_dbg(netdev, "Init NSS DP GMAC%d (base = 0x%lx)\n", dp_priv->macid, netdev->base_addr);
return 0;
@@ -673,7 +735,7 @@
struct nss_dp_dev *dp_priv;
struct nss_gmac_hal_ops *hal_ops;
- for (i = 0; i < NSS_DP_MAX_PHY_PORTS; i++) {
+ for (i = 0; i < NSS_DP_HAL_MAX_PORTS; i++) {
dp_priv = dp_global_ctx.nss_dp[i];
if (!dp_priv)
continue;
@@ -717,16 +779,18 @@
* Bail out on not supported platform
* TODO: Handle this properly with SoC ops
*/
- if (!of_machine_is_compatible("qcom,ipq807x") && !of_machine_is_compatible("qcom,ipq6018"))
+ if (!of_machine_is_compatible("qcom,ipq807x") &&
+ !of_machine_is_compatible("qcom,ipq8074") &&
+ !of_machine_is_compatible("qcom,ipq6018") &&
+ !of_machine_is_compatible("qcom,ipq5018"))
return 0;
/*
* TODO Move this to soc_ops
*/
dp_global_ctx.common_init_done = false;
- ret = edma_init();
- if (ret) {
- pr_info("EDMA init failed\n");
+ if (!nss_dp_hal_init()) {
+ pr_err("DP hal init failed.\n");
return -EFAULT;
}
@@ -752,7 +816,7 @@
* TODO Move this to soc_ops
*/
if (dp_global_ctx.common_init_done) {
- edma_cleanup(false);
+ nss_dp_hal_cleanup();
dp_global_ctx.common_init_done = false;
}
diff --git a/nss_dp_switchdev.c b/nss_dp_switchdev.c
index 3ad8789..d07d321 100644
--- a/nss_dp_switchdev.c
+++ b/nss_dp_switchdev.c
@@ -16,8 +16,11 @@
**************************************************************************
*/
+#include <linux/version.h>
#include <net/switchdev.h>
#include <linux/if_bridge.h>
+#include <net/switchdev.h>
+
#include "nss_dp_dev.h"
#include "fal/fal_stp.h"
#include "fal/fal_ctrlpkt.h"
@@ -27,29 +30,6 @@
#define ETH_P_NONE 0
/*
- * nss_dp_attr_get()
- * Get port information to update switchdev attribute for NSS data plane.
- */
-static int nss_dp_attr_get(struct net_device *dev, struct switchdev_attr *attr)
-{
- struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev);
-
- switch (attr->id) {
- case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- attr->u.ppid.id_len = 1;
- attr->u.ppid.id[0] = NSS_DP_SWITCH_ID;
- break;
- case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- attr->u.brport_flags = dp_priv->brport_flags;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-/*
* nss_dp_set_slow_proto_filter()
* Enable/Disable filter to allow Ethernet slow-protocol
*/
@@ -194,6 +174,31 @@
return 0;
}
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+/*
+ * nss_dp_attr_get()
+ * Get port information to update switchdev attribute for NSS data plane.
+ */
+static int nss_dp_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+ struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = 1;
+ attr->u.ppid.id[0] = NSS_DP_SWITCH_ID;
+ break;
+
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ attr->u.brport_flags = dp_priv->brport_flags;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
/*
* nss_dp_attr_set()
* Get switchdev attribute and set to the device of NSS data plane.
@@ -269,3 +274,96 @@
dev->switchdev_ops = &nss_dp_switchdev_ops;
switchdev_port_fwd_mark_set(dev, NULL, false);
}
+#else
+
+/*
+ * nss_dp_port_attr_set()
+ * Sets attributes
+ */
+static int nss_dp_port_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev);
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ dp_priv->brport_flags = attr->u.brport_flags;
+ netdev_dbg(dev, "set brport_flags %lu\n", attr->u.brport_flags);
+ return 0;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return nss_dp_stp_state_set(dp_priv, attr->u.stp_state);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+}
+
+/*
+ * nss_dp_switchdev_port_attr_set_event()
+ * Attribute set event
+ */
+static int nss_dp_switchdev_port_attr_set_event(struct net_device *netdev,
+ struct switchdev_notifier_port_attr_info *port_attr_info)
+{
+ int err;
+
+ err = nss_dp_port_attr_set(netdev, port_attr_info->attr,
+ port_attr_info->trans);
+
+ port_attr_info->handled = true;
+ return notifier_from_errno(err);
+}
+
+/*
+ * nss_dp_switchdev_event()
+ * Switch dev event on netdevice
+ */
+static int nss_dp_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+
+ /*
+ * Handle switchdev event only for physical devices
+ */
+ if (!nss_dp_is_phy_dev(dev)) {
+ return NOTIFY_DONE;
+ }
+
+ if (event == SWITCHDEV_PORT_ATTR_SET)
+ nss_dp_switchdev_port_attr_set_event(dev, ptr);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nss_dp_switchdev_notifier = {
+ .notifier_call = nss_dp_switchdev_event,
+};
+
+static bool switch_init_done;
+
+/*
+ * nss_dp_switchdev_setup()
+ * Setup switch dev
+ */
+void nss_dp_switchdev_setup(struct net_device *dev)
+{
+ int err;
+
+ if (switch_init_done) {
+ return;
+ }
+
+ err = register_switchdev_blocking_notifier(&nss_dp_switchdev_notifier);
+ if (err) {
+ netdev_dbg(dev, "%px:Failed to register switchdev notifier\n", dev);
+ }
+
+ switch_init_done = true;
+
+}
+#endif