/*
 * meth.c -- O2 Builtin 10/100 Ethernet driver
 *
 * Copyright (C) 2001-2003 Ilya Volynets
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/device.h> /* struct device, et al */
#include <linux/netdevice.h> /* struct net_device, and other headers */
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/ip.h> /* struct iphdr */
#include <linux/tcp.h> /* struct tcphdr */
#include <linux/skbuff.h>
#include <linux/mii.h> /* MII definitions */
#include <linux/crc32.h>

#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>

#include <asm/io.h>

#include "meth.h"

#ifndef MFE_DEBUG
#define MFE_DEBUG 0
#endif

#if MFE_DEBUG>=1
#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args)
#define MFE_RX_DEBUG 2
#else
#define DPRINTK(str,args...)
#define MFE_RX_DEBUG 0
#endif


static const char *meth_str="SGI O2 Fast Ethernet";

/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
#define TX_TIMEOUT (400*HZ/1000)

static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC.
 */
#define METH_MCF_LIMIT 32
/*
 * This structure is private to each device. It is used to pass
 * packets in and out, so there is a place for a packet
 */
struct meth_private {
        /* in-memory copy of MAC Control register */
        u64 mac_ctrl;

        /* in-memory copy of DMA Control register */
        unsigned long dma_ctrl;
        /* address of PHY, used by mdio_* functions, initialized in mdio_probe */
        unsigned long phy_addr;
        tx_packet *tx_ring;
        dma_addr_t tx_ring_dma;
        struct sk_buff *tx_skbs[TX_RING_ENTRIES];
        dma_addr_t tx_skb_dmas[TX_RING_ENTRIES];
        unsigned long tx_read, tx_write, tx_count;

        rx_packet *rx_ring[RX_RING_ENTRIES];
        dma_addr_t rx_ring_dmas[RX_RING_ENTRIES];
        struct sk_buff *rx_skbs[RX_RING_ENTRIES];
        unsigned long rx_write;

        /* Multicast filter. */
        u64 mcast_filter;

        spinlock_t meth_lock;
};

static void meth_tx_timeout(struct net_device *dev);
static irqreturn_t meth_interrupt(int irq, void *dev_id);

/* global, initialized in ip32-setup.c */
char o2meth_eaddr[8]={0,0,0,0,0,0,0,0};

static inline void load_eaddr(struct net_device *dev)
{
        int i;
        u64 macaddr;

        DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr);
        macaddr = 0;
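        /* Pack the station address big-endian into the low 48 bits of the
         * register: dev_addr[0] ends up in bits 47:40. */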
        for (i = 0; i < 6; i++)
                macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);

        mace->eth.mac_addr = macaddr;
}

/*
 * Waits for BUSY status of mdio bus to clear
 */
#define WAIT_FOR_PHY(___rval)                                           \
        while ((___rval = mace->eth.phy_data) & MDIO_BUSY) {            \
                udelay(25);                                             \
        }
/*read phy register, return value read */
static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
{
        unsigned long rval;
        WAIT_FOR_PHY(rval);
        mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f);
        udelay(25);
        mace->eth.phy_trans_go = 1;
        udelay(25);
        WAIT_FOR_PHY(rval);
        return rval & MDIO_DATA_MASK;
}

static int mdio_probe(struct meth_private *priv)
{
        int i;
        unsigned long p2, p3, flags;
        /* check if phy is detected already */
        if(priv->phy_addr>=0&&priv->phy_addr<32)
                return 0;
        spin_lock_irqsave(&priv->meth_lock, flags);
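        /* Probe all 32 MDIO addresses; PHY registers 2 and 3 hold the
         * vendor/model ID, and all-zeroes or all-ones means nothing
         * answered at that address. */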
        for (i=0;i<32;++i){
                priv->phy_addr=i;
                p2=mdio_read(priv,2);
                p3=mdio_read(priv,3);
#if MFE_DEBUG>=2
                switch ((p2<<12)|(p3>>4)){
                case PHY_QS6612X:
                        DPRINTK("PHY is QS6612X\n");
                        break;
                case PHY_ICS1889:
                        DPRINTK("PHY is ICS1889\n");
                        break;
                case PHY_ICS1890:
                        DPRINTK("PHY is ICS1890\n");
                        break;
                case PHY_DP83840:
                        DPRINTK("PHY is DP83840\n");
                        break;
                }
#endif
                if(p2!=0xffff&&p2!=0x0000){
                        DPRINTK("PHY code: %x\n",(p2<<12)|(p3>>4));
                        break;
                }
        }
        spin_unlock_irqrestore(&priv->meth_lock, flags);
        if(priv->phy_addr<32) {
                return 0;
        }
        DPRINTK("Oopsie! PHY is not known!\n");
        priv->phy_addr=-1;
        return -ENODEV;
}

static void meth_check_link(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long mii_advertising = mdio_read(priv, 4);
        unsigned long mii_partner = mdio_read(priv, 5);
        unsigned long negotiated = mii_advertising & mii_partner;
        unsigned long duplex, speed;

        if (mii_partner == 0xffff)
                return;

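        /* Standard MII autonegotiation result bits: 0x0380 covers the
         * 100Mbit abilities (100BASE-TX half/full duplex and 100BASE-T4),
         * 0x0100 is 100BASE-TX full duplex, 0x0040 is 10BASE-T full duplex. */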
        speed = (negotiated & 0x0380) ? METH_100MBIT : 0;
        duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ?
                 METH_PHY_FDX : 0;

        if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) {
                DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half");
                if (duplex)
                        priv->mac_ctrl |= METH_PHY_FDX;
                else
                        priv->mac_ctrl &= ~METH_PHY_FDX;
                mace->eth.mac_ctrl = priv->mac_ctrl;
        }

        if ((priv->mac_ctrl & METH_100MBIT) ^ speed) {
                DPRINTK("Setting %dMbs mode\n", speed ? 100 : 10);
                if (speed)
                        priv->mac_ctrl |= METH_100MBIT;
                else
                        priv->mac_ctrl &= ~METH_100MBIT;
                mace->eth.mac_ctrl = priv->mac_ctrl;
        }
}


static int meth_init_tx_ring(struct meth_private *priv)
{
        /* Init TX ring */
        priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
                                            &priv->tx_ring_dma, GFP_ATOMIC);
        if (!priv->tx_ring)
                return -ENOMEM;

        priv->tx_count = priv->tx_read = priv->tx_write = 0;
        mace->eth.tx_ring_base = priv->tx_ring_dma;
        /* Now init skb save area */
        memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs));
        memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas));
        return 0;
}

static int meth_init_rx_ring(struct meth_private *priv)
{
        int i;

        for (i = 0; i < RX_RING_ENTRIES; i++) {
                priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0);
                /* 8byte status vector + 3quad padding + 2byte padding,
                 * to put data on 64bit aligned boundary */
                skb_reserve(priv->rx_skbs[i],METH_RX_HEAD);
                priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head);
                /* I'll need to re-sync it after each RX */
                priv->rx_ring_dmas[i] =
                        dma_map_single(NULL, priv->rx_ring[i],
                                       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
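                /* Writing the mapped address into the RX FIFO register
                 * hands this buffer to the MACE receive DMA engine. */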
                mace->eth.rx_fifo = priv->rx_ring_dmas[i];
        }
        priv->rx_write = 0;
        return 0;
}
static void meth_free_tx_ring(struct meth_private *priv)
{
        int i;

        /* Remove any pending skb */
        for (i = 0; i < TX_RING_ENTRIES; i++) {
                if (priv->tx_skbs[i])
                        dev_kfree_skb(priv->tx_skbs[i]);
                priv->tx_skbs[i] = NULL;
        }
        dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
                          priv->tx_ring_dma);
}

/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */
static void meth_free_rx_ring(struct meth_private *priv)
{
        int i;

        for (i = 0; i < RX_RING_ENTRIES; i++) {
                dma_unmap_single(NULL, priv->rx_ring_dmas[i],
                                 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
                priv->rx_ring[i] = 0;
                priv->rx_ring_dmas[i] = 0;
                kfree_skb(priv->rx_skbs[i]);
        }
}

int meth_reset(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);

        /* Reset card */
        mace->eth.mac_ctrl = SGI_MAC_RESET;
        udelay(1);
        mace->eth.mac_ctrl = 0;
        udelay(25);

        /* Load ethernet address */
        load_eaddr(dev);
        /* Should load some "errata", but later */

        /* Check for device */
        if (mdio_probe(priv) < 0) {
                DPRINTK("Unable to find PHY\n");
                return -ENODEV;
        }

        /* Initial mode: 10 | Half-duplex | Accept normal packets */
        priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
        if (dev->flags & IFF_PROMISC)
                priv->mac_ctrl |= METH_PROMISC;
        mace->eth.mac_ctrl = priv->mac_ctrl;

        /* Autonegotiate speed and duplex mode */
        meth_check_link(dev);

        /* Now set dma control, but don't enable DMA, yet */
        priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) |
                         (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT);
        mace->eth.dma_ctrl = priv->dma_ctrl;

        return 0;
}

/*============End Helper Routines=====================*/

/*
 * Open and close
 */
static int meth_open(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        int ret;

        priv->phy_addr = -1; /* No PHY is known yet... */

        /* Initialize the hardware */
        ret = meth_reset(dev);
        if (ret < 0)
                return ret;

        /* Allocate the ring buffers */
        ret = meth_init_tx_ring(priv);
        if (ret < 0)
                return ret;
        ret = meth_init_rx_ring(priv);
        if (ret < 0)
                goto out_free_tx_ring;

        ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev);
        if (ret) {
                printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
                goto out_free_rx_ring;
        }

        /* Start DMA */
        priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/
                          METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;

        DPRINTK("About to start queue\n");
        netif_start_queue(dev);

        return 0;

out_free_rx_ring:
        meth_free_rx_ring(priv);
out_free_tx_ring:
        meth_free_tx_ring(priv);

        return ret;
}

static int meth_release(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);

        DPRINTK("Stopping queue\n");
        netif_stop_queue(dev); /* can't transmit any more */
        /* shut down DMA */
        priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN |
                            METH_DMA_RX_EN | METH_DMA_RX_INT_EN);
        mace->eth.dma_ctrl = priv->dma_ctrl;
        free_irq(dev->irq, dev);
        meth_free_tx_ring(priv);
        meth_free_rx_ring(priv);

        return 0;
}

/*
 * Receive a packet: retrieve, encapsulate and pass over to upper levels
 */
static void meth_rx(struct net_device* dev, unsigned long int_status)
{
        struct sk_buff *skb;
        unsigned long status, flags;
        struct meth_private *priv = netdev_priv(dev);
        unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;

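        /* fifo_rptr (bits 15:8 of the interrupt status) is the hardware's
         * RX read pointer; the loop below consumes ring entries until our
         * write index catches up with it. */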
        spin_lock_irqsave(&priv->meth_lock, flags);
        priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;
        spin_unlock_irqrestore(&priv->meth_lock, flags);

        if (int_status & METH_INT_RX_UNDERFLOW) {
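                /* On underflow the hardware pointer has already advanced
                 * past the offending slot, so step back one entry (the
                 * mask matches the 16-entry RX ring). */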
                fifo_rptr = (fifo_rptr - 1) & 0x0f;
        }
        while (priv->rx_write != fifo_rptr) {
                dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
                                 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
                status = priv->rx_ring[priv->rx_write]->status.raw;
#if MFE_DEBUG
                if (!(status & METH_RX_ST_VALID)) {
                        DPRINTK("Not received? status=%016lx\n",status);
                }
#endif
                if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) {
                        int len = (status & 0xffff) - 4; /* omit CRC */
                        /* length sanity check */
                        if (len < 60 || len > 1518) {
                                printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2Lx.\n",
                                       dev->name, priv->rx_write,
                                       priv->rx_ring[priv->rx_write]->status.raw);
                                dev->stats.rx_errors++;
                                dev->stats.rx_length_errors++;
                                skb = priv->rx_skbs[priv->rx_write];
                        } else {
                                skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC);
                                if (!skb) {
                                        /* Ouch! No memory! Drop packet on the floor */
                                        DPRINTK("No mem: dropping packet\n");
                                        dev->stats.rx_dropped++;
                                        skb = priv->rx_skbs[priv->rx_write];
                                } else {
                                        struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write];
                                        /* 8byte status vector + 3quad padding + 2byte padding,
                                         * to put data on 64bit aligned boundary */
                                        skb_reserve(skb, METH_RX_HEAD);
                                        /* Write metadata, and then pass to the receive level */
                                        skb_put(skb_c, len);
                                        priv->rx_skbs[priv->rx_write] = skb;
                                        skb_c->protocol = eth_type_trans(skb_c, dev);
                                        dev->stats.rx_packets++;
                                        dev->stats.rx_bytes += len;
                                        netif_rx(skb_c);
                                }
                        }
                } else {
                        dev->stats.rx_errors++;
                        skb=priv->rx_skbs[priv->rx_write];
#if MFE_DEBUG>0
                        printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status);
                        if(status&METH_RX_ST_RCV_CODE_VIOLATION)
                                printk(KERN_WARNING "Receive Code Violation\n");
                        if(status&METH_RX_ST_CRC_ERR)
                                printk(KERN_WARNING "CRC error\n");
                        if(status&METH_RX_ST_INV_PREAMBLE_CTX)
                                printk(KERN_WARNING "Invalid Preamble Context\n");
                        if(status&METH_RX_ST_LONG_EVT_SEEN)
                                printk(KERN_WARNING "Long Event Seen...\n");
                        if(status&METH_RX_ST_BAD_PACKET)
                                printk(KERN_WARNING "Bad Packet\n");
                        if(status&METH_RX_ST_CARRIER_EVT_SEEN)
                                printk(KERN_WARNING "Carrier Event Seen\n");
#endif
                }
                priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head;
                priv->rx_ring[priv->rx_write]->status.raw = 0;
                priv->rx_ring_dmas[priv->rx_write] =
                        dma_map_single(NULL, priv->rx_ring[priv->rx_write],
                                       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
                mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
                ADVANCE_RX_PTR(priv->rx_write);
        }
        spin_lock_irqsave(&priv->meth_lock, flags);
        /* In case there was underflow, and Rx DMA was disabled */
        priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;
        mace->eth.int_stat = METH_INT_RX_THRESHOLD;
        spin_unlock_irqrestore(&priv->meth_lock, flags);
}

static int meth_tx_full(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);

        return priv->tx_count >= TX_RING_ENTRIES - 1;
}

static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long status, flags;
        struct sk_buff *skb;
        unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;

        spin_lock_irqsave(&priv->meth_lock, flags);

        /* Stop DMA notification */
        priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
        mace->eth.dma_ctrl = priv->dma_ctrl;

        while (priv->tx_read != rptr) {
                skb = priv->tx_skbs[priv->tx_read];
                status = priv->tx_ring[priv->tx_read].header.raw;
#if MFE_DEBUG>=1
                if (priv->tx_read == priv->tx_write)
                        DPRINTK("Auchi! tx_read=%d,tx_write=%d,rptr=%d?\n", priv->tx_read, priv->tx_write,rptr);
#endif
                if (status & METH_TX_ST_DONE) {
                        if (status & METH_TX_ST_SUCCESS){
                                dev->stats.tx_packets++;
                                dev->stats.tx_bytes += skb->len;
                        } else {
                                dev->stats.tx_errors++;
#if MFE_DEBUG>=1
                                DPRINTK("TX error: status=%016lx <",status);
                                if(status & METH_TX_ST_SUCCESS)
                                        printk(" SUCCESS");
                                if(status & METH_TX_ST_TOOLONG)
                                        printk(" TOOLONG");
                                if(status & METH_TX_ST_UNDERRUN)
                                        printk(" UNDERRUN");
                                if(status & METH_TX_ST_EXCCOLL)
                                        printk(" EXCCOLL");
                                if(status & METH_TX_ST_DEFER)
                                        printk(" DEFER");
                                if(status & METH_TX_ST_LATECOLL)
                                        printk(" LATECOLL");
                                printk(" >\n");
#endif
                        }
                } else {
                        DPRINTK("RPTR points us here, but packet not done?\n");
                        break;
                }
                dev_kfree_skb_irq(skb);
                priv->tx_skbs[priv->tx_read] = NULL;
                priv->tx_ring[priv->tx_read].header.raw = 0;
                priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1);
                priv->tx_count--;
        }

        /* wake up queue if it was stopped */
        if (netif_queue_stopped(dev) && !meth_tx_full(dev)) {
                netif_wake_queue(dev);
        }

        mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
        spin_unlock_irqrestore(&priv->meth_lock, flags);
}

static void meth_error(struct net_device* dev, unsigned status)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;

        printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
        /* check for errors too... */
        if (status & (METH_INT_TX_LINK_FAIL))
                printk(KERN_WARNING "meth: link failure\n");
        /* Should I do full reset in this case? */
        if (status & (METH_INT_MEM_ERROR))
                printk(KERN_WARNING "meth: memory error\n");
        if (status & (METH_INT_TX_ABORT))
                printk(KERN_WARNING "meth: aborted\n");
        if (status & (METH_INT_RX_OVERFLOW))
                printk(KERN_WARNING "meth: Rx overflow\n");
        if (status & (METH_INT_RX_UNDERFLOW)) {
                printk(KERN_WARNING "meth: Rx underflow\n");
                spin_lock_irqsave(&priv->meth_lock, flags);
                mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
                /* more underflow interrupts will be delivered,
                 * effectively throwing us into an infinite loop.
                 * Thus I stop processing Rx in this case. */
                priv->dma_ctrl &= ~METH_DMA_RX_EN;
                mace->eth.dma_ctrl = priv->dma_ctrl;
                DPRINTK("Disabled meth Rx DMA temporarily\n");
                spin_unlock_irqrestore(&priv->meth_lock, flags);
        }
        mace->eth.int_stat = METH_INT_ERROR;
}

/*
 * The typical interrupt entry point
 */
static irqreturn_t meth_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct meth_private *priv = netdev_priv(dev);
        unsigned long status;

        status = mace->eth.int_stat;
        while (status & 0xff) {
                /* First handle errors - if we get Rx underflow,
                 * Rx DMA will be disabled, and Rx handler will reenable
                 * it. I don't think it's possible to get Rx underflow,
                 * without getting Rx interrupt */
                if (status & METH_INT_ERROR) {
                        meth_error(dev, status);
                }
                if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) {
                        /* a transmission is over: free the skb */
                        meth_tx_cleanup(dev, status);
                }
                if (status & METH_INT_RX_THRESHOLD) {
                        if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN))
                                break;
                        /* send it to meth_rx for handling */
                        meth_rx(dev, status);
                }
                status = mace->eth.int_stat;
        }

        return IRQ_HANDLED;
}

/*
 * Transmits packets that fit into TX descriptor (are <=120B)
 */
static void meth_tx_short_prepare(struct meth_private *priv,
                                  struct sk_buff *skb)
{
        tx_packet *desc = &priv->tx_ring[priv->tx_write];
        int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;

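        /* The 8-byte descriptor header encodes the frame length minus one
         * in its low bits and, in bits 23:16, the offset of the first data
         * byte within the 128-byte descriptor, so short frames are
         * right-justified to end exactly at the descriptor boundary. */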
        desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16);
        /* maybe I should set whole thing to 0 first... */
        skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
        if (skb->len < len)
                memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
}
#define TX_CATBUF1 BIT(25)
static void meth_tx_1page_prepare(struct meth_private *priv,
                                  struct sk_buff *skb)
{
        tx_packet *desc = &priv->tx_ring[priv->tx_write];
        void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
        int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
        int buffer_len = skb->len - unaligned_len;
        dma_addr_t catbuf;

        desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1);

        /* unaligned part */
        if (unaligned_len) {
                skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
                                          unaligned_len);
                desc->header.raw |= (128 - unaligned_len) << 16;
        }

        /* first page */
        catbuf = dma_map_single(NULL, buffer_data, buffer_len,
                                DMA_TO_DEVICE);
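        /* The concatenate-buffer entry takes the 8-byte-aligned DMA address
         * shifted right by three, plus the buffer length minus one. */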
        desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
        desc->data.cat_buf[0].form.len = buffer_len - 1;
}
#define TX_CATBUF2 BIT(26)
static void meth_tx_2page_prepare(struct meth_private *priv,
                                  struct sk_buff *skb)
{
        tx_packet *desc = &priv->tx_ring[priv->tx_write];
        void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
        void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data);
        int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
        int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data);
        int buffer2_len = skb->len - buffer1_len - unaligned_len;
        dma_addr_t catbuf1, catbuf2;

        desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2 | (skb->len - 1);
        /* unaligned part */
        if (unaligned_len){
                skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
                                          unaligned_len);
                desc->header.raw |= (128 - unaligned_len) << 16;
        }

        /* first page */
        catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len,
                                 DMA_TO_DEVICE);
        desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
        desc->data.cat_buf[0].form.len = buffer1_len - 1;
        /* second page */
        catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
                                 DMA_TO_DEVICE);
        desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
        desc->data.cat_buf[1].form.len = buffer2_len - 1;
}

static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
{
        /* Remember the skb, so we can free it at interrupt time */
        priv->tx_skbs[priv->tx_write] = skb;
        if (skb->len <= 120) {
                /* Whole packet fits into descriptor */
                meth_tx_short_prepare(priv, skb);
        } else if (PAGE_ALIGN((unsigned long)skb->data) !=
                   PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) {
                /* Packet crosses page boundary */
                meth_tx_2page_prepare(priv, skb);
        } else {
                /* Packet is in one page */
                meth_tx_1page_prepare(priv, skb);
        }
        priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1);
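        /* Writing the new write index to TX_INFO tells the MACE DMA engine
         * that more descriptors are ready. */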
        mace->eth.tx_info = priv->tx_write;
        priv->tx_count++;
}

/*
 * Transmit a packet (called by the kernel)
 */
static int meth_tx(struct sk_buff *skb, struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;

        spin_lock_irqsave(&priv->meth_lock, flags);
        /* Stop DMA notification */
        priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
        mace->eth.dma_ctrl = priv->dma_ctrl;

        meth_add_to_tx_ring(priv, skb);
        dev->trans_start = jiffies; /* save the timestamp */

        /* If TX ring is full, tell the upper layer to stop sending packets */
        if (meth_tx_full(dev)) {
                printk(KERN_DEBUG "TX full: stopping\n");
                netif_stop_queue(dev);
        }

        /* Restart DMA notification */
        priv->dma_ctrl |= METH_DMA_TX_INT_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;

        spin_unlock_irqrestore(&priv->meth_lock, flags);

        return NETDEV_TX_OK;
}

/*
 * Deal with a transmit timeout.
 */
static void meth_tx_timeout(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;

        printk(KERN_WARNING "%s: transmit timed out\n", dev->name);

        /* Protect against concurrent rx interrupts */
        spin_lock_irqsave(&priv->meth_lock,flags);

        /* Try to reset the interface. */
        meth_reset(dev);

        dev->stats.tx_errors++;

        /* Clear all rings */
        meth_free_tx_ring(priv);
        meth_free_rx_ring(priv);
        meth_init_tx_ring(priv);
        meth_init_rx_ring(priv);

        /* Restart dma */
        priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
        mace->eth.dma_ctrl = priv->dma_ctrl;

        /* Enable interrupt */
        spin_unlock_irqrestore(&priv->meth_lock, flags);

        dev->trans_start = jiffies; /* prevent tx timeout */
        netif_wake_queue(dev);
}

/*
 * Ioctl commands
 */
static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        /* XXX Not yet implemented */
        switch(cmd) {
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
        default:
                return -EOPNOTSUPP;
        }
}

static void meth_set_rx_mode(struct net_device *dev)
{
        struct meth_private *priv = netdev_priv(dev);
        unsigned long flags;

        netif_stop_queue(dev);
        spin_lock_irqsave(&priv->meth_lock, flags);
        priv->mac_ctrl &= ~METH_PROMISC;

        if (dev->flags & IFF_PROMISC) {
                priv->mac_ctrl |= METH_PROMISC;
                priv->mcast_filter = 0xffffffffffffffffUL;
        } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
                   (dev->flags & IFF_ALLMULTI)) {
                priv->mac_ctrl |= METH_ACCEPT_AMCAST;
                priv->mcast_filter = 0xffffffffffffffffUL;
        } else {
                struct netdev_hw_addr *ha;
                priv->mac_ctrl |= METH_ACCEPT_MCAST;

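                /* Hash each address with the Ethernet CRC; the top six bits
                 * select one of the 64 bits in the MACE multicast filter. */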
                netdev_for_each_mc_addr(ha, dev)
                        set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
                                (volatile unsigned long *)&priv->mcast_filter);
        }

        /* Write the changes to the chip registers. */
        mace->eth.mac_ctrl = priv->mac_ctrl;
        mace->eth.mcast_filter = priv->mcast_filter;

        /* Done! */
        spin_unlock_irqrestore(&priv->meth_lock, flags);
        netif_wake_queue(dev);
}

static const struct net_device_ops meth_netdev_ops = {
        .ndo_open               = meth_open,
        .ndo_stop               = meth_release,
        .ndo_start_xmit         = meth_tx,
        .ndo_do_ioctl           = meth_ioctl,
        .ndo_tx_timeout         = meth_tx_timeout,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_set_rx_mode        = meth_set_rx_mode,
};

/*
 * The init function.
 */
static int meth_probe(struct platform_device *pdev)
{
        struct net_device *dev;
        struct meth_private *priv;
        int err;

        dev = alloc_etherdev(sizeof(struct meth_private));
        if (!dev)
                return -ENOMEM;

        dev->netdev_ops         = &meth_netdev_ops;
        dev->watchdog_timeo     = timeout;
        dev->irq                = MACE_ETHERNET_IRQ;
        dev->base_addr          = (unsigned long)&mace->eth;
        memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);

        priv = netdev_priv(dev);
        spin_lock_init(&priv->meth_lock);
        SET_NETDEV_DEV(dev, &pdev->dev);
        /* meth_remove() looks the net_device up via drvdata, so stash it */
        platform_set_drvdata(pdev, dev);

        err = register_netdev(dev);
        if (err) {
                free_netdev(dev);
                return err;
        }

        printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
               dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29));
        return 0;
}

static int __exit meth_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);

        unregister_netdev(dev);
        free_netdev(dev);

        return 0;
}

static struct platform_driver meth_driver = {
        .probe  = meth_probe,
        .remove = __exit_p(meth_remove),
        .driver = {
                .name   = "meth",
        }
};

module_platform_driver(meth_driver);

MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:meth");