1 /**********************************************************************
2 * Author: Cavium Networks
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
7 * Copyright (c) 2003-2007 Cavium Networks
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 **********************************************************************/
27 #include <linux/kernel.h>
28 #include <linux/init.h>
29 #include <linux/module.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/delay.h>
33 #include <linux/mii.h>
37 #include <asm/octeon/octeon.h>
39 #include "ethernet-defines.h"
40 #include "octeon-ethernet.h"
41 #include "ethernet-mem.h"
42 #include "ethernet-rx.h"
43 #include "ethernet-tx.h"
44 #include "ethernet-mdio.h"
45 #include "ethernet-util.h"
46 #include "ethernet-proc.h"
53 #include "cvmx-helper.h"
55 #include "cvmx-gmxx-defs.h"
56 #include "cvmx-smix-defs.h"
/*
 * Number of FPA packet buffers to allocate at module load.  The Kconfig
 * value overrides the 1024 default when it is defined and non-zero.
 * NOTE(review): the #else/#endif lines of this conditional are not
 * visible in this chunk -- confirm they are present in the full file.
 */
#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
	&& CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used unless\n"
	"\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");
70 int pow_receive_group = 15;
71 module_param(pow_receive_group, int, 0444);
72 MODULE_PARM_DESC(pow_receive_group, "\n"
73 "\tPOW group to receive packets from. All ethernet hardware\n"
74 "\twill be configured to send incomming packets to this POW\n"
75 "\tgroup. Also any other software can submit packets to this\n"
76 "\tgroup for the kernel to process.");
/*
 * POW group used for handing packets to other software.  -1 (the
 * default) disables creation of the virtual "pow0" device.
 * NOTE(review): permissions are 0644, but the value only appears to be
 * consulted at init time in this chunk -- runtime writes may have no
 * effect; confirm against the full file.
 */
int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");
/*
 * When non-zero (and pow_send_group is set), all transmits go to the
 * POW group instead of the hardware PKO queues; checked in
 * cvm_oct_common_init.
 * NOTE(review): the "int always_use_pow;" definition and the closing
 * line of this MODULE_PARM_DESC are missing from this chunk -- confirm
 * they exist in the full file.
 */
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
/*
 * Comma separated list of device names that should transmit via the
 * POW; a per-device version of always_use_pow.  Matched with strstr()
 * in cvm_oct_common_init.
 */
char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");
/*
 * When set (the default), each netdev's tx_queue_len is zeroed at
 * creation so packets bypass the core packet scheduler queue.
 */
static int disable_core_queueing = 1;
module_param(disable_core_queueing, int, 0444);
MODULE_PARM_DESC(disable_core_queueing, "\n"
	"\tWhen set the networking core's tx_queue_len is set to zero. This\n"
	"\tallows packets to be sent without lock contention in the packet\n"
	"\tscheduler resulting in some cases in improved throughput.\n");
/*
 * The offset from mac_addr_base that should be used for the next port
 * that is configured.  By convention, if any mgmt ports exist on the
 * chip, they get the first MAC addresses; the ports controlled by
 * this driver are numbered sequentially following any mgmt addresses.
 */
static unsigned int cvm_oct_mac_addr_offset;

/* Periodic timer to check auto negotiation (fires cvm_do_timer). */
static struct timer_list cvm_oct_poll_timer;

/*
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

/* Serializes MDIO accesses; presumably defined in ethernet-mdio.c -- verify. */
extern struct semaphore mdio_sem;
/*
 * Periodic timer tick for slow management operations: polls one port's
 * link state per tick, drains its PKO free lists, and refreshes its
 * stats, then re-arms itself.
 *
 * @arg: Device to check
 *
 * NOTE(review): several lines of this function (port counter
 * declaration, braces, semaphore release) are missing from this chunk
 * -- compare against the full file before relying on it.
 */
static void cvm_do_timer(unsigned long arg)
	int32_t skb_to_free, undo;
	struct octeon_ethernet *priv;
	if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
		/*
		 * All ports have been polled. Start the next
		 * iteration through the ports in one second.
		 */
		mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
	if (!cvm_oct_device[port])
	priv = netdev_priv(cvm_oct_device[port]);
	/* skip polling if we don't get the lock */
	if (!down_trylock(&mdio_sem)) {
		priv->poll(cvm_oct_device[port]);
	queues_per_port = cvmx_pko_get_num_queues(port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
		/* Atomically claim up to MAX_SKB_TO_FREE completions. */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
		/* Return any over-claimed count back to the FAU. */
		undo = skb_to_free > 0 ?
			MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
		cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
		skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
			MAX_SKB_TO_FREE : -skb_to_free;
		cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
	cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
	/* Poll the next port in a 50th of a second.
	   This spreads the polling of ports out a little bit */
	mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
/*
 * Configure common hardware for all interfaces: fill the FPA pools,
 * set up RED, enable MII, and register the POW receive IRQ.
 * NOTE(review): argument lines of the fill calls and the end of the
 * function are missing from this chunk -- verify against the full file.
 */
static __init void cvm_oct_configure_common_hw(void)
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);

	/* Early-drop thresholds: pass at 1/4, drop at 1/8 of the pool. */
	cvmx_helper_setup_red(num_packet_buffers / 4,
			      num_packet_buffers / 8);

	/* Enable the MII interface */
	if (!octeon_is_simulation())
		cvmx_write_csr(CVMX_SMIX_EN(0), 1);

	/* Register an IRQ handler to receive POW interrupts */
	r = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, IRQF_SHARED, "Ethernet",
#if defined(CONFIG_SMP) && 0
	if (USE_MULTICORE_RECEIVE) {
		irq_set_affinity(OCTEON_IRQ_WORKQ0 + pow_receive_group,
/*
 * Free a work queue entry received in an intercept callback: walks the
 * packet's buffer chain returning each segment to the FPA packet pool
 * (unless its "don't free" bit is set), then frees the WQE itself.
 *
 * @work_queue_entry: Work queue entry to free
 * Returns Zero on success, Negative on failure.
 *
 * NOTE(review): the segment-walk loop header and some argument lines
 * are missing from this chunk -- verify against the full file.
 */
int cvm_oct_free_work(void *work_queue_entry)
	cvmx_wqe_t *work = work_queue_entry;
	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;
		/* Next-buffer pointer is stored 8 bytes before the data. */
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
		segment_ptr = next_ptr;
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
EXPORT_SYMBOL(cvm_oct_free_work);
/*
 * Get the low level ethernet statistics: reads-and-clears the PIP/PKO
 * hardware counters and accumulates them into priv->stats.
 *
 * @dev: Device to get the statistics from
 * Returns Pointer to the statistics
 *
 * NOTE(review): the else-branch braces, the 64-bit #ifdef around the
 * atomic adds, and the return statement are missing from this chunk.
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);
	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
			/* Read-and-clear (second argument = 1) hw counters. */
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;
		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
/*
 * Change the link MTU: validates the new size and programs the GMX/PIP
 * maximum-frame registers accordingly.  (The original "Unimplemented"
 * note was stale -- the body clearly implements the operation.)
 *
 * @dev:     Device to change
 * @new_mtu: The new MTU
 * Returns Zero on success
 *
 * NOTE(review): the vlan_bytes definition, an interface-mode guard, and
 * several closing braces are missing from this chunk.
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65535 bytes.  14 = ethernet header, 4 = FCS.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;
		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller the 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
/*
 * Set the multicast list: programs the GMX address-filter CAM/control
 * registers to match the device's promiscuous/allmulti flags.  (The
 * original "Currently unimplemented" note was stale.)
 *
 * @dev: Device to work on
 *
 * NOTE(review): the mcst assignments, an interface-mode guard, and
 * several braces are missing from this chunk.
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */
		if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			/* Force reject multicast packets */
		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;
		/* Disable the port while the filter is reprogrammed. */
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
/*
 * Set the hardware MAC address for a device: copies the address into
 * dev_addr and programs the GMX SMAC and address-match CAM registers.
 *
 * @dev:  Device to change the MAC address for
 * @addr: Address structure to change it too. MAC address is addr + 2.
 * Returns Zero on success
 *
 * NOTE(review): local declarations (mac, ptr, i), an interface-mode
 * guard, second arguments of the CAM writes, and the closing braces are
 * missing from this chunk.
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	memcpy(dev->dev_addr, addr + 2, 6);
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Pack the 6 address bytes into one 48-bit value. */
		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t) (ptr[i + 2]);
		/* Disable the port while the address registers change. */
			cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);
		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
		/* Re-apply the filter settings for the new address. */
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
/*
 * Per network device initialization: derives the port's MAC address
 * from the bootloader-provided base plus a running offset, selects POW
 * transmit when configured, sets feature flags, and applies the MAC
 * address and MTU through the netdev ops.
 *
 * @dev: Device to initialize
 * Returns Zero on success
 *
 * NOTE(review): the sockaddr declaration, the POW-transmit assignment,
 * and the return statement are missing from this chunk.
 */
int cvm_oct_common_init(struct net_device *dev)
	struct octeon_ethernet *priv = netdev_priv(dev);
	/* Base MAC address from the bootloader, big-endian byte order. */
	u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) |
		((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) |
		((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) |
		((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) |
		((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) |
		(u64)(octeon_bootinfo->mac_addr_base[5] & 0xff);

	mac += cvm_oct_mac_addr_offset;
	sa.sa_data[0] = (mac >> 40) & 0xff;
	sa.sa_data[1] = (mac >> 32) & 0xff;
	sa.sa_data[2] = (mac >> 24) & 0xff;
	sa.sa_data[3] = (mac >> 16) & 0xff;
	sa.sa_data[4] = (mac >> 8) & 0xff;
	sa.sa_data[5] = mac & 0xff;

	if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count)
		printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:"
			" %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
			sa.sa_data[0] & 0xff, sa.sa_data[1] & 0xff,
			sa.sa_data[2] & 0xff, sa.sa_data[3] & 0xff,
			sa.sa_data[4] & 0xff, sa.sa_data[5] & 0xff);
	cvm_oct_mac_addr_offset++;

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))

	if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
		dev->features |= NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);

	cvm_oct_mdio_setup_device(dev);
	dev->netdev_ops->ndo_set_mac_address(dev, &sa);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));
/*
 * Per network device teardown counterpart of cvm_oct_common_init.
 * @dev: Device being uninitialized (unused).
 * NOTE(review): the function braces are missing from this chunk.
 */
void cvm_oct_common_uninit(struct net_device *dev)
	/* Currently nothing to do */
/*
 * Per-interface-type net_device_ops tables.  All share the common
 * xmit/ioctl/MTU/stats handlers; the SGMII/XAUI/RGMII variants add
 * their own init/uninit/open/stop, the POW virtual device transmits
 * via cvm_oct_xmit_pow, and NPI/LOOP share the NPI table.
 * NOTE(review): the closing "#endif" and "};" of each initializer are
 * missing from this chunk -- confirm against the full file.
 */
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_xaui_init,
	.ndo_uninit		= cvm_oct_xaui_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_xaui_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_sgmii_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_sgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_rgmii_init,
	.ndo_uninit		= cvm_oct_rgmii_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_rgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_multicast_list	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
/*
 * Module/ driver initialization. Creates the linux network devices:
 * configures the hardware, points every input port at the receive POW
 * group, optionally creates the pow0 virtual device, allocates and
 * registers a netdev per physical port, programs interrupt thresholds,
 * and starts the poll timer.
 *
 * Returns Zero on success
 *
 * NOTE(review): loop-variable declarations, increments, allocation
 * null-check lines and many braces are missing from this chunk --
 * compare against the full file.
 */
static int __init cvm_oct_init_module(void)
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	/* Mgmt ports consume the first MAC addresses, by convention. */
	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
		cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
		cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
		cvm_oct_mac_addr_offset = 0;

	cvm_oct_proc_initialize();
	cvm_oct_rx_initialize();
	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
			union cvmx_pip_prt_tagx pip_prt_tagx;
				cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;
		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);
			memset(priv, 0, sizeof(struct octeon_ethernet));

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet "
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive "
					dev->name, pow_send_group,
			pr_err("Failed to allocate ethernet device "

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
				pr_err("Failed to allocate ethernet device "
				       "for port %d\n", port);

			if (disable_core_queueing)
				dev->tx_queue_len = 0;

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			memset(priv, 0, sizeof(struct octeon_ethernet));

			/* Each port owns a block of 4-byte FAU counters. */
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			/* Pick the ops table and name by interface mode. */
			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");

			if (!dev->netdev_ops) {
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device "
				       "for interface %d, port %d\n",
				       interface, priv->port);
				cvm_oct_device[priv->port] = dev;
					cvmx_pko_get_num_queues(priv->port) *

	if (INTERRUPT_LIMIT) {
		/*
		 * Set the POW timer rate to give an interrupt at most
		 * INTERRUPT_LIMIT times per second.
		 */
		cvmx_write_csr(CVMX_POW_WQ_INT_PC,
			       octeon_bootinfo->eclock_hz / (INTERRUPT_LIMIT *
		/*
		 * Enable POW timer interrupt. It will count when
		 * there are packets available.
		 */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
		/* Enable POW interrupt when our port has at least one packet */
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0x1001);

	/* Enable the poll timer for checking RGMII status */
	init_timer(&cvm_oct_poll_timer);
	cvm_oct_poll_timer.data = 0;
	cvm_oct_poll_timer.function = cvm_do_timer;
	mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
/*
 * Module / driver shutdown: disables and frees the POW interrupt,
 * stops the poll timer, tears down RX, unregisters and frees every
 * netdev, and drains the hardware FPA pools.  (The function is void;
 * the old "Returns Zero" note was wrong.)
 *
 * NOTE(review): the port declaration and several braces are missing
 * from this chunk.
 */
static void __exit cvm_oct_cleanup_module(void)
	/* Disable POW interrupt */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	del_timer(&cvm_oct_poll_timer);
	cvm_oct_rx_shutdown();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			cvm_oct_tx_shutdown(cvm_oct_device[port]);
			unregister_netdev(cvm_oct_device[port]);
			kfree(cvm_oct_device[port]);
			cvm_oct_device[port] = NULL;

	cvm_oct_proc_shutdown();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
/* Module metadata and entry/exit registration. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
module_init(cvm_oct_init_module);
module_exit(cvm_oct_cleanup_module);