1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
65 #include "s2io-regs.h"
67 /* S2io Driver name & version. */
/* Reported through the ethtool driver-info path; bump the version
 * string whenever the driver is released.
 */
68 static char s2io_driver_name[] = "Neterion";
69 static char s2io_driver_version[] = "Version 1.7.7";
72 * Cards with following subsystem_id have a link state indication
73 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
74 * macro below identifies these cards given the subsystem_id.
/*
 * Evaluates to 1 when the given PCI subsystem id belongs to a card with
 * the faulty link-state-indication problem (600B-600D, 640B-640D),
 * 0 otherwise.
 *
 * Fix vs. original: the expansion and the 'subid' argument are now fully
 * parenthesized.  The original ended in an unparenthesized '? 1 : 0', so
 * an expression like 'CARDS_WITH_FAULTY_LINK_INDICATORS(x) && y' pulled
 * '&& y' into the ternary's else-branch and silently ignored 'y'.
 */
#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
	(((((subid) >= 0x600B) && ((subid) <= 0x600D)) || \
	  (((subid) >= 0x640B) && ((subid) <= 0x640D))) ? 1 : 0)
/*
 * Link is considered up when neither the remote-fault nor the local-fault
 * bit is set in the adapter-status register value.
 *
 * Fix vs. original: '(val64)' is parenthesized so a compound argument
 * (e.g. 'a | b') cannot bind against '&' at the wrong precedence.
 */
#define LINK_IS_UP(val64) (!((val64) & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
					ADAPTER_STATUS_RMAC_LOCAL_FAULT)))

/*
 * Atomically claims the driver tasklet; a non-zero result means it was
 * already in use.  NOTE: expands against a local variable 'sp'
 * (the device private struct) that must be in scope at the use site.
 */
#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how full the given Rx ring currently is.
 * @sp: device private structure (source of the per-ring mac_control data).
 * @rxb_size: number of Rx buffers currently posted on the ring.
 * @ring: index of the ring being examined.
 *
 * Compares the outstanding descriptor count (pkt_cnt - rxb_size) against
 * fixed watermarks.  NOTE(review): the body is truncated in this extract
 * (return statements and closing braces are missing); the return-value
 * encoding cannot be confirmed from here.
 */
85 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
88 mac_info_t *mac_control;
90 mac_control = &sp->mac_control;
/* More than 16 descriptors outstanding: ring is running low. */
91 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
93 if ((mac_control->rings[ring].pkt_cnt - rxb_size) <
102 /* Ethtool related variables and Macros. */
/* Names of the adapter self-tests exposed through ethtool; the
 * "(offline)"/"(online)" tag tells the user whether running the test
 * disrupts live traffic.  NOTE(review): the array terminator is not
 * visible in this extract.
 */
103 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
104 "Register test\t(offline)",
105 "Eeprom test\t(offline)",
106 "Link test\t(online)",
107 "RLDRAM test\t(offline)",
108 "BIST Test\t(offline)"
/* Key names for the hardware statistics counters reported through
 * 'ethtool -S'; tmac_* are transmit-side, rmac_* receive-side counters.
 * NOTE(review): this extract shows only a subset of the entries and the
 * array terminator is missing.
 */
111 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
113 {"tmac_data_octets"},
117 {"tmac_pause_ctrl_frms"},
118 {"tmac_any_err_frms"},
119 {"tmac_vld_ip_octets"},
127 {"rmac_data_octets"},
128 {"rmac_fcs_err_frms"},
130 {"rmac_vld_mcst_frms"},
131 {"rmac_vld_bcst_frms"},
132 {"rmac_in_rng_len_err_frms"},
134 {"rmac_pause_ctrl_frms"},
135 {"rmac_discarded_frms"},
136 {"rmac_usized_frms"},
137 {"rmac_osized_frms"},
139 {"rmac_jabber_frms"},
147 {"rmac_err_drp_udp"},
149 {"rmac_accepted_ip"},
/*
 * Sizes of the ethtool statistics and self-test tables, in entries and
 * in bytes of string storage.
 *
 * Fix vs. original: each expansion is now parenthesized.  The originals
 * expanded to bare 'a / b' and 'a * b' expressions, which mis-associate
 * when the macro is used inside a larger arithmetic expression.
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)

#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
160 * Constants to be programmed into the Xena's registers, to configure
/* Sentinel value embedded in the config arrays below; when the writer
 * loop in init_nic() encounters it, writing switches from the
 * dtx_control register to the mdio_control register (see the XAUI
 * configuration comment in init_nic()).
 */
164 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* 64-bit write sequence for the XAUI MDIO control register.
 * NOTE(review): the END_SIGN terminator entry is not visible in this
 * extract.
 */
167 static u64 default_mdio_cfg[] = {
169 0xC001010000000000ULL, 0xC0010100000000E0ULL,
170 0xC0010100008000E4ULL,
171 /* Remove Reset from PMA PLL */
172 0xC001010000000000ULL, 0xC0010100000000E0ULL,
173 0xC0010100000000E4ULL,
/* 64-bit write sequence for the XAUI DTX control register, consumed by
 * init_nic() until an END_SIGN entry is reached.  NOTE(review): the
 * terminator entry is not visible in this extract.
 */
177 static u64 default_dtx_cfg[] = {
178 0x8000051500000000ULL, 0x80000515000000E0ULL,
179 0x80000515D93500E4ULL, 0x8001051500000000ULL,
180 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
181 0x8002051500000000ULL, 0x80020515000000E0ULL,
182 0x80020515F21000E4ULL,
183 /* Set PADLOOPBACKN */
184 0x8002051500000000ULL, 0x80020515000000E0ULL,
185 0x80020515B20000E4ULL, 0x8003051500000000ULL,
186 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
187 0x8004051500000000ULL, 0x80040515000000E0ULL,
188 0x80040515B20000E4ULL, 0x8005051500000000ULL,
189 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
191 /* Remove PADLOOPBACKN */
192 0x8002051500000000ULL, 0x80020515000000E0ULL,
193 0x80020515F20000E4ULL, 0x8003051500000000ULL,
194 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
195 0x8004051500000000ULL, 0x80040515000000E0ULL,
196 0x80040515F20000E4ULL, 0x8005051500000000ULL,
197 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
202 * Constants for Fixing the MacAddress problem seen mostly on
/* Register write sequence used to work around the MAC-address problem
 * described in the comment above.  NOTE(review): the array terminator
 * is not visible in this extract.
 */
205 static u64 fix_mac[] = {
206 0x0060000000000000ULL, 0x0060600000000000ULL,
207 0x0040600000000000ULL, 0x0000600000000000ULL,
208 0x0020600000000000ULL, 0x0060600000000000ULL,
209 0x0020600000000000ULL, 0x0060600000000000ULL,
210 0x0020600000000000ULL, 0x0060600000000000ULL,
211 0x0020600000000000ULL, 0x0060600000000000ULL,
212 0x0020600000000000ULL, 0x0060600000000000ULL,
213 0x0020600000000000ULL, 0x0060600000000000ULL,
214 0x0020600000000000ULL, 0x0060600000000000ULL,
215 0x0020600000000000ULL, 0x0060600000000000ULL,
216 0x0020600000000000ULL, 0x0060600000000000ULL,
217 0x0020600000000000ULL, 0x0060600000000000ULL,
218 0x0020600000000000ULL, 0x0000600000000000ULL,
219 0x0040600000000000ULL, 0x0060600000000000ULL,
223 /* Module Loadable parameters. */
/* Number of Tx FIFOs to use (see module-parameter notes in the file
 * header).
 */
224 static unsigned int tx_fifo_num = 1;
/* Per-FIFO Tx descriptor counts; zero entries take the driver default. */
225 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
226 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
/* Number of Rx rings and per-ring sizes (zero = default). */
227 static unsigned int rx_ring_num = 1;
228 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
229 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Statistics auto-refresh period, programmed via SET_UPDT_PERIOD() in
 * init_nic().
 */
230 static unsigned int Stats_refresh_time = 4;
/* Per-ring receive frame-length steering values (zero = default). */
231 static unsigned int rts_frm_len[MAX_RX_RINGS] =
232 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Pause-frame time value and per-queue pause thresholds, written to the
 * RMAC/MC pause registers in init_nic().
 */
233 static unsigned int rmac_pause_time = 65535;
234 static unsigned int mc_pause_threshold_q0q3 = 187;
235 static unsigned int mc_pause_threshold_q4q7 = 187;
/* TxDMA read-split limit, programmed via PIC_CNTL_SHARED_SPLITS(). */
236 static unsigned int shared_splits;
/* Link-utilization sampling periods (see mac_link_util programming). */
237 static unsigned int tmac_util_period = 5;
238 static unsigned int rmac_util_period = 5;
/* NOTE(review): the matching #endif for this conditional is not visible
 * in this extract.
 */
239 #ifndef CONFIG_S2IO_NAPI
240 static unsigned int indicate_max_pkts;
245 * This table lists all the devices that this driver supports.
/* PCI id table: the Xena adapters handled by this driver, matched on
 * vendor/device id with any subsystem id accepted.  Exported to module
 * tooling via MODULE_DEVICE_TABLE below so hotplug can autoload the
 * driver.
 */
247 static struct pci_device_id s2io_tbl[] __devinitdata = {
248 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
249 PCI_ANY_ID, PCI_ANY_ID},
250 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
251 PCI_ANY_ID, PCI_ANY_ID},
252 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
253 PCI_ANY_ID, PCI_ANY_ID},
254 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
255 PCI_ANY_ID, PCI_ANY_ID},
259 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver registration: binds s2io_init_nic/s2io_rem_nic as the
 * probe/remove callbacks for the devices listed in s2io_tbl.
 * NOTE(review): the '.name' initializer and closing brace are not
 * visible in this extract.
 */
261 static struct pci_driver s2io_driver = {
263 .id_table = s2io_tbl,
264 .probe = s2io_init_nic,
265 .remove = __devexit_p(s2io_rem_nic),
268 /* A simplifier macro used both by init and free shared_mem Fns(). */
/*
 * Number of pages needed to hold 'len' items when 'per_each' items fit
 * in one page (ceiling division).  Used by both init_shared_mem() and
 * free_shared_mem() so the two sides agree on the page count.
 *
 * Fix vs. original: both arguments are now parenthesized.  The original
 * divided by a bare 'per_each', so a compound argument such as 'a + b'
 * expanded to '... / a + b' and produced the wrong count.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
272 * init_shared_mem - Allocation and Initialization of Memory
273 * @nic: Device private variable.
274 * Description: The function allocates all the memory areas shared
275 * between the NIC and the driver. This includes Tx descriptors,
276 * Rx descriptors and the statistics block.
/*
 * init_shared_mem - allocate the NIC/driver shared memory areas.
 * @nic: device private structure.
 *
 * Allocates the Tx descriptor lists (one page-backed list per FIFO),
 * the Rx descriptor blocks (DMA-consistent, linked into a ring), the
 * 2-buffer-mode buffer-address storage when CONFIG_2BUFF_MODE is set,
 * and the DMA-consistent statistics block.
 *
 * NOTE(review): this extract is heavily truncated — return statements,
 * several closing braces and some intermediate assignments are missing,
 * so the error paths and return value cannot be confirmed from here.
 * The comments below annotate only what the visible lines show.
 */
279 static int init_shared_mem(struct s2io_nic *nic)
282 void *tmp_v_addr, *tmp_v_addr_next;
283 dma_addr_t tmp_p_addr, tmp_p_addr_next;
284 RxD_block_t *pre_rxd_blk = NULL;
285 int i, j, blk_cnt, rx_sz, tx_sz;
286 int lst_size, lst_per_page;
287 struct net_device *dev = nic->dev;
288 #ifdef CONFIG_2BUFF_MODE
293 mac_info_t *mac_control;
294 struct config_param *config;
296 mac_control = &nic->mac_control;
297 config = &nic->config;
300 /* Allocation and initialization of TXDLs in FIOFs */
/* Sum the requested descriptors over all FIFOs and reject a total that
 * exceeds the hardware limit.
 */
302 for (i = 0; i < config->tx_fifo_num; i++) {
303 size += config->tx_cfg[i].fifo_len;
305 if (size > MAX_AVAILABLE_TXDS) {
306 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
308 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
309 DBG_PRINT(ERR_DBG, "that can be used\n");
/* One TxDL is max_txds descriptors; lst_per_page lists fit in a page. */
313 lst_size = (sizeof(TxD_t) * config->max_txds);
314 tx_sz = lst_size * size;
315 lst_per_page = PAGE_SIZE / lst_size;
/* Per-FIFO bookkeeping array, one list_info_hold_t per descriptor list. */
317 for (i = 0; i < config->tx_fifo_num; i++) {
318 int fifo_len = config->tx_cfg[i].fifo_len;
319 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
320 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
322 if (!mac_control->fifos[i].list_info) {
324 "Malloc failed for list_info\n");
327 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
/* Carve page-sized DMA-consistent chunks into per-list virt/phys
 * addresses recorded in list_info.
 */
329 for (i = 0; i < config->tx_fifo_num; i++) {
330 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
332 mac_control->fifos[i].tx_curr_put_info.offset = 0;
333 mac_control->fifos[i].tx_curr_put_info.fifo_len =
334 config->tx_cfg[i].fifo_len - 1;
335 mac_control->fifos[i].tx_curr_get_info.offset = 0;
336 mac_control->fifos[i].tx_curr_get_info.fifo_len =
337 config->tx_cfg[i].fifo_len - 1;
338 mac_control->fifos[i].fifo_no = i;
339 mac_control->fifos[i].nic = nic;
340 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
342 for (j = 0; j < page_num; j++) {
346 tmp_v = pci_alloc_consistent(nic->pdev,
350 "pci_alloc_consistent ");
351 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
354 while (k < lst_per_page) {
355 int l = (j * lst_per_page) + k;
356 if (l == config->tx_cfg[i].fifo_len)
358 mac_control->fifos[i].list_info[l].list_virt_addr =
359 tmp_v + (k * lst_size);
360 mac_control->fifos[i].list_info[l].list_phy_addr =
361 tmp_p + (k * lst_size);
367 /* Allocation and initialization of RXDs in Rings */
/* Each ring's descriptor count must be a whole number of blocks;
 * pkt_cnt excludes the one link descriptor per block.
 */
369 for (i = 0; i < config->rx_ring_num; i++) {
370 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
371 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
372 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
374 DBG_PRINT(ERR_DBG, "RxDs per Block");
377 size += config->rx_cfg[i].num_rxd;
378 mac_control->rings[i].block_count =
379 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
380 mac_control->rings[i].pkt_cnt =
381 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
383 size = (size * (sizeof(RxD_t)));
386 for (i = 0; i < config->rx_ring_num; i++) {
387 mac_control->rings[i].rx_curr_get_info.block_index = 0;
388 mac_control->rings[i].rx_curr_get_info.offset = 0;
389 mac_control->rings[i].rx_curr_get_info.ring_len =
390 config->rx_cfg[i].num_rxd - 1;
391 mac_control->rings[i].rx_curr_put_info.block_index = 0;
392 mac_control->rings[i].rx_curr_put_info.offset = 0;
393 mac_control->rings[i].rx_curr_put_info.ring_len =
394 config->rx_cfg[i].num_rxd - 1;
395 mac_control->rings[i].nic = nic;
396 mac_control->rings[i].ring_no = i;
399 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
400 /* Allocating all the Rx blocks */
401 for (j = 0; j < blk_cnt; j++) {
402 #ifndef CONFIG_2BUFF_MODE
403 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
405 size = SIZE_OF_BLOCK;
407 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
409 if (tmp_v_addr == NULL) {
411 * In case of failure, free_shared_mem()
412 * is called, which should free any
413 * memory that was alloced till the
416 mac_control->rings[i].rx_blocks[j].block_virt_addr =
420 memset(tmp_v_addr, 0, size);
421 mac_control->rings[i].rx_blocks[j].block_virt_addr =
423 mac_control->rings[i].rx_blocks[j].block_dma_addr =
426 /* Interlinking all Rx Blocks */
/* Each block's trailing link descriptor points at the next block
 * (wrapping modulo blk_cnt), forming a circular descriptor chain the
 * hardware can follow.
 */
427 for (j = 0; j < blk_cnt; j++) {
429 mac_control->rings[i].rx_blocks[j].block_virt_addr;
431 mac_control->rings[i].rx_blocks[(j + 1) %
432 blk_cnt].block_virt_addr;
434 mac_control->rings[i].rx_blocks[j].block_dma_addr;
436 mac_control->rings[i].rx_blocks[(j + 1) %
437 blk_cnt].block_dma_addr;
439 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
440 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
443 #ifndef CONFIG_2BUFF_MODE
444 pre_rxd_blk->reserved_2_pNext_RxD_block =
445 (unsigned long) tmp_v_addr_next;
447 pre_rxd_blk->pNext_RxD_Blk_physical =
448 (u64) tmp_p_addr_next;
452 #ifdef CONFIG_2BUFF_MODE
454 * Allocation of Storages for buffer addresses in 2BUFF mode
455 * and the buffers as well.
457 for (i = 0; i < config->rx_ring_num; i++) {
459 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
460 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
462 if (!mac_control->rings[i].ba)
464 for (j = 0; j < blk_cnt; j++) {
466 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
467 (MAX_RXDS_PER_BLOCK + 1)),
469 if (!mac_control->rings[i].ba[j])
471 while (k != MAX_RXDS_PER_BLOCK) {
472 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE, then round the usable pointer
 * (ba_0/ba_1) down to the alignment boundary; ba_*_org keeps
 * the original pointer for kfree().
 */
474 ba->ba_0_org = (void *) kmalloc
475 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
478 tmp = (u64) ba->ba_0_org;
480 tmp &= ~((u64) ALIGN_SIZE);
481 ba->ba_0 = (void *) tmp;
483 ba->ba_1_org = (void *) kmalloc
484 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
487 tmp = (u64) ba->ba_1_org;
489 tmp &= ~((u64) ALIGN_SIZE);
490 ba->ba_1 = (void *) tmp;
497 /* Allocation and initialization of Statistics block */
498 size = sizeof(StatInfo_t);
499 mac_control->stats_mem = pci_alloc_consistent
500 (nic->pdev, size, &mac_control->stats_mem_phy);
502 if (!mac_control->stats_mem) {
504 * In case of failure, free_shared_mem() is called, which
505 * should free any memory that was alloced till the
510 mac_control->stats_mem_sz = size;
512 tmp_v_addr = mac_control->stats_mem;
513 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
514 memset(tmp_v_addr, 0, size);
515 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
516 (unsigned long long) tmp_p_addr);
522 * free_shared_mem - Free the allocated Memory
523 * @nic: Device private variable.
524 * Description: This function is to free all memory locations allocated by
525 * the init_shared_mem() function and return it to the kernel.
/*
 * free_shared_mem - release everything init_shared_mem() allocated.
 * @nic: device private structure.
 *
 * Mirrors init_shared_mem(): frees the page-backed Tx descriptor lists
 * and their list_info arrays, the DMA-consistent Rx blocks, the
 * 2-buffer-mode address storage (when CONFIG_2BUFF_MODE), and the
 * statistics block.  Safe to call after a partial init: allocations
 * are NULL-checked before freeing.
 *
 * NOTE(review): this extract is truncated — several statements and
 * closing braces are missing; annotations cover only the visible lines.
 */
528 static void free_shared_mem(struct s2io_nic *nic)
530 int i, j, blk_cnt, size;
532 dma_addr_t tmp_p_addr;
533 mac_info_t *mac_control;
534 struct config_param *config;
535 int lst_size, lst_per_page;
541 mac_control = &nic->mac_control;
542 config = &nic->config;
/* Recompute the same page geometry init_shared_mem() used. */
544 lst_size = (sizeof(TxD_t) * config->max_txds);
545 lst_per_page = PAGE_SIZE / lst_size;
547 for (i = 0; i < config->tx_fifo_num; i++) {
548 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
550 for (j = 0; j < page_num; j++) {
/* Every lst_per_page-th list_info entry holds the base address of one
 * allocated page; skip pages that were never allocated.
 */
551 int mem_blks = (j * lst_per_page);
552 if (!mac_control->fifos[i].list_info[mem_blks].
555 pci_free_consistent(nic->pdev, PAGE_SIZE,
556 mac_control->fifos[i].
559 mac_control->fifos[i].
563 kfree(mac_control->fifos[i].list_info);
566 #ifndef CONFIG_2BUFF_MODE
567 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
569 size = SIZE_OF_BLOCK;
/* Release each ring's DMA-consistent Rx blocks. */
571 for (i = 0; i < config->rx_ring_num; i++) {
572 blk_cnt = mac_control->rings[i].block_count;
573 for (j = 0; j < blk_cnt; j++) {
574 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
576 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
578 if (tmp_v_addr == NULL)
580 pci_free_consistent(nic->pdev, size,
581 tmp_v_addr, tmp_p_addr);
585 #ifdef CONFIG_2BUFF_MODE
586 /* Freeing buffer storage addresses in 2BUFF mode. */
587 for (i = 0; i < config->rx_ring_num; i++) {
589 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
590 for (j = 0; j < blk_cnt; j++) {
592 if (!mac_control->rings[i].ba[j])
594 while (k != MAX_RXDS_PER_BLOCK) {
595 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
600 kfree(mac_control->rings[i].ba[j]);
602 if (mac_control->rings[i].ba)
603 kfree(mac_control->rings[i].ba);
/* Finally return the statistics block. */
607 if (mac_control->stats_mem) {
608 pci_free_consistent(nic->pdev,
609 mac_control->stats_mem_sz,
610 mac_control->stats_mem,
611 mac_control->stats_mem_phy);
616 * init_nic - Initialization of hardware
617 * @nic: device private variable
618 * Description: The function sequentially configures every block
619 * of the H/W from their reset values.
620 * Return Value: SUCCESS on success and
621 * '-1' on failure (endian settings incorrect).
/*
 * init_nic - program every H/W block from its reset values.
 * @nic: device private structure.
 *
 * Sets the swapper control, configures the XAUI interface, partitions
 * the Tx FIFOs, shares memory among the Rx queues, programs statistics,
 * link-utilization sampling, TTI/RTI traffic-interrupt memories, pause
 * parameters and the read-split limit.
 *
 * NOTE(review): this extract is truncated — returns, some assignments
 * and closing braces are missing; comments below annotate only visible
 * lines.
 */
624 static int init_nic(struct s2io_nic *nic)
626 XENA_dev_config_t __iomem *bar0 = nic->bar0;
627 struct net_device *dev = nic->dev;
628 register u64 val64 = 0;
632 mac_info_t *mac_control;
633 struct config_param *config;
634 int mdio_cnt = 0, dtx_cnt = 0;
635 unsigned long long mem_share;
638 mac_control = &nic->mac_control;
639 config = &nic->config;
641 /* to set the swapper control on the card */
642 if(s2io_set_swapper(nic)) {
643 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
647 /* Remove XGXS from reset state */
649 writeq(val64, &bar0->sw_reset);
/* Read back to flush the posted write. */
651 val64 = readq(&bar0->sw_reset);
653 /* Enable Receiving broadcasts */
/* mac_cfg is key-protected: the RMAC_CFG_KEY must be rewritten before
 * each 32-bit half of the 64-bit value is written.
 */
654 add = &bar0->mac_cfg;
655 val64 = readq(&bar0->mac_cfg);
656 val64 |= MAC_RMAC_BCAST_ENABLE;
657 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
658 writel((u32) val64, add);
659 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
660 writel((u32) (val64 >> 32), (add + 4));
662 /* Read registers in all blocks */
663 val64 = readq(&bar0->mac_int_mask);
664 val64 = readq(&bar0->mc_int_mask);
665 val64 = readq(&bar0->xgxs_int_mask);
669 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
672 * Configuring the XAUI Interface of Xena.
673 * ***************************************
674 * To Configure the Xena's XAUI, one has to write a series
675 * of 64 bit values into two registers in a particular
676 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
677 * which will be defined in the array of configuration values
678 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
679 * to switch writing from one register to another. We continue
680 * writing these values until we encounter the 'END_SIGN' macro.
681 * For example, After making a series of 21 writes into
682 * dtx_control register the 'SWITCH_SIGN' appears and hence we
683 * start writing into mdio_control until we encounter END_SIGN.
687 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
688 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
692 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
693 &bar0->dtx_control, UF);
694 val64 = readq(&bar0->dtx_control);
698 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
699 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
703 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
704 &bar0->mdio_control, UF);
705 val64 = readq(&bar0->mdio_control);
708 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
709 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
716 /* Tx DMA Initialization */
717 
718 writeq(val64, &bar0->tx_fifo_partition_0);
719 writeq(val64, &bar0->tx_fifo_partition_1);
720 writeq(val64, &bar0->tx_fifo_partition_2);
721 writeq(val64, &bar0->tx_fifo_partition_3);
/* Pack each FIFO's length-1 and priority into its 32-bit slot of the
 * four partition registers (4 FIFOs per register).
 */
724 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
726 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
727 13) | vBIT(config->tx_cfg[i].fifo_priority,
730 if (i == (config->tx_fifo_num - 1)) {
737 writeq(val64, &bar0->tx_fifo_partition_0);
741 writeq(val64, &bar0->tx_fifo_partition_1);
745 writeq(val64, &bar0->tx_fifo_partition_2);
749 writeq(val64, &bar0->tx_fifo_partition_3);
754 /* Enable Tx FIFO partition 0. */
755 val64 = readq(&bar0->tx_fifo_partition_0);
756 val64 |= BIT(0); /* To enable the FIFO partition. */
757 writeq(val64, &bar0->tx_fifo_partition_0);
759 val64 = readq(&bar0->tx_fifo_partition_0);
760 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
761 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
764 * Initialization of Tx_PA_CONFIG register to ignore packet
765 * integrity checking.
767 val64 = readq(&bar0->tx_pa_cfg);
768 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
769 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
770 writeq(val64, &bar0->tx_pa_cfg);
772 /* Rx DMA initialization. */
/* Program each ring's priority into its field of rx_queue_priority. */
774 for (i = 0; i < config->rx_ring_num; i++) {
776 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
779 writeq(val64, &bar0->rx_queue_priority);
782 * Allocating equal share of memory to all the
/* Queue 0 additionally absorbs the remainder so the shares sum to the
 * full memory size.
 */
787 for (i = 0; i < config->rx_ring_num; i++) {
790 mem_share = (mem_size / config->rx_ring_num +
791 mem_size % config->rx_ring_num);
792 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
795 mem_share = (mem_size / config->rx_ring_num);
796 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
799 mem_share = (mem_size / config->rx_ring_num);
800 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
803 mem_share = (mem_size / config->rx_ring_num);
804 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
807 mem_share = (mem_size / config->rx_ring_num);
808 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
811 mem_share = (mem_size / config->rx_ring_num);
812 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
815 mem_share = (mem_size / config->rx_ring_num);
816 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
819 mem_share = (mem_size / config->rx_ring_num);
820 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
824 writeq(val64, &bar0->rx_queue_cfg);
826 /* Initializing the Tx round robin registers to 0
827 * filling tx and rx round robin registers as per
828 * the number of FIFOs and Rings is still TODO
830 writeq(0, &bar0->tx_w_round_robin_0);
831 writeq(0, &bar0->tx_w_round_robin_1);
832 writeq(0, &bar0->tx_w_round_robin_2);
833 writeq(0, &bar0->tx_w_round_robin_3);
834 writeq(0, &bar0->tx_w_round_robin_4);
838 * Disable Rx steering. Hard coding all packets to be steered to
841 val64 = 0x8080808080808080ULL;
842 writeq(val64, &bar0->rts_qos_steering);
846 for (i = 0; i < 8; i++)
847 writeq(val64, &bar0->rts_frm_len_n[i]);
849 /* Set the default rts frame length for ring0 */
/* MTU + 22 covers the Ethernet header/trailer overhead. */
850 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu+22),
851 &bar0->rts_frm_len_n[0]);
853 /* Program statistics memory */
/* Point the adapter at the DMA statistics block allocated in
 * init_shared_mem() and enable periodic auto-refresh.
 */
854 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
855 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
856 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
857 writeq(val64, &bar0->stat_cfg);
860 * Initializing the sampling rate for the device to calculate the
861 * bandwidth utilization.
863 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
864 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
865 writeq(val64, &bar0->mac_link_util);
869 * Initializing the Transmit and Receive Traffic Interrupt
873 * TTI Initialization. Default Tx timer gets us about
874 * 250 interrupts per sec. Continuous interrupts are enabled
877 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
878 TTI_DATA1_MEM_TX_URNG_A(0xA) |
879 TTI_DATA1_MEM_TX_URNG_B(0x10) |
880 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
881 TTI_DATA1_MEM_TX_TIMER_CI_EN;
882 writeq(val64, &bar0->tti_data1_mem);
884 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
885 TTI_DATA2_MEM_TX_UFC_B(0x20) |
886 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
887 writeq(val64, &bar0->tti_data2_mem);
889 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
890 writeq(val64, &bar0->tti_command_mem);
893 * Once the operation completes, the Strobe bit of the command
894 * register will be reset. We poll for this particular condition
895 * We wait for a maximum of 500ms for the operation to complete,
896 * if it's not complete by then we return error.
900 val64 = readq(&bar0->tti_command_mem);
901 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
905 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
913 /* RTI Initialization */
914 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
915 RTI_DATA1_MEM_RX_URNG_A(0xA) |
916 RTI_DATA1_MEM_RX_URNG_B(0x10) |
917 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
919 writeq(val64, &bar0->rti_data1_mem);
921 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
922 RTI_DATA2_MEM_RX_UFC_B(0x2) |
923 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
924 writeq(val64, &bar0->rti_data2_mem);
926 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
927 writeq(val64, &bar0->rti_command_mem);
930 * Once the operation completes, the Strobe bit of the command
931 * register will be reset. We poll for this particular condition
932 * We wait for a maximum of 500ms for the operation to complete,
933 * if it's not complete by then we return error.
937 val64 = readq(&bar0->rti_command_mem);
938 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
942 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
951 * Initializing proper values as Pause threshold into all
952 * the 8 Queues on Rx side.
954 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
955 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
957 /* Disable RMAC PAD STRIPPING */
/* Key-protected mac_cfg write, same two-half sequence as above. */
958 add = (void *) &bar0->mac_cfg;
959 val64 = readq(&bar0->mac_cfg);
960 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
961 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
962 writel((u32) (val64), add);
963 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
964 writel((u32) (val64 >> 32), (add + 4));
965 val64 = readq(&bar0->mac_cfg);
968 * Set the time value to be inserted in the pause frame
971 val64 = readq(&bar0->rmac_pause_cfg);
972 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
973 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
974 writeq(val64, &bar0->rmac_pause_cfg);
977 * Set the Threshold Limit for Generating the pause frame
978 * If the amount of data in any Queue exceeds ratio of
979 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
980 * pause frame is generated
983 for (i = 0; i < 4; i++) {
985 (((u64) 0xFF00 | nic->mac_control.
986 mc_pause_threshold_q0q3)
989 writeq(val64, &bar0->mc_pause_thresh_q0q3);
992 for (i = 0; i < 4; i++) {
994 (((u64) 0xFF00 | nic->mac_control.
995 mc_pause_threshold_q4q7)
998 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1001 * TxDMA will stop Read request if the number of read split has
1002 * exceeded the limit pointed by shared_splits
1004 val64 = readq(&bar0->pic_control);
1005 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1006 writeq(val64, &bar0->pic_control);
1012 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1013 * @nic: device private variable,
1014 * @mask: A mask indicating which Intr block must be modified and,
1015 * @flag: A flag indicating whether to enable or disable the Intrs.
1016 * Description: This function will either disable or enable the interrupts
1017 * depending on the flag argument. The mask argument can be used to
1018 * enable/disable any Intr block.
1019 * Return Value: NONE.
/*
 * en_dis_able_nic_intrs - enable or disable interrupt blocks.
 * @nic: device private structure.
 * @mask: which interrupt blocks to modify (TX/RX_PIC, TX/RX_DMA, MAC,
 *        XGXS, MC, TX/RX_TRAFFIC bits may be combined).
 * @flag: ENABLE_INTRS or DISABLE_INTRS.
 *
 * For each selected block: enabling clears the block's bit(s) in
 * general_int_mask and programs the block-level mask; disabling masks
 * everything at the block level and sets the bit(s) back in
 * general_int_mask.
 *
 * NOTE(review): this extract is truncated — several 'val64 = temp64 |
 * val64'-style statements, comment openers and closing braces are
 * missing; comments below cover only the visible lines.
 */
1022 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1024 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1025 register u64 val64 = 0, temp64 = 0;
1027 /* Top level interrupt classification */
1028 /* PIC Interrupts */
1029 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1030 /* Enable PIC Intrs in the general intr mask register */
1031 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1032 if (flag == ENABLE_INTRS) {
/* Clearing a bit in general_int_mask un-masks (enables) that source. */
1033 temp64 = readq(&bar0->general_int_mask);
1034 temp64 &= ~((u64) val64);
1035 writeq(temp64, &bar0->general_int_mask);
1037 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1038 * interrupts for now.
1041 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1043 * No MSI Support is available presently, so TTI and
1044 * RTI interrupts are also disabled.
1046 } else if (flag == DISABLE_INTRS) {
1048 * Disable PIC Intrs in the general
1049 * intr mask register
1051 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1052 temp64 = readq(&bar0->general_int_mask);
1054 writeq(val64, &bar0->general_int_mask);
1058 /* DMA Interrupts */
1059 /* Enabling/Disabling Tx DMA interrupts */
1060 if (mask & TX_DMA_INTR) {
1061 /* Enable TxDMA Intrs in the general intr mask register */
1062 val64 = TXDMA_INT_M;
1063 if (flag == ENABLE_INTRS) {
1064 temp64 = readq(&bar0->general_int_mask);
1065 temp64 &= ~((u64) val64);
1066 writeq(temp64, &bar0->general_int_mask);
1068 * Keep all interrupts other than PFC interrupt
1069 * and PCC interrupt disabled in DMA level.
1071 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1073 writeq(val64, &bar0->txdma_int_mask);
1075 * Enable only the MISC error 1 interrupt in PFC block
1077 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1078 writeq(val64, &bar0->pfc_err_mask);
1080 * Enable only the FB_ECC error interrupt in PCC block
1082 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1083 writeq(val64, &bar0->pcc_err_mask);
1084 } else if (flag == DISABLE_INTRS) {
1086 * Disable TxDMA Intrs in the general intr mask
1089 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1090 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1091 temp64 = readq(&bar0->general_int_mask);
1093 writeq(val64, &bar0->general_int_mask);
1097 /* Enabling/Disabling Rx DMA interrupts */
1098 if (mask & RX_DMA_INTR) {
1099 /* Enable RxDMA Intrs in the general intr mask register */
1100 val64 = RXDMA_INT_M;
1101 if (flag == ENABLE_INTRS) {
1102 temp64 = readq(&bar0->general_int_mask);
1103 temp64 &= ~((u64) val64);
1104 writeq(temp64, &bar0->general_int_mask);
1106 * All RxDMA block interrupts are disabled for now
1109 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1110 } else if (flag == DISABLE_INTRS) {
1112 * Disable RxDMA Intrs in the general intr mask
1115 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1116 temp64 = readq(&bar0->general_int_mask);
1118 writeq(val64, &bar0->general_int_mask);
1122 /* MAC Interrupts */
1123 /* Enabling/Disabling MAC interrupts */
1124 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1125 val64 = TXMAC_INT_M | RXMAC_INT_M;
1126 if (flag == ENABLE_INTRS) {
1127 temp64 = readq(&bar0->general_int_mask);
1128 temp64 &= ~((u64) val64);
1129 writeq(temp64, &bar0->general_int_mask);
1131 * All MAC block error interrupts are disabled for now
1132 * except the link status change interrupt.
1135 val64 = MAC_INT_STATUS_RMAC_INT;
1136 temp64 = readq(&bar0->mac_int_mask);
1137 temp64 &= ~((u64) val64);
1138 writeq(temp64, &bar0->mac_int_mask);
1140 val64 = readq(&bar0->mac_rmac_err_mask);
1141 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1142 writeq(val64, &bar0->mac_rmac_err_mask);
1143 } else if (flag == DISABLE_INTRS) {
1145 * Disable MAC Intrs in the general intr mask register
1147 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1148 writeq(DISABLE_ALL_INTRS,
1149 &bar0->mac_rmac_err_mask);
1151 temp64 = readq(&bar0->general_int_mask);
1153 writeq(val64, &bar0->general_int_mask);
1157 /* XGXS Interrupts */
1158 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1159 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1160 if (flag == ENABLE_INTRS) {
1161 temp64 = readq(&bar0->general_int_mask);
1162 temp64 &= ~((u64) val64);
1163 writeq(temp64, &bar0->general_int_mask);
1165 * All XGXS block error interrupts are disabled for now
1168 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1169 } else if (flag == DISABLE_INTRS) {
1171 * Disable MC Intrs in the general intr mask register
1173 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1174 temp64 = readq(&bar0->general_int_mask);
1176 writeq(val64, &bar0->general_int_mask);
1180 /* Memory Controller(MC) interrupts */
1181 if (mask & MC_INTR) {
1183 if (flag == ENABLE_INTRS) {
1184 temp64 = readq(&bar0->general_int_mask);
1185 temp64 &= ~((u64) val64);
1186 writeq(temp64, &bar0->general_int_mask);
1188 * All MC block error interrupts are disabled for now.
1191 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1192 } else if (flag == DISABLE_INTRS) {
1194 * Disable MC Intrs in the general intr mask register
1196 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1197 temp64 = readq(&bar0->general_int_mask);
1199 writeq(val64, &bar0->general_int_mask);
1204 /* Tx traffic interrupts */
1205 if (mask & TX_TRAFFIC_INTR) {
1206 val64 = TXTRAFFIC_INT_M;
1207 if (flag == ENABLE_INTRS) {
1208 temp64 = readq(&bar0->general_int_mask);
1209 temp64 &= ~((u64) val64);
1210 writeq(temp64, &bar0->general_int_mask);
1212 * Enable all the Tx side interrupts
1213 * writing 0 Enables all 64 TX interrupt levels
1215 writeq(0x0, &bar0->tx_traffic_mask);
1216 } else if (flag == DISABLE_INTRS) {
1218 * Disable Tx Traffic Intrs in the general intr mask
1221 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1222 temp64 = readq(&bar0->general_int_mask);
1224 writeq(val64, &bar0->general_int_mask);
1228 /* Rx traffic interrupts */
1229 if (mask & RX_TRAFFIC_INTR) {
1230 val64 = RXTRAFFIC_INT_M;
1231 if (flag == ENABLE_INTRS) {
1232 temp64 = readq(&bar0->general_int_mask);
1233 temp64 &= ~((u64) val64);
1234 writeq(temp64, &bar0->general_int_mask);
1235 /* writing 0 Enables all 8 RX interrupt levels */
1236 writeq(0x0, &bar0->rx_traffic_mask);
1237 } else if (flag == DISABLE_INTRS) {
1239 * Disable Rx Traffic Intrs in the general intr mask
1242 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1243 temp64 = readq(&bar0->general_int_mask);
1245 writeq(val64, &bar0->general_int_mask);
/*
 * check_prc_pcc_state - compare the RMAC-PCC-idle and RC-PRC-quiescent
 * bits of the adapter status word against the pattern expected for the
 * current device state.
 * @val64: value read from the adapter status register.
 * @flag: FALSE if the adapter enable bit has never been written, in which
 *        case a different bit pattern is expected (PCC not yet idle).
 * NOTE(review): this dump is missing lines (the embedded line numbers
 * jump), so the return statements and closing braces are not visible here.
 */
1250 static int check_prc_pcc_state(u64 val64, int flag)
/* Adapter never enabled: PCC must NOT be idle, PRC must be quiescent. */
1254 if (flag == FALSE) {
1255 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1256 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1257 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
/* Adapter was enabled before: PCC idle, PRC quiescent bit is don't-care. */
1261 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1262 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1263 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1264 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1265 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1273 * verify_xena_quiescence - Checks whether the H/W is ready
1274 * @val64 : Value read from adapter status register.
1275 * @flag : indicates if the adapter enable bit was ever written once
1277 * Description: Returns whether the H/W is ready to go or not. Depending
1278 * on whether adapter enable bit was written or not the comparison
1279 * differs and the calling function passes the input argument flag to
1281 * Return: 1 if Xena is quiescent
1282 * 0 if Xena is not quiescent
/*
 * verify_xena_quiescence - check that every "ready"/"quiescent" status bit
 * is set (the inverted status word masked with the required bits must be
 * zero), then defer the PRC/PCC check to check_prc_pcc_state().
 * NOTE(review): lines are missing from this dump; the final return and
 * closing brace are not visible.
 */
1285 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
/* tmp64 has a 1 for every status bit that is NOT set in val64. */
1288 u64 tmp64 = ~((u64) val64);
1292 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1293 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1294 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1295 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1296 ADAPTER_STATUS_P_PLL_LOCK))) {
1297 ret = check_prc_pcc_state(val64, flag);
1304 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1305 * @sp: Pointer to device specific structure
1307 * New procedure to clear mac address reading problems on Alpha platforms
/*
 * fix_mac_address - work around MAC-address read problems on Alpha by
 * replaying the fix_mac[] value sequence into gpio_control until the
 * END_SIGN terminator, reading back after each write to flush it.
 */
1311 void fix_mac_address(nic_t * sp)
1313 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1317 while (fix_mac[i] != END_SIGN) {
1318 writeq(fix_mac[i++], &bar0->gpio_control);
/* Read-back forces the posted write out to the device. */
1320 val64 = readq(&bar0->gpio_control);
1325 * start_nic - Turns the device on
1326 * @nic : device private variable.
1328 * This function actually turns the device on. Before this function is
1329 * called,all Registers are configured from their reset states
1330 * and shared memory is allocated but the NIC is still quiescent. On
1331 * calling this function, the device interrupts are cleared and the NIC is
1332 * literally switched on by writing into the adapter control register.
1334 * SUCCESS on success and -1 on failure.
/*
 * start_nic - bring the (already configured, quiescent) device up:
 * program the PRC ring bases, enable MC-RLDRAM, clear stale link-change
 * interrupts, verify quiescence, enable interrupts and the laser, and
 * finally kick the link-state task.
 * Returns SUCCESS or -1 (per the header comment above; lines are missing
 * from this dump, so some returns/braces are not visible).
 */
1337 static int start_nic(struct s2io_nic *nic)
1339 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1340 struct net_device *dev = nic->dev;
1341 register u64 val64 = 0;
1344 mac_info_t *mac_control;
1345 struct config_param *config;
1347 mac_control = &nic->mac_control;
1348 config = &nic->config;
1350 /* PRC Initialization and configuration */
1351 for (i = 0; i < config->rx_ring_num; i++) {
/* Base DMA address of ring i's first RxD block. */
1352 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1353 &bar0->prc_rxd0_n[i]);
1355 val64 = readq(&bar0->prc_ctrl_n[i]);
1356 #ifndef CONFIG_2BUFF_MODE
1357 val64 |= PRC_CTRL_RC_ENABLED;
1359 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1361 writeq(val64, &bar0->prc_ctrl_n[i]);
1364 #ifdef CONFIG_2BUFF_MODE
1365 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1366 val64 = readq(&bar0->rx_pa_cfg);
1367 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1368 writeq(val64, &bar0->rx_pa_cfg);
1372 * Enabling MC-RLDRAM. After enabling the device, we timeout
1373 * for around 100ms, which is approximately the time required
1374 * for the device to be ready for operation.
1376 val64 = readq(&bar0->mc_rldram_mrs);
1377 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1378 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1379 val64 = readq(&bar0->mc_rldram_mrs);
1381 msleep(100); /* Delay by around 100 ms. */
1383 /* Enabling ECC Protection. */
/*
 * NOTE(review): the comment says "Enabling" but the code CLEARS
 * ADAPTER_ECC_EN. Whether the bit is active-low in hardware cannot be
 * confirmed from this file — verify against the Xena register spec.
 */
1384 val64 = readq(&bar0->adapter_control);
1385 val64 &= ~ADAPTER_ECC_EN;
1386 writeq(val64, &bar0->adapter_control);
1389 * Clearing any possible Link state change interrupts that
1390 * could have popped up just before Enabling the card.
/* mac_rmac_err_reg is write-1-to-clear: write back what we read. */
1392 val64 = readq(&bar0->mac_rmac_err_reg);
1394 writeq(val64, &bar0->mac_rmac_err_reg);
1397 * Verify if the device is ready to be enabled, if so enable
1400 val64 = readq(&bar0->adapter_status);
1401 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1402 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1403 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1404 (unsigned long long) val64);
1408 /* Enable select interrupts */
1409 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1411 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1414 * With some switches, link might be already up at this point.
1415 * Because of this weird behavior, when we enable laser,
1416 * we may not get link. We need to handle this. We cannot
1417 * figure out which switch is misbehaving. So we are forced to
1418 * make a global change.
1421 /* Enabling Laser. */
1422 val64 = readq(&bar0->adapter_control);
1423 val64 |= ADAPTER_EOI_TX_ON;
1424 writeq(val64, &bar0->adapter_control);
1426 /* SXE-002: Initialize link and activity LED */
1427 subid = nic->pdev->subsystem_device;
1428 if ((subid & 0xFF) >= 0x07) {
1429 val64 = readq(&bar0->gpio_control);
1430 val64 |= 0x0000800000000000ULL;
1431 writeq(val64, &bar0->gpio_control);
1432 val64 = 0x0411040400000000ULL;
/* Magic LED-config register at BAR0 offset 0x2700 (SXE-002 errata). */
1433 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1437 * Don't see link state interrupts on certain switches, so
1438 * directly scheduling a link state task from here.
1440 schedule_work(&nic->set_link_task);
1443 * Here we are performing soft reset on XGXS to
1444 * force link down. Since link is already up, we will get
1445 * link state change interrupt after this reset
1447 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1448 val64 = readq(&bar0->dtx_control);
1450 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1451 val64 = readq(&bar0->dtx_control);
1453 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1454 val64 = readq(&bar0->dtx_control);
1461 * free_tx_buffers - Free all queued Tx buffers
1462 * @nic : device private variable.
1464 * Free all queued Tx buffers.
1465 * Return Value: void
/*
 * free_tx_buffers - forcibly free every skb still queued in every Tx FIFO
 * (used on teardown), zero the TxDs, and reset the get/put offsets.
 * NOTE(review): lines are missing from this dump (e.g. the dev_kfree_skb
 * call and the per-FIFO skb counter increment are not visible).
 */
1468 static void free_tx_buffers(struct s2io_nic *nic)
1470 struct net_device *dev = nic->dev;
1471 struct sk_buff *skb;
1474 mac_info_t *mac_control;
1475 struct config_param *config;
1478 mac_control = &nic->mac_control;
1479 config = &nic->config;
1481 for (i = 0; i < config->tx_fifo_num; i++) {
1482 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1483 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
/* Host_Control holds the skb pointer stashed at xmit time. */
1486 (struct sk_buff *) ((unsigned long) txdp->
1489 memset(txdp, 0, sizeof(TxD_t));
1493 memset(txdp, 0, sizeof(TxD_t));
1497 "%s:forcibly freeing %d skbs on FIFO%d\n",
1499 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1500 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1505 * stop_nic - To stop the nic
1506 * @nic : device private variable.
1508 * This function does exactly the opposite of what the start_nic()
1509 * function does. This function is called to stop the device.
/*
 * stop_nic - inverse of start_nic(): mask all interrupt sources and
 * disable the PRC receive engine on every ring.
 */
1514 static void stop_nic(struct s2io_nic *nic)
1516 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1517 register u64 val64 = 0;
1518 u16 interruptible, i;
1519 mac_info_t *mac_control;
1520 struct config_param *config;
1522 mac_control = &nic->mac_control;
1523 config = &nic->config;
1525 /* Disable all interrupts */
1526 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1528 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Clear the RC-enable bit in each ring's PRC control register. */
1531 for (i = 0; i < config->rx_ring_num; i++) {
1532 val64 = readq(&bar0->prc_ctrl_n[i]);
1533 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1534 writeq(val64, &bar0->prc_ctrl_n[i]);
1539 * fill_rx_buffers - Allocates the Rx side skbs
1540 * @nic: device private variable
1541 * @ring_no: ring number
1543 * The function allocates Rx side skbs and puts the physical
1544 * address of these buffers into the RxD buffer pointers, so that the NIC
1545 * can DMA the received frame into these locations.
1546 * The NIC supports 3 receive modes, viz
1548 * 2. three buffer and
1549 * 3. Five buffer modes.
1550 * Each mode defines how many fragments the received frame will be split
1551 * up into by the NIC. The frame is split into L3 header, L4 Header,
1552 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1553 * is split into 3 fragments. As of now only single buffer mode is
1556 * SUCCESS on success or an appropriate -ve value on failure.
/*
 * fill_rx_buffers - replenish ring @ring_no with skbs until the put
 * pointer would meet the get pointer: allocate an skb, DMA-map it, fill
 * the RxD buffer pointers/sizes and hand the descriptor to the NIC by
 * setting RXD_OWN_XENA. Handles 1-buffer and (under CONFIG_2BUFF_MODE)
 * 3-buffer descriptor layouts, including end-of-block wrap.
 * Returns SUCCESS or a -ve errno (-ENOMEM on skb-allocation failure).
 * NOTE(review): lines are missing from this dump, so several braces and
 * increments (alloc_tab, off) are not visible.
 */
1559 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1561 struct net_device *dev = nic->dev;
1562 struct sk_buff *skb;
1564 int off, off1, size, block_no, block_no1;
1565 int offset, offset1;
1568 mac_info_t *mac_control;
1569 struct config_param *config;
1570 #ifdef CONFIG_2BUFF_MODE
1575 dma_addr_t rxdpphys;
1577 #ifndef CONFIG_S2IO_NAPI
1578 unsigned long flags;
1581 mac_control = &nic->mac_control;
1582 config = &nic->config;
/* Number of RxDs that still need a buffer. */
1583 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1584 atomic_read(&nic->rx_bufs_left[ring_no]);
/* Worst-case frame size for 1-buffer mode. */
1585 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1586 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1588 while (alloc_tab < alloc_cnt) {
1589 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1591 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1593 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1594 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1595 #ifndef CONFIG_2BUFF_MODE
/* 1-buffer mode has one extra (link) RxD per block, hence the +1. */
1596 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1597 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1599 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1600 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1603 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1604 block_virt_addr + off;
/* Ring full: put pointer has caught up with get pointer. */
1605 if ((offset == offset1) && (rxdp->Host_Control)) {
1606 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1607 DBG_PRINT(INTR_DBG, " info equated\n");
1610 #ifndef CONFIG_2BUFF_MODE
/* Hit the block-terminating link RxD: advance to the next block. */
1611 if (rxdp->Control_1 == END_OF_BLOCK) {
1612 mac_control->rings[ring_no].rx_curr_put_info.
1614 mac_control->rings[ring_no].rx_curr_put_info.
1615 block_index %= mac_control->rings[ring_no].block_count;
1616 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1619 off %= (MAX_RXDS_PER_BLOCK + 1);
1620 mac_control->rings[ring_no].rx_curr_put_info.offset =
/* Control_2 of the link RxD holds the next block's virtual address. */
1622 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1623 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1626 #ifndef CONFIG_S2IO_NAPI
/* Publish put position for the non-NAPI Rx path (guarded by put_lock). */
1627 spin_lock_irqsave(&nic->put_lock, flags);
1628 mac_control->rings[ring_no].put_pos =
1629 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1630 spin_unlock_irqrestore(&nic->put_lock, flags);
1633 if (rxdp->Host_Control == END_OF_BLOCK) {
1634 mac_control->rings[ring_no].rx_curr_put_info.
1636 mac_control->rings[ring_no].rx_curr_put_info.block_index
1637 %= mac_control->rings[ring_no].block_count;
1638 block_no = mac_control->rings[ring_no].rx_curr_put_info
1641 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1642 dev->name, block_no,
1643 (unsigned long long) rxdp->Control_1);
1644 mac_control->rings[ring_no].rx_curr_put_info.offset =
1646 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1649 #ifndef CONFIG_S2IO_NAPI
1650 spin_lock_irqsave(&nic->put_lock, flags);
1651 mac_control->rings[ring_no].put_pos = (block_no *
1652 (MAX_RXDS_PER_BLOCK + 1)) + off;
1653 spin_unlock_irqrestore(&nic->put_lock, flags);
1657 #ifndef CONFIG_2BUFF_MODE
/* Descriptor still owned by the NIC: nothing to replenish here. */
1658 if (rxdp->Control_1 & RXD_OWN_XENA)
1660 if (rxdp->Control_2 & BIT(0))
1663 mac_control->rings[ring_no].rx_curr_put_info.
1667 #ifdef CONFIG_2BUFF_MODE
1669 * RxDs Spanning cache lines will be replenished only
1670 * if the succeeding RxD is also owned by Host. It
1671 * will always be the ((8*i)+3) and ((8*i)+6)
1672 * descriptors for the 48 byte descriptor. The offending
1673 * descriptor is of-course the 3rd descriptor.
1675 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1676 block_dma_addr + (off * sizeof(RxD_t));
/* Does this 48-byte RxD straddle a 128-byte cache line? */
1677 if (((u64) (rxdpphys)) % 128 > 80) {
1678 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1679 block_virt_addr + (off + 1);
1680 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1681 nextblk = (block_no + 1) %
1682 (mac_control->rings[ring_no].block_count);
1683 rxdpnext = mac_control->rings[ring_no].rx_blocks
1684 [nextblk].block_virt_addr;
1686 if (rxdpnext->Control_2 & BIT(0))
1691 #ifndef CONFIG_2BUFF_MODE
1692 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1694 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1697 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1698 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1701 #ifndef CONFIG_2BUFF_MODE
1702 skb_reserve(skb, NET_IP_ALIGN);
1703 memset(rxdp, 0, sizeof(RxD_t));
1704 rxdp->Buffer0_ptr = pci_map_single
1705 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1706 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1707 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
/* Stash the skb pointer so the Rx handler can retrieve it later. */
1708 rxdp->Host_Control = (unsigned long) (skb);
/* Ownership transfer to the NIC must be the last field written. */
1709 rxdp->Control_1 |= RXD_OWN_XENA;
1711 off %= (MAX_RXDS_PER_BLOCK + 1);
1712 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1714 ba = &mac_control->rings[ring_no].ba[block_no][off];
1715 skb_reserve(skb, BUF0_LEN);
/* Align skb->data past ALIGN_SIZE boundary for the 2-buffer layout. */
1716 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1718 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1720 memset(rxdp, 0, sizeof(RxD_t));
1721 rxdp->Buffer2_ptr = pci_map_single
1722 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1723 PCI_DMA_FROMDEVICE);
1725 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1726 PCI_DMA_FROMDEVICE);
1728 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1729 PCI_DMA_FROMDEVICE);
1731 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1732 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1733 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1734 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1735 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1736 rxdp->Control_1 |= RXD_OWN_XENA;
1738 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1741 atomic_inc(&nic->rx_bufs_left[ring_no]);
1750 * free_rx_buffers - Frees all Rx buffers
1751 * @sp: device private variable.
1753 * This function will free all Rx buffers allocated by host.
/*
 * free_rx_buffers - walk every RxD of every ring, unmap and free any skb
 * the host still owns, zero the descriptors, and reset the ring get/put
 * state. Used on teardown/reset.
 * NOTE(review): lines are missing from this dump (skb free call, buf_cnt
 * increment and several braces are not visible).
 */
1758 static void free_rx_buffers(struct s2io_nic *sp)
1760 struct net_device *dev = sp->dev;
1761 int i, j, blk = 0, off, buf_cnt = 0;
1763 struct sk_buff *skb;
1764 mac_info_t *mac_control;
1765 struct config_param *config;
1766 #ifdef CONFIG_2BUFF_MODE
1770 mac_control = &sp->mac_control;
1771 config = &sp->config;
1773 for (i = 0; i < config->rx_ring_num; i++) {
1774 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1775 off = j % (MAX_RXDS_PER_BLOCK + 1);
1776 rxdp = mac_control->rings[i].rx_blocks[blk].
1777 block_virt_addr + off;
1779 #ifndef CONFIG_2BUFF_MODE
/* Link RxD marks end of block; follow it to the next block. */
1780 if (rxdp->Control_1 == END_OF_BLOCK) {
1782 (RxD_t *) ((unsigned long) rxdp->
1788 if (rxdp->Host_Control == END_OF_BLOCK) {
/* NIC still owns this RxD (no skb attached): just clear it. */
1794 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
1795 memset(rxdp, 0, sizeof(RxD_t));
1800 (struct sk_buff *) ((unsigned long) rxdp->
1803 #ifndef CONFIG_2BUFF_MODE
1804 pci_unmap_single(sp->pdev, (dma_addr_t)
1807 HEADER_ETHERNET_II_802_3_SIZE
1808 + HEADER_802_2_SIZE +
1810 PCI_DMA_FROMDEVICE);
/* 2-buffer mode: unmap all three buffers of this descriptor. */
1812 ba = &mac_control->rings[i].ba[blk][off];
1813 pci_unmap_single(sp->pdev, (dma_addr_t)
1816 PCI_DMA_FROMDEVICE);
1817 pci_unmap_single(sp->pdev, (dma_addr_t)
1820 PCI_DMA_FROMDEVICE);
1821 pci_unmap_single(sp->pdev, (dma_addr_t)
1823 dev->mtu + BUF0_LEN + 4,
1824 PCI_DMA_FROMDEVICE);
1827 atomic_dec(&sp->rx_bufs_left[i]);
1830 memset(rxdp, 0, sizeof(RxD_t));
/* Reset ring bookkeeping so a subsequent fill starts from block 0. */
1832 mac_control->rings[i].rx_curr_put_info.block_index = 0;
1833 mac_control->rings[i].rx_curr_get_info.block_index = 0;
1834 mac_control->rings[i].rx_curr_put_info.offset = 0;
1835 mac_control->rings[i].rx_curr_get_info.offset = 0;
1836 atomic_set(&sp->rx_bufs_left[i], 0);
1837 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1838 dev->name, buf_cnt, i);
1843 * s2io_poll - Rx interrupt handler for NAPI support
1844 * @dev : pointer to the device structure.
1845 * @budget : The number of packets that were budgeted to be processed
1846 * during one pass through the 'Poll" function.
1848 * Comes into picture only if NAPI support has been incorporated. It does
1849 * the same thing that rx_intr_handler does, but not in a interrupt context
1850 * also It will process only a given number of packets.
1852 * 0 on success and 1 if there are No Rx packets to be processed.
/*
 * s2io_poll - NAPI poll callback: clear rx_traffic_int, process up to the
 * budgeted number of packets across all rings, refill Rx buffers, and
 * re-enable Rx interrupts when the quota was not exhausted.
 * Returns 0 when done (quota not met) and 1 when more work remains (per
 * the header comment above; the return lines are missing from this dump).
 */
1855 #if defined(CONFIG_S2IO_NAPI)
1856 static int s2io_poll(struct net_device *dev, int *budget)
1858 nic_t *nic = dev->priv;
1859 int pkt_cnt = 0, org_pkts_to_process;
1860 mac_info_t *mac_control;
1861 struct config_param *config;
/*
 * NOTE(review): this cast drops the __iomem qualifier that every other
 * function in this file keeps on bar0 — sparse will warn; verify intent.
 */
1862 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
1866 mac_control = &nic->mac_control;
1867 config = &nic->config;
1869 nic->pkts_to_process = *budget;
1870 if (nic->pkts_to_process > dev->quota)
1871 nic->pkts_to_process = dev->quota;
1872 org_pkts_to_process = nic->pkts_to_process;
/* rx_traffic_int is write-1-to-clear: write back the value read. */
1874 val64 = readq(&bar0->rx_traffic_int);
1875 writeq(val64, &bar0->rx_traffic_int);
1877 for (i = 0; i < config->rx_ring_num; i++) {
1878 rx_intr_handler(&mac_control->rings[i]);
1879 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
1880 if (!nic->pkts_to_process) {
1881 /* Quota for the current iteration has been met */
1888 dev->quota -= pkt_cnt;
1890 netif_rx_complete(dev);
1892 for (i = 0; i < config->rx_ring_num; i++) {
1893 if (fill_rx_buffers(nic, i) == -ENOMEM) {
1894 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
1895 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
1899 /* Re enable the Rx interrupts. */
1900 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
/* Quota exhausted: stay on the poll list, just refill buffers. */
1904 dev->quota -= pkt_cnt;
1907 for (i = 0; i < config->rx_ring_num; i++) {
1908 if (fill_rx_buffers(nic, i) == -ENOMEM) {
1909 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
1910 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
1919 * rx_intr_handler - Rx interrupt handler
1920 * @nic: device private variable.
1922 * If the interrupt is because of a received frame or if the
1923 * receive ring contains fresh as yet un-processed frames,this function is
1924 * called. It picks out the RxD at which place the last Rx processing had
1925 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * rx_intr_handler - walk one ring from the last processed RxD up to the
 * put position, unmapping each completed buffer and passing it to
 * rx_osm_handler(), handling end-of-block wrap and (under NAPI) the
 * per-poll packet quota.
 * NOTE(review): lines are missing from this dump (loop closing brace,
 * offset increments), so the flow below is partial.
 */
1930 static void rx_intr_handler(ring_info_t *ring_data)
1932 nic_t *nic = ring_data->nic;
1933 struct net_device *dev = (struct net_device *) nic->dev;
1934 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1935 int get_block, get_offset, put_block, put_offset, ring_bufs;
1936 rx_curr_get_info_t get_info, put_info;
1938 struct sk_buff *skb;
1939 #ifndef CONFIG_S2IO_NAPI
1945 * rx_traffic_int reg is an R1 register, hence we read and write
1946 * back the same value in the register to clear it
/*
 * NOTE(review): the comment above says rx_traffic_int but the code
 * below touches tx_traffic_int — one of the two looks wrong; confirm
 * against the original driver before relying on either.
 */
1948 val64 = readq(&bar0->tx_traffic_int);
1949 writeq(val64, &bar0->tx_traffic_int);
1951 get_info = ring_data->rx_curr_get_info;
1952 get_block = get_info.block_index;
1953 put_info = ring_data->rx_curr_put_info;
1954 put_block = put_info.block_index;
1955 ring_bufs = get_info.ring_len+1;
1956 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
1958 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1960 #ifndef CONFIG_S2IO_NAPI
/* Non-NAPI: put_pos is shared with fill_rx_buffers, read under lock. */
1961 spin_lock(&nic->put_lock);
1962 put_offset = ring_data->put_pos;
1963 spin_unlock(&nic->put_lock);
1965 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1968 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1969 #ifdef CONFIG_2BUFF_MODE
/*
 * NOTE(review): precedence bug suspect — '!' binds before '&', so this
 * evaluates (!rxdp->Control_2) & BIT(0). The intended test is almost
 * certainly !(rxdp->Control_2 & BIT(0)) (buffer-empty bit clear).
 */
1970 (!rxdp->Control_2 & BIT(0)) &&
1972 (((get_offset + 1) % ring_bufs) != put_offset)) {
1973 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
1975 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1977 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1980 #ifndef CONFIG_2BUFF_MODE
1981 pci_unmap_single(nic->pdev, (dma_addr_t)
1984 HEADER_ETHERNET_II_802_3_SIZE +
1987 PCI_DMA_FROMDEVICE);
1989 pci_unmap_single(nic->pdev, (dma_addr_t)
1991 BUF0_LEN, PCI_DMA_FROMDEVICE);
1992 pci_unmap_single(nic->pdev, (dma_addr_t)
1994 BUF1_LEN, PCI_DMA_FROMDEVICE);
1995 pci_unmap_single(nic->pdev, (dma_addr_t)
1997 dev->mtu + BUF0_LEN + 4,
1998 PCI_DMA_FROMDEVICE);
/* Hand the completed frame up the stack. */
2000 rx_osm_handler(ring_data, rxdp);
2002 ring_data->rx_curr_get_info.offset =
2004 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* End of block reached: wrap to the next block's first RxD. */
2006 if (get_info.offset &&
2007 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2008 get_info.offset = 0;
2009 ring_data->rx_curr_get_info.offset
2012 get_block %= ring_data->block_count;
2013 ring_data->rx_curr_get_info.block_index
2015 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2018 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2020 #ifdef CONFIG_S2IO_NAPI
2021 nic->pkts_to_process -= 1;
2022 if (!nic->pkts_to_process)
2026 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2033 * tx_intr_handler - Transmit interrupt handler
2034 * @nic : device private variable
2036 * If an interrupt was raised to indicate DMA complete of the
2037 * Tx packet, this function is called. It identifies the last TxD
2038 * whose buffer was freed and frees all skbs whose data have already
2039 * DMA'ed into the NICs internal memory.
/*
 * tx_intr_handler - reap completed Tx descriptors of one FIFO: clear the
 * write-1-to-clear tx_traffic_int, walk from the get offset until an
 * NIC-owned TxD or the put offset, unmap each fragment, free the skb,
 * update stats, and wake the queue if it was stopped.
 * NOTE(review): lines are missing from this dump (TSO/UFO handling if
 * any, offset increment, several braces).
 */
2044 static void tx_intr_handler(fifo_info_t *fifo_data)
2046 nic_t *nic = fifo_data->nic;
2047 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2048 struct net_device *dev = (struct net_device *) nic->dev;
2049 tx_curr_get_info_t get_info, put_info;
2050 struct sk_buff *skb;
2053 register u64 val64 = 0;
2056 * tx_traffic_int reg is an R1 register, hence we read and write
2057 * back the same value in the register to clear it
2059 val64 = readq(&bar0->tx_traffic_int);
2060 writeq(val64, &bar0->tx_traffic_int);
2062 get_info = fifo_data->tx_curr_get_info;
2063 put_info = fifo_data->tx_curr_put_info;
2064 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2066 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2067 (get_info.offset != put_info.offset) &&
2068 (txdlp->Host_Control)) {
2069 /* Check for TxD errors */
2070 if (txdlp->Control_1 & TXD_T_CODE) {
2071 unsigned long long err;
2072 err = txdlp->Control_1 & TXD_T_CODE;
2073 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2077 skb = (struct sk_buff *) ((unsigned long)
2078 txdlp->Host_Control);
2080 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2082 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2086 frg_cnt = skb_shinfo(skb)->nr_frags;
2087 nic->tx_pkt_count++;
/* Unmap the linear part, then each page fragment. */
2089 pci_unmap_single(nic->pdev, (dma_addr_t)
2090 txdlp->Buffer_Pointer,
2091 skb->len - skb->data_len,
2097 for (j = 0; j < frg_cnt; j++, txdlp++) {
2099 &skb_shinfo(skb)->frags[j];
2100 pci_unmap_page(nic->pdev,
/* Clear the whole TxD list entry for reuse. */
2110 (sizeof(TxD_t) * fifo_data->max_txds));
2112 /* Updating the statistics block */
2113 nic->stats.tx_packets++;
2114 nic->stats.tx_bytes += skb->len;
/* In interrupt context: must use the _irq variant of kfree_skb. */
2115 dev_kfree_skb_irq(skb);
2118 get_info.offset %= get_info.fifo_len + 1;
2119 txdlp = (TxD_t *) fifo_data->list_info
2120 [get_info.offset].list_virt_addr;
2121 fifo_data->tx_curr_get_info.offset =
/* Descriptors were freed: restart the queue if xmit had stopped it. */
2125 spin_lock(&nic->tx_lock);
2126 if (netif_queue_stopped(dev))
2127 netif_wake_queue(dev);
2128 spin_unlock(&nic->tx_lock);
2132 * alarm_intr_handler - Alarm Interrupt handler
2133 * @nic: device private variable
2134 * Description: If the interrupt was neither because of Rx packet or Tx
2135 * complete, this function is called. If the interrupt was to indicate
2136 * a loss of link, the OSM link status handler is invoked for any other
2137 * alarm interrupt the block that raised the interrupt is displayed
2138 * and a H/W reset is issued.
/*
 * alarm_intr_handler - service non-traffic interrupts: link state change
 * (schedules set_link_task), serious errors (stops the queue and
 * schedules a reset), and the PCC_FB_ECC double-bit errata (recycles the
 * adapter-enable bit).
 */
2143 static void alarm_intr_handler(struct s2io_nic *nic)
2145 struct net_device *dev = (struct net_device *) nic->dev;
2146 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2147 register u64 val64 = 0, err_reg = 0;
2149 /* Handling link status change error Intr */
/* Write-1-to-clear: acknowledge everything we just read. */
2150 err_reg = readq(&bar0->mac_rmac_err_reg);
2151 writeq(err_reg, &bar0->mac_rmac_err_reg);
2152 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2153 schedule_work(&nic->set_link_task);
2156 /* In case of a serious error, the device will be Reset. */
2157 val64 = readq(&bar0->serr_source);
2158 if (val64 & SERR_SOURCE_ANY) {
2159 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2160 DBG_PRINT(ERR_DBG, "serious error!!\n");
2161 netif_stop_queue(dev);
2162 schedule_work(&nic->rst_timer_task);
2166 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2167 * Error occurs, the adapter will be recycled by disabling the
2168 * adapter enable bit and enabling it again after the device
2169 * becomes Quiescent.
2171 val64 = readq(&bar0->pcc_err_reg);
2172 writeq(val64, &bar0->pcc_err_reg);
2173 if (val64 & PCC_FB_ECC_DB_ERR) {
2174 u64 ac = readq(&bar0->adapter_control);
2175 ac &= ~(ADAPTER_CNTL_EN);
2176 writeq(ac, &bar0->adapter_control);
/* Read-back flushes the disable; set_link_task re-enables later. */
2177 ac = readq(&bar0->adapter_control);
2178 schedule_work(&nic->set_link_task);
2181 /* Other type of interrupts are not being handled now, TODO */
2185 * wait_for_cmd_complete - waits for a command to complete.
2186 * @sp : private member of the device structure, which is a pointer to the
2187 * s2io_nic structure.
2188 * Description: Function that waits for a command to Write into RMAC
2189 * ADDR DATA registers to be completed and returns either success or
2190 * error depending on whether the command was complete or not.
2192 * SUCCESS on success and FAILURE on failure.
/*
 * wait_for_cmd_complete - poll rmac_addr_cmd_mem until the STROBE
 * "executing" bit clears; returns SUCCESS or FAILURE.
 * NOTE(review): the retry/delay loop and return are missing from this
 * dump (embedded line numbers jump past them).
 */
2195 int wait_for_cmd_complete(nic_t * sp)
2197 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2198 int ret = FAILURE, cnt = 0;
2202 val64 = readq(&bar0->rmac_addr_cmd_mem);
2203 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2216 * s2io_reset - Resets the card.
2217 * @sp : private member of the device structure.
2218 * Description: Function to Reset the card. This function then also
2219 * restores the previously saved PCI configuration space registers as
2220 * the card reset also resets the configuration space.
/*
 * s2io_reset - issue a software reset (SW_RESET_ALL), wait for the posted
 * PCI write to land, restore PCI config space, reprogram the swapper,
 * clear OS statistics, and apply the SXE-002 LED workaround.
 */
2225 void s2io_reset(nic_t * sp)
2227 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2231 val64 = SW_RESET_ALL;
2232 writeq(val64, &bar0->sw_reset);
2235 * At this stage, if the PCI write is indeed completed, the
2236 * card is reset and so is the PCI Config space of the device.
2237 * So a read cannot be issued at this stage on any of the
2238 * registers to ensure the write into "sw_reset" register
2240 * Question: Is there any system call that will explicitly force
2241 * all the write commands still pending on the bus to be pushed
2243 * As of now I'am just giving a 250ms delay and hoping that the
2244 * PCI write to sw_reset register is done by this time.
2248 /* Restore the PCI state saved during initializarion. */
2249 pci_restore_state(sp->pdev);
2255 /* Set swapper to enable I/O register access */
2256 s2io_set_swapper(sp);
2258 /* Reset device statistics maintained by OS */
2259 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2261 /* SXE-002: Configure link and activity LED to turn it off */
2262 subid = sp->pdev->subsystem_device;
2263 if ((subid & 0xFF) >= 0x07) {
2264 val64 = readq(&bar0->gpio_control);
2265 val64 |= 0x0000800000000000ULL;
2266 writeq(val64, &bar0->gpio_control);
2267 val64 = 0x0411040400000000ULL;
/* Same magic LED register at BAR0 + 0x2700 as in start_nic(). */
2268 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
/* Reset forgets the enable history; start_nic() will re-learn it. */
2271 sp->device_enabled_once = FALSE;
2275 * s2io_set_swapper - to set the swapper controle on the card
2276 * @sp : private member of the device structure,
2277 * pointer to the s2io_nic structure.
2278 * Description: Function to set the swapper control on the card
2279 * correctly depending on the 'endianness' of the system.
2281 * SUCCESS on success and FAILURE on failure.
/*
 * s2io_set_swapper - program swapper_ctrl for the host's endianness:
 * try candidate swapper values until the pif_rd_swapper_fb feedback
 * register reads the expected 0x0123456789ABCDEF signature, verify the
 * write path via xmsi_address, then set the final per-path FE/SE bits.
 * Returns SUCCESS or FAILURE (per the header comment above; some return
 * lines are missing from this dump).
 */
2284 int s2io_set_swapper(nic_t * sp)
2286 struct net_device *dev = sp->dev;
2287 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2288 u64 val64, valt, valr;
2291 * Set proper endian settings and verify the same by reading
2292 * the PIF Feed-back register.
2295 val64 = readq(&bar0->pif_rd_swapper_fb);
2296 if (val64 != 0x0123456789ABCDEFULL) {
/* Candidate read-path swapper settings, tried in order. */
2298 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2299 0x8100008181000081ULL, /* FE=1, SE=0 */
2300 0x4200004242000042ULL, /* FE=0, SE=1 */
2301 0}; /* FE=0, SE=0 */
2304 writeq(value[i], &bar0->swapper_ctrl);
2305 val64 = readq(&bar0->pif_rd_swapper_fb);
2306 if (val64 == 0x0123456789ABCDEFULL)
2311 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2313 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2314 (unsigned long long) val64);
/* Preserve the read-path bits found above while probing the write path. */
2319 valr = readq(&bar0->swapper_ctrl);
2322 valt = 0x0123456789ABCDEFULL;
2323 writeq(valt, &bar0->xmsi_address);
2324 val64 = readq(&bar0->xmsi_address);
2328 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2329 0x0081810000818100ULL, /* FE=1, SE=0 */
2330 0x0042420000424200ULL, /* FE=0, SE=1 */
2331 0}; /* FE=0, SE=0 */
2334 writeq((value[i] | valr), &bar0->swapper_ctrl);
2335 writeq(valt, &bar0->xmsi_address);
2336 val64 = readq(&bar0->xmsi_address);
2342 unsigned long long x = val64;
2343 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2344 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2348 val64 = readq(&bar0->swapper_ctrl);
/* Keep only the high 16 bits; the FE/SE bits below are rebuilt. */
2349 val64 &= 0xFFFF000000000000ULL;
2353 * The device by default set to a big endian format, so a
2354 * big endian driver need not set anything.
2356 val64 |= (SWAPPER_CTRL_TXP_FE |
2357 SWAPPER_CTRL_TXP_SE |
2358 SWAPPER_CTRL_TXD_R_FE |
2359 SWAPPER_CTRL_TXD_W_FE |
2360 SWAPPER_CTRL_TXF_R_FE |
2361 SWAPPER_CTRL_RXD_R_FE |
2362 SWAPPER_CTRL_RXD_W_FE |
2363 SWAPPER_CTRL_RXF_W_FE |
2364 SWAPPER_CTRL_XMSI_FE |
2365 SWAPPER_CTRL_XMSI_SE |
2366 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2367 writeq(val64, &bar0->swapper_ctrl);
2370 * Initially we enable all bits to make it accessible by the
2371 * driver, then we selectively enable only those bits that
2374 val64 |= (SWAPPER_CTRL_TXP_FE |
2375 SWAPPER_CTRL_TXP_SE |
2376 SWAPPER_CTRL_TXD_R_FE |
2377 SWAPPER_CTRL_TXD_R_SE |
2378 SWAPPER_CTRL_TXD_W_FE |
2379 SWAPPER_CTRL_TXD_W_SE |
2380 SWAPPER_CTRL_TXF_R_FE |
2381 SWAPPER_CTRL_RXD_R_FE |
2382 SWAPPER_CTRL_RXD_R_SE |
2383 SWAPPER_CTRL_RXD_W_FE |
2384 SWAPPER_CTRL_RXD_W_SE |
2385 SWAPPER_CTRL_RXF_W_FE |
2386 SWAPPER_CTRL_XMSI_FE |
2387 SWAPPER_CTRL_XMSI_SE |
2388 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2389 writeq(val64, &bar0->swapper_ctrl);
2391 val64 = readq(&bar0->swapper_ctrl);
2394 * Verifying if endian settings are accurate by reading a
2395 * feedback register.
2397 val64 = readq(&bar0->pif_rd_swapper_fb);
2398 if (val64 != 0x0123456789ABCDEFULL) {
2399 /* Endian settings are incorrect, calls for another dekko. */
2400 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2402 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2403 (unsigned long long) val64);
2410 /* ********************************************************* *
2411 * Functions defined below concern the OS part of the driver *
2412 * ********************************************************* */
2415 * s2io_open - open entry point of the driver
2416 * @dev : pointer to the device structure.
2418 * This function is the open entry point of the driver. It mainly calls a
2419 * function to allocate Rx buffers and inserts them into the buffer
2420 * descriptors and then enables the Rx part of the NIC.
2422 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2426 int s2io_open(struct net_device *dev)
2428 nic_t *sp = dev->priv;
2432 * Make sure you have link off by default every time
2433 * Nic is initialized
2435 netif_carrier_off(dev);
2436 sp->last_link_state = LINK_DOWN;
2438 /* Initialize H/W and enable interrupts */
2439 if (s2io_card_up(sp)) {
2440 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2443 goto hw_init_failed;
/*
 * After proper initialization of H/W, register ISR.
 * SA_SHIRQ: the PCI interrupt line may be shared with other devices.
 */
2446 /* After proper initialization of H/W, register ISR */
2447 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2450 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2452 goto isr_registration_failed;
2455 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2456 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2458 goto setting_mac_address_failed;
2461 netif_start_queue(dev);
/* Error paths: labels unwind in reverse order of acquisition. */
2464 setting_mac_address_failed:
2465 free_irq(sp->pdev->irq, dev);
2466 isr_registration_failed:
2473 * s2io_close -close entry point of the driver
2474 * @dev : device pointer.
2476 * This is the stop entry point of the driver. It needs to undo exactly
2477 * whatever was done by the open entry point,thus it's usually referred to
2478 * as the close function.Among other things this function mainly stops the
2479 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2481 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2485 int s2io_close(struct net_device *dev)
2487 nic_t *sp = dev->priv;
/* Drain any pending deferred work before tearing the device down. */
2488 flush_scheduled_work();
2489 netif_stop_queue(dev);
2490 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2493 free_irq(sp->pdev->irq, dev);
2494 sp->device_close_flag = TRUE; /* Device is shut down. */
2499 * s2io_xmit - Tx entry point of the driver
2500 * @skb : the socket buffer containing the Tx data.
2501 * @dev : device pointer.
2503 * This function is the Tx entry point of the driver. S2IO NIC supports
2504 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2505 * NOTE: when device cant queue the pkt,just the trans_start variable will
2508 * 0 on success & 1 on failure.
2511 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2513 nic_t *sp = dev->priv;
2514 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2517 TxFIFO_element_t __iomem *tx_fifo;
2518 unsigned long flags;
2522 mac_info_t *mac_control;
2523 struct config_param *config;
2524 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2526 mac_control = &sp->mac_control;
2527 config = &sp->config;
2529 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
/* tx_lock serializes descriptor allocation against reset/teardown. */
2530 spin_lock_irqsave(&sp->tx_lock, flags);
2531 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2532 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2534 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Ring-full check: a TxD still owned by a pending skb, or the "put"
 * pointer about to wrap onto "get", means no free descriptors. */
2541 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2542 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2543 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2546 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2547 /* Avoid "put" pointer going beyond "get" pointer */
2548 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2549 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2550 netif_stop_queue(dev);
2552 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* LSO: pass the TCP segmentation size down to the NIC. */
2556 mss = skb_shinfo(skb)->tso_size;
2558 txdp->Control_1 |= TXD_TCP_LSO_EN;
2559 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
/* DMA-map the linear part of the skb; frg_len is the non-paged length. */
2563 frg_cnt = skb_shinfo(skb)->nr_frags;
2564 frg_len = skb->len - skb->data_len;
2566 txdp->Buffer_Pointer = pci_map_single
2567 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
/* Stash the skb pointer so the Tx-complete handler can free it. */
2568 txdp->Host_Control = (unsigned long) skb;
2569 if (skb->ip_summed == CHECKSUM_HW) {
2571 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2575 txdp->Control_2 |= config->tx_intr_type;
2577 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2578 TXD_GATHER_CODE_FIRST);
/* Hand this descriptor list to the NIC. */
2579 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2581 /* For fragmented SKB. */
2582 for (i = 0; i < frg_cnt; i++) {
2583 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2585 txdp->Buffer_Pointer = (u64) pci_map_page
2586 (sp->pdev, frag->page, frag->page_offset,
2587 frag->size, PCI_DMA_TODEVICE);
2588 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2590 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Ring the doorbell: program the TxD list address, then the list
 * control word with the last-TxD index. */
2592 tx_fifo = mac_control->tx_FIFO_start[queue];
2593 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2594 writeq(val64, &tx_fifo->TxDL_Pointer);
2596 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2601 val64 |= TX_FIFO_SPECIAL_FUNC;
2603 writeq(val64, &tx_fifo->List_Control);
2605 /* Perform a PCI read to flush previous writes */
2606 val64 = readq(&bar0->general_int_status);
/* Advance the "put" pointer with wraparound. */
2609 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2610 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2612 /* Avoid "put" pointer going beyond "get" pointer */
2613 if (((put_off + 1) % queue_len) == get_off) {
2615 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2617 netif_stop_queue(dev);
2620 dev->trans_start = jiffies;
2621 spin_unlock_irqrestore(&sp->tx_lock, flags);
2627 * s2io_isr - ISR handler of the device .
2628 * @irq: the irq of the device.
2629 * @dev_id: a void pointer to the dev structure of the NIC.
2630 * @pt_regs: pointer to the registers pushed on the stack.
2631 * Description: This function is the ISR handler of the device. It
2632 * identifies the reason for the interrupt and calls the relevant
2633 * service routines. As a contingency measure, this ISR allocates the
2634 * recv buffers, if their numbers are below the panic value which is
2635 * presently set to 25% of the original number of rcv buffers allocated.
2637 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2638 * IRQ_NONE: will be returned if interrupt is not from our device
2640 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2642 struct net_device *dev = (struct net_device *) dev_id;
2643 nic_t *sp = dev->priv;
2644 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2647 mac_info_t *mac_control;
2648 struct config_param *config;
2650 mac_control = &sp->mac_control;
2651 config = &sp->config;
2654 * Identify the cause for interrupt and call the appropriate
2655 * interrupt handler. Causes for the interrupt could be;
2659 * 4. Error in any functional blocks of the NIC.
2661 reason = readq(&bar0->general_int_status);
2664 /* The interrupt was not raised by Xena. */
2668 if (reason & (GEN_ERROR_INTR))
2669 alarm_intr_handler(sp);
/* NAPI build: mask Rx interrupts and defer Rx work to the poll loop. */
2671 #ifdef CONFIG_S2IO_NAPI
2672 if (reason & GEN_INTR_RXTRAFFIC) {
2673 if (netif_rx_schedule_prep(dev)) {
2674 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2676 __netif_rx_schedule(dev);
/* Non-NAPI build: service every Rx ring directly from the ISR. */
2680 /* If Intr is because of Rx Traffic */
2681 if (reason & GEN_INTR_RXTRAFFIC) {
2682 for (i = 0; i < config->rx_ring_num; i++) {
2683 rx_intr_handler(&mac_control->rings[i]);
2688 /* If Intr is because of Tx Traffic */
2689 if (reason & GEN_INTR_TXTRAFFIC) {
2690 for (i = 0; i < config->tx_fifo_num; i++)
2691 tx_intr_handler(&mac_control->fifos[i]);
2695 * If the Rx buffer count is below the panic threshold then
2696 * reallocate the buffers from the interrupt handler itself,
2697 * else schedule a tasklet to reallocate the buffers.
2699 #ifndef CONFIG_S2IO_NAPI
2700 for (i = 0; i < config->rx_ring_num; i++) {
2702 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2703 int level = rx_buffer_level(sp, rxb_size, i);
/* PANIC level: refill inline (tasklet_status bit 0 acts as a
 * busy flag shared with the refill tasklet). */
2705 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2706 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2707 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2708 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2709 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2711 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2712 clear_bit(0, (&sp->tasklet_status));
2715 clear_bit(0, (&sp->tasklet_status));
2716 } else if (level == LOW) {
2717 tasklet_schedule(&sp->task);
2726 * s2io_get_stats - Updates the device statistics structure.
2727 * @dev : pointer to the device structure.
2729 * This function updates the device statistics structure in the s2io_nic
2730 * structure and returns a pointer to the same.
2732 * pointer to the updated net_device_stats structure.
2735 struct net_device_stats *s2io_get_stats(struct net_device *dev)
2737 nic_t *sp = dev->priv;
2738 mac_info_t *mac_control;
2739 struct config_param *config;
2742 mac_control = &sp->mac_control;
2743 config = &sp->config;
/* Hardware stat counters are little-endian 32-bit values; convert
 * to host order before exporting them through net_device_stats. */
2745 sp->stats.tx_errors =
2746 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
2747 sp->stats.rx_errors =
2748 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
2749 sp->stats.multicast =
2750 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
2751 sp->stats.rx_length_errors =
2752 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
2754 return (&sp->stats);
2758 * s2io_set_multicast - entry point for multicast address enable/disable.
2759 * @dev : pointer to the device structure
2761 * This function is a driver entry point which gets called by the kernel
2762 * whenever multicast addresses must be enabled/disabled. This also gets
2763 * called to set/reset promiscuous mode. Depending on the device flag, we
2764 * determine, if multicast address must be enabled or if promiscuous mode
2765 * is to be disabled etc.
2770 static void s2io_set_multicast(struct net_device *dev)
2773 struct dev_mc_list *mclist;
2774 nic_t *sp = dev->priv;
2775 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2776 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
2778 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
/* Enter all-multicast: program the catch-all multicast entry into the
 * RMAC address CAM at the dedicated ALL_MC offset. */
2781 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
2782 /* Enable all Multicast addresses */
2783 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
2784 &bar0->rmac_addr_data0_mem);
2785 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
2786 &bar0->rmac_addr_data1_mem);
2787 val64 = RMAC_ADDR_CMD_MEM_WE |
2788 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2789 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
2790 writeq(val64, &bar0->rmac_addr_cmd_mem);
2791 /* Wait till command completes */
2792 wait_for_cmd_complete(sp);
2795 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
2796 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
2797 /* Disable all Multicast addresses */
2798 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2799 &bar0->rmac_addr_data0_mem);
2800 val64 = RMAC_ADDR_CMD_MEM_WE |
2801 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2802 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
2803 writeq(val64, &bar0->rmac_addr_cmd_mem);
2804 /* Wait till command completes */
2805 wait_for_cmd_complete(sp);
2808 sp->all_multi_pos = 0;
2811 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
2812 /* Put the NIC into promiscuous mode */
2813 add = &bar0->mac_cfg;
2814 val64 = readq(&bar0->mac_cfg);
2815 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: each 32-bit half must be preceded by
 * a write of the unlock key to rmac_cfg_key. */
2817 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2818 writel((u32) val64, add);
2819 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2820 writel((u32) (val64 >> 32), (add + 4));
2822 val64 = readq(&bar0->mac_cfg);
2823 sp->promisc_flg = 1;
2824 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
2826 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
2827 /* Remove the NIC from promiscuous mode */
2828 add = &bar0->mac_cfg;
2829 val64 = readq(&bar0->mac_cfg);
2830 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
2832 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2833 writel((u32) val64, add);
2834 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2835 writel((u32) (val64 >> 32), (add + 4));
2837 val64 = readq(&bar0->mac_cfg);
2838 sp->promisc_flg = 0;
2839 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
2843 /* Update individual M_CAST address list */
2844 if ((!sp->m_cast_flg) && dev->mc_count) {
2846 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
2847 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
2849 DBG_PRINT(ERR_DBG, "can be added, please enable ");
2850 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
2854 prev_cnt = sp->mc_addr_count;
2855 sp->mc_addr_count = dev->mc_count;
2857 /* Clear out the previous list of Mc in the H/W. */
2858 for (i = 0; i < prev_cnt; i++) {
2859 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2860 &bar0->rmac_addr_data0_mem);
2861 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
2862 &bar0->rmac_addr_data1_mem);
2863 val64 = RMAC_ADDR_CMD_MEM_WE |
2864 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2865 RMAC_ADDR_CMD_MEM_OFFSET
2866 (MAC_MC_ADDR_START_OFFSET + i);
2867 writeq(val64, &bar0->rmac_addr_cmd_mem);
2869 /* Wait for command completes */
2870 if (wait_for_cmd_complete(sp)) {
2871 DBG_PRINT(ERR_DBG, "%s: Adding ",
2873 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
2878 /* Create the new Rx filter list and update the same in H/W. */
2879 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2880 i++, mclist = mclist->next) {
2881 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6-byte MAC address into a u64, MSB first. */
2883 for (j = 0; j < ETH_ALEN; j++) {
2884 mac_addr |= mclist->dmi_addr[j];
2888 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
2889 &bar0->rmac_addr_data0_mem);
2890 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
2891 &bar0->rmac_addr_data1_mem);
2892 val64 = RMAC_ADDR_CMD_MEM_WE |
2893 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2894 RMAC_ADDR_CMD_MEM_OFFSET
2895 (i + MAC_MC_ADDR_START_OFFSET);
2896 writeq(val64, &bar0->rmac_addr_cmd_mem);
2898 /* Wait for command completes */
2899 if (wait_for_cmd_complete(sp)) {
2900 DBG_PRINT(ERR_DBG, "%s: Adding ",
2902 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
2910 * s2io_set_mac_addr - Programs the Xframe mac address
2911 * @dev : pointer to the device structure.
2912 * @addr: a uchar pointer to the new mac address which is to be set.
2913 * Description : This procedure will program the Xframe to receive
2914 * frames with new Mac Address
2915 * Return value: SUCCESS on success and an appropriate (-)ve integer
2916 * as defined in errno.h file on failure.
2919 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
2921 nic_t *sp = dev->priv;
2922 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2923 register u64 val64, mac_addr = 0;
2927 * Set the new MAC address as the new unicast filter and reflect this
2928 * change on the device address registered with the OS. It will be
/* Pack the 6-byte address into a u64, first byte in the MSB. */
2931 for (i = 0; i < ETH_ALEN; i++) {
2933 mac_addr |= addr[i];
2936 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
2937 &bar0->rmac_addr_data0_mem);
/* CAM offset 0 holds the primary unicast address. */
2940 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2941 RMAC_ADDR_CMD_MEM_OFFSET(0);
2942 writeq(val64, &bar0->rmac_addr_cmd_mem);
2943 /* Wait till command completes */
2944 if (wait_for_cmd_complete(sp)) {
2945 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
2953 * s2io_ethtool_sset - Sets different link parameters.
2954 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
2955 * @info: pointer to the structure with parameters given by ethtool to set
2958 * The function sets different link parameters provided by the user onto
2964 static int s2io_ethtool_sset(struct net_device *dev,
2965 struct ethtool_cmd *info)
2967 nic_t *sp = dev->priv;
/* The Xframe link is fixed at 10Gbps full-duplex with no autoneg;
 * reject any other requested configuration. */
2968 if ((info->autoneg == AUTONEG_ENABLE) ||
2969 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
2972 s2io_close(sp->dev);
2980 * s2io_ethtool_gset - Return link specific information.
2981 * @sp : private member of the device structure, pointer to the
2982 * s2io_nic structure.
2983 * @info : pointer to the structure with parameters given by ethtool
2984 * to return link information.
2986 * Returns link specific information like speed, duplex etc.. to ethtool.
2988 * return 0 on success.
2991 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
2993 nic_t *sp = dev->priv;
2994 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): `advertising` is filled with SUPPORTED_* constants;
 * the ADVERTISED_* equivalents happen to share values, but the
 * ADVERTISED_* names would be the correct ones here. */
2995 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
2996 info->port = PORT_FIBRE;
2997 /* info->transceiver?? TODO */
/* Speed/duplex are only meaningful while the carrier is up. */
2999 if (netif_carrier_ok(sp->dev)) {
3000 info->speed = 10000;
3001 info->duplex = DUPLEX_FULL;
3007 info->autoneg = AUTONEG_DISABLE;
3012 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3013 * @sp : private member of the device structure, which is a pointer to the
3014 * s2io_nic structure.
3015 * @info : pointer to the structure with parameters given by ethtool to
3016 * return driver information.
3018 * Returns driver specific information like name, version etc.. to ethtool.
3023 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3024 struct ethtool_drvinfo *info)
3026 nic_t *sp = dev->priv;
3028 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3029 strncpy(info->version, s2io_driver_version,
3030 sizeof(s2io_driver_version));
/* No firmware version to report; leave the field empty. */
3031 strncpy(info->fw_version, "", 32);
3032 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3033 info->regdump_len = XENA_REG_SPACE;
3034 info->eedump_len = XENA_EEPROM_SPACE;
3035 info->testinfo_len = S2IO_TEST_LEN;
3036 info->n_stats = S2IO_STAT_LEN;
3040 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
3041 * @sp: private member of the device structure, which is a pointer to the
3042 * s2io_nic structure.
3043 * @regs : pointer to the structure with parameters given by ethtool for
3044 * dumping the registers.
3045 * @reg_space: The input argumnet into which all the registers are dumped.
3047 * Dumps the entire register space of xFrame NIC into the user given
3053 static void s2io_ethtool_gregs(struct net_device *dev,
3054 struct ethtool_regs *regs, void *space)
3058 u8 *reg_space = (u8 *) space;
3059 nic_t *sp = dev->priv;
3061 regs->len = XENA_REG_SPACE;
3062 regs->version = sp->pdev->subsystem_device;
3064 for (i = 0; i < regs->len; i += 8) {
3065 reg = readq(sp->bar0 + i);
3066 memcpy((reg_space + i), ®, 8);
3071 * s2io_phy_id - timer function that alternates adapter LED.
3072 * @data : address of the private member of the device structure, which
3073 * is a pointer to the s2io_nic structure, provided as an u32.
3074 * Description: This is actually the timer function that alternates the
3075 * adapter LED bit of the adapter control bit to set/reset every time on
3076 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3077 * once every second.
3079 static void s2io_phy_id(unsigned long data)
3081 nic_t *sp = (nic_t *) data;
3082 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3086 subid = sp->pdev->subsystem_device;
/* Newer boards (subsystem id low byte >= 0x07) drive the LED through
 * GPIO 0; older boards toggle the LED bit in adapter_control. */
3087 if ((subid & 0xFF) >= 0x07) {
3088 val64 = readq(&bar0->gpio_control);
3089 val64 ^= GPIO_CTRL_GPIO_0;
3090 writeq(val64, &bar0->gpio_control);
3092 val64 = readq(&bar0->adapter_control);
3093 val64 ^= ADAPTER_LED_ON;
3094 writeq(val64, &bar0->adapter_control);
/* Re-arm to fire again in half a second. */
3097 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3101 * s2io_ethtool_idnic - To physically identify the nic on the system.
3102 * @sp : private member of the device structure, which is a pointer to the
3103 * s2io_nic structure.
3104 * @id : pointer to the structure with identification parameters given by
3106 * Description: Used to physically identify the NIC on the system.
3107 * The Link LED will blink for a time specified by the user for
3109 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3110 * identification is possible only if it's link is up.
3112 * int , returns 0 on success
3115 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3117 u64 val64 = 0, last_gpio_ctrl_val;
3118 nic_t *sp = dev->priv;
3119 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3122 subid = sp->pdev->subsystem_device;
/* Remember the GPIO state so it can be restored after blinking. */
3123 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3124 if ((subid & 0xFF) < 0x07) {
3125 val64 = readq(&bar0->adapter_control);
3126 if (!(val64 & ADAPTER_CNTL_EN)) {
3128 "Adapter Link down, cannot blink LED\n");
/* Lazily initialize the blink timer on first use. */
3132 if (sp->id_timer.function == NULL) {
3133 init_timer(&sp->id_timer);
3134 sp->id_timer.function = s2io_phy_id;
3135 sp->id_timer.data = (unsigned long) sp;
3137 mod_timer(&sp->id_timer, jiffies);
/* Blink for the user-requested number of seconds, or the default
 * maximum when the user passed 0. */
3139 msleep_interruptible(data * HZ);
3141 msleep_interruptible(MAX_FLICKER_TIME);
3142 del_timer_sync(&sp->id_timer);
3144 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3145 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3146 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3153 * s2io_ethtool_getpause_data -Pause frame generation and reception.
3154 * @sp : private member of the device structure, which is a pointer to the
3155 * s2io_nic structure.
3156 * @ep : pointer to the structure with pause parameters given by ethtool.
3158 * Returns the Pause frame generation and reception capability of the NIC.
3162 static void s2io_ethtool_getpause_data(struct net_device *dev,
3163 struct ethtool_pauseparam *ep)
3166 nic_t *sp = dev->priv;
3167 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Report current pause settings straight from the RMAC config. */
3169 val64 = readq(&bar0->rmac_pause_cfg);
3170 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3171 ep->tx_pause = TRUE;
3172 if (val64 & RMAC_PAUSE_RX_ENABLE)
3173 ep->rx_pause = TRUE;
3174 ep->autoneg = FALSE;
3178 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3179 * @sp : private member of the device structure, which is a pointer to the
3180 * s2io_nic structure.
3181 * @ep : pointer to the structure with pause parameters given by ethtool.
3183 * It can be used to set or reset Pause frame generation or reception
3184 * support of the NIC.
3186 * int, returns 0 on Success
3189 static int s2io_ethtool_setpause_data(struct net_device *dev,
3190 struct ethtool_pauseparam *ep)
3193 nic_t *sp = dev->priv;
3194 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read-modify-write the RMAC pause config with the requested
 * Tx/Rx pause enables. */
3196 val64 = readq(&bar0->rmac_pause_cfg);
3198 val64 |= RMAC_PAUSE_GEN_ENABLE;
3200 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3202 val64 |= RMAC_PAUSE_RX_ENABLE;
3204 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3205 writeq(val64, &bar0->rmac_pause_cfg);
3210 * read_eeprom - reads 4 bytes of data from user given offset.
3211 * @sp : private member of the device structure, which is a pointer to the
3212 * s2io_nic structure.
3213 * @off : offset at which the data must be written
3214 * @data : Its an output parameter where the data read at the given
3217 * Will read 4 bytes of data from the user given offset and return the
3219 * NOTE: Will allow to read only part of the EEPROM visible through the
3222 * -1 on failure and 0 on success.
3225 #define S2IO_DEV_ID 5
3226 static int read_eeprom(nic_t * sp, int off, u32 * data)
3231 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Issue a 4-byte I2C read command to the EEPROM device. */
3233 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3234 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3235 I2C_CONTROL_CNTL_START;
3236 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll (up to 5 attempts) for command completion. */
3238 while (exit_cnt < 5) {
3239 val64 = readq(&bar0->i2c_control);
3240 if (I2C_CONTROL_CNTL_END(val64)) {
3241 *data = I2C_CONTROL_GET_DATA(val64);
3253 * write_eeprom - actually writes the relevant part of the data value.
3254 * @sp : private member of the device structure, which is a pointer to the
3255 * s2io_nic structure.
3256 * @off : offset at which the data must be written
3257 * @data : The data that is to be written
3258 * @cnt : Number of bytes of the data that are actually to be written into
3259 * the Eeprom. (max of 3)
3261 * Actually writes the relevant part of the data value into the Eeprom
3262 * through the I2C bus.
3264 * 0 on success, -1 on failure.
3267 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3269 int exit_cnt = 0, ret = -1;
3271 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Issue a cnt-byte I2C write command carrying the data payload. */
3273 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3274 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3275 I2C_CONTROL_CNTL_START;
3276 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll for completion; success only when the device did not NACK. */
3278 while (exit_cnt < 5) {
3279 val64 = readq(&bar0->i2c_control);
3280 if (I2C_CONTROL_CNTL_END(val64)) {
3281 if (!(val64 & I2C_CONTROL_NACK))
3293 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3294 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3295 * @eeprom : pointer to the user level structure provided by ethtool,
3296 * containing all relevant information.
3297 * @data_buf : user defined value to be written into Eeprom.
3298 * Description: Reads the values stored in the Eeprom at given offset
3299 * for a given length. Stores these values int the input argument data
3300 * buffer 'data_buf' and returns these to the caller (ethtool.)
3305 static int s2io_ethtool_geeprom(struct net_device *dev,
3306 struct ethtool_eeprom *eeprom, u8 * data_buf)
3309 nic_t *sp = dev->priv;
/* Magic identifies this device: vendor in low 16 bits, device id high. */
3311 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the request to the visible EEPROM window. */
3313 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3314 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3316 for (i = 0; i < eeprom->len; i += 4) {
3317 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3318 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3322 memcpy((data_buf + i), &valid, 4);
3328 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3329 * @sp : private member of the device structure, which is a pointer to the
3330 * s2io_nic structure.
3331 * @eeprom : pointer to the user level structure provided by ethtool,
3332 * containing all relevant information.
3333 * @data_buf ; user defined value to be written into Eeprom.
3335 * Tries to write the user provided value in the Eeprom, at the offset
3336 * given by the user.
3338 * 0 on success, -EFAULT on failure.
3341 static int s2io_ethtool_seeprom(struct net_device *dev,
3342 struct ethtool_eeprom *eeprom,
3345 int len = eeprom->len, cnt = 0;
3346 u32 valid = 0, data;
3347 nic_t *sp = dev->priv;
/* Refuse writes whose magic does not match this vendor/device pair. */
3349 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3351 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3352 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Write one byte per iteration, positioned in the top byte of the
 * 32-bit I2C data word. */
3358 data = (u32) data_buf[cnt] & 0x000000FF;
3360 valid = (u32) (data << 24);
3364 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3366 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3368 "write into the specified offset\n");
3379 * s2io_register_test - reads and writes into all clock domains.
3380 * @sp : private member of the device structure, which is a pointer to the
3381 * s2io_nic structure.
3382 * @data : variable that returns the result of each of the test conducted b
3385 * Read and write into all clock domains. The NIC has 3 clock domains,
3386 * see that registers in all the three regions are accessible.
3391 static int s2io_register_test(nic_t * sp, uint64_t * data)
3393 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read tests: each register below has a known power-on value. */
3397 val64 = readq(&bar0->pif_rd_swapper_fb);
3398 if (val64 != 0x123456789abcdefULL) {
3400 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3403 val64 = readq(&bar0->rmac_pause_cfg);
3404 if (val64 != 0xc000ffff00000000ULL) {
3406 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3409 val64 = readq(&bar0->rx_queue_cfg);
3410 if (val64 != 0x0808080808080808ULL) {
3412 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3415 val64 = readq(&bar0->xgxs_efifo_cfg);
3416 if (val64 != 0x000000001923141EULL) {
3418 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: write alternating bit patterns to a scratch register
 * and read them back. */
3421 val64 = 0x5A5A5A5A5A5A5A5AULL;
3422 writeq(val64, &bar0->xmsi_data);
3423 val64 = readq(&bar0->xmsi_data);
3424 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3426 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3429 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3430 writeq(val64, &bar0->xmsi_data);
3431 val64 = readq(&bar0->xmsi_data);
3432 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3434 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3442 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3443 * @sp : private member of the device structure, which is a pointer to the
3444 * s2io_nic structure.
3445 * @data:variable that returns the result of each of the test conducted by
3448 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
3454 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
/* Negative tests below expect write_eeprom to FAIL at protected
 * offsets; a successful write there is a test failure. */
3459 /* Test Write Error at offset 0 */
3460 if (!write_eeprom(sp, 0, 0, 3))
3463 /* Test Write at offset 4f0 */
3464 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3466 if (read_eeprom(sp, 0x4F0, &ret_data))
3469 if (ret_data != 0x01234567)
3472 /* Reset the EEPROM data go FFFF */
3473 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3475 /* Test Write Request Error at offset 0x7c */
3476 if (!write_eeprom(sp, 0x07C, 0, 3))
3479 /* Test Write Request at offset 0x7fc */
3480 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3482 if (read_eeprom(sp, 0x7FC, &ret_data))
3485 if (ret_data != 0x01234567)
3488 /* Reset the EEPROM data go FFFF */
3489 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3491 /* Test Write Error at offset 0x80 */
3492 if (!write_eeprom(sp, 0x080, 0, 3))
3495 /* Test Write Error at offset 0xfc */
3496 if (!write_eeprom(sp, 0x0FC, 0, 3))
3499 /* Test Write Error at offset 0x100 */
3500 if (!write_eeprom(sp, 0x100, 0, 3))
3503 /* Test Write Error at offset 4ec */
3504 if (!write_eeprom(sp, 0x4EC, 0, 3))
3512 * s2io_bist_test - invokes the MemBist test of the card .
3513 * @sp : private member of the device structure, which is a pointer to the
3514 * s2io_nic structure.
3515 * @data:variable that returns the result of each of the test conducted by
3518 * This invokes the MemBist test of the card. We give around
3519 * 2 secs time for the Test to complete. If it's still not complete
3520 * within this period, we consider that the test failed.
3522 * 0 on success and -1 on failure.
3525 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3528 int cnt = 0, ret = -1;
3530 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3531 bist |= PCI_BIST_START;
/* NOTE(review): PCI_BIST is a single-byte config register, yet a
 * 16-bit write is issued here with a u8 value — this looks like it
 * should be pci_write_config_byte; confirm against the PCI spec. */
3532 pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll until the device clears PCI_BIST_START; the completion code
 * (0 == pass) is returned through *data. */
3535 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3536 if (!(bist & PCI_BIST_START)) {
3537 *data = (bist & PCI_BIST_CODE_MASK);
3549 * s2io_link_test - verifies the link state of the nic
3550 * @sp ; private member of the device structure, which is a pointer to the
3551 * s2io_nic structure.
3552 * @data: variable that returns the result of each of the test conducted by
3555 * The function verifies the link state of the NIC and updates the input
3556 * argument 'data' appropriately.
3561 static int s2io_link_test(nic_t * sp, uint64_t * data)
3563 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* A local RMAC fault in adapter_status means the link is down. */
3566 val64 = readq(&bar0->adapter_status);
3567 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3574 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3575 * @sp - private member of the device structure, which is a pointer to the
3576 * s2io_nic structure.
3577 * @data - variable that returns the result of each of the test
3578 * conducted by the driver.
3580 * This is one of the offline test that tests the read and write
3581 * access to the RldRam chip on the NIC.
3586 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3588 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3590 int cnt, iteration = 0, test_pass = 0;
/* Disable ECC so raw test patterns can be written and read back. */
3592 val64 = readq(&bar0->adapter_control);
3593 val64 &= ~ADAPTER_ECC_EN;
3594 writeq(val64, &bar0->adapter_control);
3596 val64 = readq(&bar0->mc_rldram_test_ctrl);
3597 val64 |= MC_RLDRAM_TEST_MODE;
3598 writeq(val64, &bar0->mc_rldram_test_ctrl);
3600 val64 = readq(&bar0->mc_rldram_mrs);
3601 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3602 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3604 val64 |= MC_RLDRAM_MRS_ENABLE;
3605 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two iterations: original bit patterns first, then their upper
 * 48 bits inverted, to exercise both polarities. */
3607 while (iteration < 2) {
3608 val64 = 0x55555555aaaa0000ULL;
3609 if (iteration == 1) {
3610 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3612 writeq(val64, &bar0->mc_rldram_test_d0);
3614 val64 = 0xaaaa5a5555550000ULL;
3615 if (iteration == 1) {
3616 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3618 writeq(val64, &bar0->mc_rldram_test_d1);
3620 val64 = 0x55aaaaaaaa5a0000ULL;
3621 if (iteration == 1) {
3622 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3624 writeq(val64, &bar0->mc_rldram_test_d2);
3626 val64 = (u64) (0x0000003fffff0000ULL);
3627 writeq(val64, &bar0->mc_rldram_test_add);
/* Kick off the write pass and poll for completion. */
3630 val64 = MC_RLDRAM_TEST_MODE;
3631 writeq(val64, &bar0->mc_rldram_test_ctrl);
3634 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3636 writeq(val64, &bar0->mc_rldram_test_ctrl);
3638 for (cnt = 0; cnt < 5; cnt++) {
3639 val64 = readq(&bar0->mc_rldram_test_ctrl);
3640 if (val64 & MC_RLDRAM_TEST_DONE)
/* Kick off the read-back pass and poll for completion. */
3648 val64 = MC_RLDRAM_TEST_MODE;
3649 writeq(val64, &bar0->mc_rldram_test_ctrl);
3651 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3652 writeq(val64, &bar0->mc_rldram_test_ctrl);
3654 for (cnt = 0; cnt < 5; cnt++) {
3655 val64 = readq(&bar0->mc_rldram_test_ctrl);
3656 if (val64 & MC_RLDRAM_TEST_DONE)
3664 val64 = readq(&bar0->mc_rldram_test_ctrl);
3665 if (val64 & MC_RLDRAM_TEST_PASS)
3680 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
3681 * @sp : private member of the device structure, which is a pointer to the
3682 * s2io_nic structure.
3683 * @ethtest : pointer to a ethtool command specific structure that will be
3684 * returned to the user.
3685 * @data : variable that returns the result of each of the test
3686 * conducted by the driver.
3688 * This function conducts 6 tests ( 4 offline and 2 online) to determine
3689 * the health of the card.
3694 static void s2io_ethtool_test(struct net_device *dev,
3695 struct ethtool_test *ethtest,
3698 nic_t *sp = dev->priv;
3699 int orig_state = netif_running(sp->dev);
3701 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3702 /* Offline Tests. */
/* The interface is brought down for the offline tests; data[] slots:
 * 0=register, 1=eeprom, 2=link, 3=rldram, 4=bist. */
3704 s2io_close(sp->dev);
3706 if (s2io_register_test(sp, &data[0]))
3707 ethtest->flags |= ETH_TEST_FL_FAILED;
3711 if (s2io_rldram_test(sp, &data[3]))
3712 ethtest->flags |= ETH_TEST_FL_FAILED;
3716 if (s2io_eeprom_test(sp, &data[1]))
3717 ethtest->flags |= ETH_TEST_FL_FAILED;
3719 if (s2io_bist_test(sp, &data[4]))
3720 ethtest->flags |= ETH_TEST_FL_FAILED;
3730 "%s: is not up, cannot run test\n",
/* Online path: only the link test runs without disturbing traffic. */
3739 if (s2io_link_test(sp, &data[2]))
3740 ethtest->flags |= ETH_TEST_FL_FAILED;
/* Copies the NIC-maintained MAC statistics block into the u64 array handed
 * to ethtool, in the fixed order matching ethtool_stats_keys. Counters are
 * stored little-endian by the hardware, hence the le32/le64_to_cpu swaps. */
3749 static void s2io_get_ethtool_stats(struct net_device *dev,
3750 struct ethtool_stats *estats,
3754 nic_t *sp = dev->priv;
3755 StatInfo_t *stat_info = sp->mac_control.stats_info;
/* Transmit-side (TMAC) counters. */
3757 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
3758 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
3759 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3760 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
3761 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
3762 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3763 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
3764 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3765 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
3766 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
3767 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
3768 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
3769 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3770 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
/* Receive-side (RMAC) counters. */
3771 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
3772 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
3773 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3774 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3775 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
3776 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3777 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3778 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3779 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3780 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
3781 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
3782 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
3783 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
3784 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
3785 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
3786 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3787 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3788 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
3789 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
3790 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3791 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
3792 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
3793 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
3794 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
3795 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* ethtool get_regs_len hook: size in bytes of the register dump. */
3798 int s2io_ethtool_get_regs_len(struct net_device *dev)
3800 return (XENA_REG_SPACE);
/* ethtool get_rx_csum hook: reports whether Rx checksum offload is on. */
3804 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3806 nic_t *sp = dev->priv;
3808 return (sp->rx_csum);
/* ethtool set_rx_csum hook: enables/disables Rx checksum verification
 * (body assigning sp->rx_csum not visible in this fragment). */
3810 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3812 nic_t *sp = dev->priv;
/* ethtool get_eeprom_len hook: size in bytes of the on-board EEPROM. */
3821 int s2io_get_eeprom_len(struct net_device *dev)
3823 return (XENA_EEPROM_SPACE);
/* ethtool self_test_count hook: number of entries in the self-test array. */
3826 int s2io_ethtool_self_test_count(struct net_device *dev)
3828 return (S2IO_TEST_LEN);
/* ethtool get_strings hook: copies out either the self-test names or the
 * statistics key names, depending on the requested string set. */
3830 void s2io_ethtool_get_strings(struct net_device *dev,
3831 u32 stringset, u8 * data)
3833 switch (stringset) {
3835 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
/* NOTE(review): "ðtool_stats_keys" looks like mojibake — the HTML entity
 * "&eth" of "&ethtool_stats_keys" was decoded to 'ð'. Should read
 * "&ethtool_stats_keys"; confirm against the pristine source. */
3838 memcpy(data, ðtool_stats_keys,
3839 sizeof(ethtool_stats_keys));
/* ethtool get_stats_count hook: number of exported statistics counters. */
3842 static int s2io_ethtool_get_stats_count(struct net_device *dev)
3844 return (S2IO_STAT_LEN);
/* ethtool set_tx_csum hook: toggles NETIF_F_IP_CSUM on dev->features. */
3847 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
3850 dev->features |= NETIF_F_IP_CSUM;
3852 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool operations table registered via SET_ETHTOOL_OPS() in
 * s2io_init_nic(); mixes driver-specific handlers with generic
 * ethtool_op_* helpers for the simple feature toggles. */
3858 static struct ethtool_ops netdev_ethtool_ops = {
3859 .get_settings = s2io_ethtool_gset,
3860 .set_settings = s2io_ethtool_sset,
3861 .get_drvinfo = s2io_ethtool_gdrvinfo,
3862 .get_regs_len = s2io_ethtool_get_regs_len,
3863 .get_regs = s2io_ethtool_gregs,
3864 .get_link = ethtool_op_get_link,
3865 .get_eeprom_len = s2io_get_eeprom_len,
3866 .get_eeprom = s2io_ethtool_geeprom,
3867 .set_eeprom = s2io_ethtool_seeprom,
3868 .get_pauseparam = s2io_ethtool_getpause_data,
3869 .set_pauseparam = s2io_ethtool_setpause_data,
3870 .get_rx_csum = s2io_ethtool_get_rx_csum,
3871 .set_rx_csum = s2io_ethtool_set_rx_csum,
3872 .get_tx_csum = ethtool_op_get_tx_csum,
3873 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
3874 .get_sg = ethtool_op_get_sg,
3875 .set_sg = ethtool_op_set_sg,
3877 .get_tso = ethtool_op_get_tso,
3878 .set_tso = ethtool_op_set_tso,
3880 .self_test_count = s2io_ethtool_self_test_count,
3881 .self_test = s2io_ethtool_test,
3882 .get_strings = s2io_ethtool_get_strings,
3883 .phys_id = s2io_ethtool_idnic,
3884 .get_stats_count = s2io_ethtool_get_stats_count,
3885 .get_ethtool_stats = s2io_get_ethtool_stats
3889 * s2io_ioctl - Entry point for the Ioctl
3890 * @dev : Device pointer.
3891 * @ifr : An IOCTL specific structure, that can contain a pointer to
3892 * a proprietary structure used to pass information to the driver.
3893 * @cmd : This is used to distinguish between the different commands that
3894 * can be passed to the IOCTL functions.
3896 * Currently there are no special functionality supported in IOCTL, hence
3897 * function always return EOPNOTSUPPORTED
3900 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3906 * s2io_change_mtu - entry point to change MTU size for the device.
3907 * @dev : device pointer.
3908 * @new_mtu : the new MTU size for the device.
3909 * Description: A driver entry point to change MTU size for the device.
3910 * Before changing the MTU the device must be stopped.
3912 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3916 int s2io_change_mtu(struct net_device *dev, int new_mtu)
3918 nic_t *sp = dev->priv;
3919 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Refuse to change MTU while the interface is up. */
3922 if (netif_running(dev)) {
3923 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
3924 DBG_PRINT(ERR_DBG, "change its MTU\n");
/* Validate requested size against the supported [MIN_MTU, jumbo] range. */
3928 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
3929 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
3934 /* Set the new MTU into the PYLD register of the NIC */
3936 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
3944 * s2io_tasklet - Bottom half of the ISR.
3945 * @dev_adr : address of the device structure in dma_addr_t format.
3947 * This is the tasklet or the bottom half of the ISR. This is
3948 * an extension of the ISR which is scheduled by the scheduler to be run
3949 * when the load on the CPU is low. All low priority tasks of the ISR can
3950 * be pushed into the tasklet. For now the tasklet is used only to
3951 * replenish the Rx buffers in the Rx buffer descriptors.
3956 static void s2io_tasklet(unsigned long dev_addr)
3958 struct net_device *dev = (struct net_device *) dev_addr;
3959 nic_t *sp = dev->priv;
3961 mac_info_t *mac_control;
3962 struct config_param *config;
3964 mac_control = &sp->mac_control;
3965 config = &sp->config;
/* TASKLET_IN_USE guards against re-entering the refill while a previous
 * run is still in flight; bit 0 of tasklet_status is the lock. */
3967 if (!TASKLET_IN_USE) {
3968 for (i = 0; i < config->rx_ring_num; i++) {
3969 ret = fill_rx_buffers(sp, i);
3970 if (ret == -ENOMEM) {
3971 DBG_PRINT(ERR_DBG, "%s: Out of ",
3973 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
/* -EFILL means the ring is already full — not an error. */
3975 } else if (ret == -EFILL) {
3977 "%s: Rx Ring %d is full\n",
3982 clear_bit(0, (&sp->tasklet_status));
3987 * s2io_set_link - Set the Link status
3988 * @data: long pointer to device private structure
3989 * Description: Sets the link status for the adapter
3992 static void s2io_set_link(unsigned long data)
3994 nic_t *nic = (nic_t *) data;
3995 struct net_device *dev = nic->dev;
3996 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* Bit 0 of link_state doubles as a "reset in progress" lock shared with
 * s2io_card_down(); bail out if it is already held. */
4000 if (test_and_set_bit(0, &(nic->link_state))) {
4001 /* The card is being reset, no point doing anything */
4005 subid = nic->pdev->subsystem_device;
4007 * Allow a small delay for the NICs self initiated
4008 * cleanup to complete.
4012 val64 = readq(&bar0->adapter_status);
4013 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4014 if (LINK_IS_UP(val64)) {
/* Link came up: enable the adapter and drive the link LED. */
4015 val64 = readq(&bar0->adapter_control);
4016 val64 |= ADAPTER_CNTL_EN;
4017 writeq(val64, &bar0->adapter_control);
4018 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4019 val64 = readq(&bar0->gpio_control);
4020 val64 |= GPIO_CTRL_GPIO_0;
4021 writeq(val64, &bar0->gpio_control);
4022 val64 = readq(&bar0->gpio_control);
4024 val64 |= ADAPTER_LED_ON;
4025 writeq(val64, &bar0->adapter_control);
/* Re-check: the link may have dropped again right after enabling. */
4027 val64 = readq(&bar0->adapter_status);
4028 if (!LINK_IS_UP(val64)) {
4029 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4030 DBG_PRINT(ERR_DBG, " Link down");
4031 DBG_PRINT(ERR_DBG, "after ");
4032 DBG_PRINT(ERR_DBG, "enabling ");
4033 DBG_PRINT(ERR_DBG, "device \n");
4035 if (nic->device_enabled_once == FALSE) {
4036 nic->device_enabled_once = TRUE;
4038 s2io_link(nic, LINK_UP);
/* Link down: clear the faulty-indicator GPIO and report LINK_DOWN. */
4040 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4041 val64 = readq(&bar0->gpio_control);
4042 val64 &= ~GPIO_CTRL_GPIO_0;
4043 writeq(val64, &bar0->gpio_control);
4044 val64 = readq(&bar0->gpio_control);
4046 s2io_link(nic, LINK_DOWN);
4048 } else { /* NIC is not Quiescent. */
4049 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4050 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4051 netif_stop_queue(dev);
4053 clear_bit(0, &(nic->link_state));
/* Brings the adapter down: waits for any in-flight s2io_set_link work,
 * stops traffic, kills the refill tasklet, verifies quiescence, and frees
 * all outstanding Tx/Rx buffers under the tx_lock. */
4056 static void s2io_card_down(nic_t * sp)
4059 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4060 unsigned long flags;
4061 register u64 val64 = 0;
4063 /* If s2io_set_link task is executing, wait till it completes. */
4064 while (test_and_set_bit(0, &(sp->link_state))) {
4067 atomic_set(&sp->card_state, CARD_DOWN);
4069 /* disable Tx and Rx traffic on the NIC */
4073 tasklet_kill(&sp->task);
4075 /* Check if the device is Quiescent and then Reset the NIC */
4077 val64 = readq(&bar0->adapter_status);
4078 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4086 "s2io_close:Device not Quiescent ");
4087 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4088 (unsigned long long) val64);
4092 spin_lock_irqsave(&sp->tx_lock, flags);
4095 /* Free all unused Tx and Rx buffers */
4096 free_tx_buffers(sp);
4097 free_rx_buffers(sp);
4099 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Release the link_state lock taken at the top of this function. */
4100 clear_bit(0, &(sp->link_state));
/* Brings the adapter up: programs the H/W registers, pre-fills every Rx
 * ring, sets the multicast filter, arms the refill tasklet, and starts the
 * NIC. Unwinds buffers/tasklet/irq on failure. */
4103 static int s2io_card_up(nic_t * sp)
4106 mac_info_t *mac_control;
4107 struct config_param *config;
4108 struct net_device *dev = (struct net_device *) sp->dev;
4110 /* Initialize the H/W I/O registers */
4111 if (init_nic(sp) != 0) {
4112 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4118 * Initializing the Rx buffers. For now we are considering only 1
4119 * Rx ring and initializing buffers into 30 Rx blocks
4121 mac_control = &sp->mac_control;
4122 config = &sp->config;
4124 for (i = 0; i < config->rx_ring_num; i++) {
4125 if ((ret = fill_rx_buffers(sp, i))) {
4126 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
/* Partial-fill failure: release whatever buffers were allocated. */
4129 free_rx_buffers(sp);
4132 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4133 atomic_read(&sp->rx_bufs_left[i]));
4136 /* Setting its receive mode */
4137 s2io_set_multicast(dev);
4139 /* Enable tasklet for the device */
4140 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4142 /* Enable Rx Traffic and interrupts on the NIC */
4143 if (start_nic(sp)) {
4144 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4145 tasklet_kill(&sp->task);
4147 free_irq(dev->irq, dev);
4148 free_rx_buffers(sp);
4152 atomic_set(&sp->card_state, CARD_UP);
4157 * s2io_restart_nic - Resets the NIC.
4158 * @data : long pointer to the device private structure
4160 * This function is scheduled to be run by the s2io_tx_watchdog
4161 * function after 0.5 secs to reset the NIC. The idea is to reduce
4162 * the run time of the watch dog routine which is run holding a
4166 static void s2io_restart_nic(unsigned long data)
4168 struct net_device *dev = (struct net_device *) data;
4169 nic_t *sp = dev->priv;
/* Bring the card back up; on success wake the Tx queue that the
 * watchdog stalled on. */
4172 if (s2io_card_up(sp)) {
4173 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4176 netif_wake_queue(dev);
4177 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4183 * s2io_tx_watchdog - Watchdog for transmit side.
4184 * @dev : Pointer to net device structure
4186 * This function is triggered if the Tx Queue is stopped
4187 * for a pre-defined amount of time when the Interface is still up.
4188 * If the Interface is jammed in such a situation, the hardware is
4189 * reset (by s2io_close) and restarted again (by s2io_open) to
4190 * overcome any problem that might have been caused in the hardware.
4195 static void s2io_tx_watchdog(struct net_device *dev)
4197 nic_t *sp = dev->priv;
/* Defer the heavy reset to rst_timer_task (s2io_restart_nic) so the
 * watchdog itself stays short. */
4199 if (netif_carrier_ok(dev)) {
4200 schedule_work(&sp->rst_timer_task);
4205 * rx_osm_handler - To perform some OS related operations on SKB.
4206 * @sp: private member of the device structure,pointer to s2io_nic structure.
4207 * @skb : the socket buffer pointer.
4208 * @len : length of the packet
4209 * @cksum : FCS checksum of the frame.
4210 * @ring_no : the ring from which this RxD was extracted.
4212 * This function is called by the Tx interrupt service routine to perform
4213 * some OS related operations on the SKB before passing it to the upper
4214 * layers. It mainly checks if the checksum is OK, if so adds it to the
4215 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4216 * to the upper layer. If the checksum is wrong, it increments the Rx
4217 * packet error count, frees the SKB and returns error.
4219 * SUCCESS on success and -1 on failure.
4221 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4223 nic_t *sp = ring_data->nic;
4224 struct net_device *dev = (struct net_device *) sp->dev;
/* The skb pointer was stashed in the descriptor's Host_Control field
 * when the buffer was posted. */
4225 struct sk_buff *skb = (struct sk_buff *)
4226 ((unsigned long) rxdp->Host_Control);
4227 int ring_no = ring_data->ring_no;
4228 u16 l3_csum, l4_csum;
4229 #ifdef CONFIG_2BUFF_MODE
4230 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4231 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4232 int get_block = ring_data->rx_curr_get_info.block_index;
4233 int get_off = ring_data->rx_curr_get_info.offset;
4234 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4235 unsigned char *buff;
/* NOTE(review): stray double semicolon at end of the next line — harmless
 * empty statement, but should be a single ';'. */
4237 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
/* A non-zero T_CODE means the hardware flagged a receive error. */
4240 if (rxdp->Control_1 & RXD_T_CODE) {
4241 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4242 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4246 /* Updating statistics */
4247 rxdp->Host_Control = 0;
4249 sp->stats.rx_packets++;
4250 #ifndef CONFIG_2BUFF_MODE
4251 sp->stats.rx_bytes += len;
4253 sp->stats.rx_bytes += buf0_len + buf2_len;
4256 #ifndef CONFIG_2BUFF_MODE
/* 2-buffer mode: prepend the header fragment (buffer 0) to the skb. */
4259 buff = skb_push(skb, buf0_len);
4260 memcpy(buff, ba->ba_0, buf0_len);
4261 skb_put(skb, buf2_len);
4264 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4266 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4267 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4268 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4270 * NIC verifies if the Checksum of the received
4271 * frame is Ok or not and accordingly returns
4272 * a flag in the RxD.
4274 skb->ip_summed = CHECKSUM_UNNECESSARY;
4277 * Packet with erroneous checksum, let the
4278 * upper layers deal with it.
4280 skb->ip_summed = CHECKSUM_NONE;
4283 skb->ip_summed = CHECKSUM_NONE;
4286 skb->protocol = eth_type_trans(skb, dev);
4287 #ifdef CONFIG_S2IO_NAPI
4288 netif_receive_skb(skb);
4292 dev->last_rx = jiffies;
4293 atomic_dec(&sp->rx_bufs_left[ring_no]);
4298 * s2io_link - stops/starts the Tx queue.
4299 * @sp : private member of the device structure, which is a pointer to the
4300 * s2io_nic structure.
4301 * @link : indicates whether link is UP/DOWN.
4303 * This function stops/starts the Tx queue depending on whether the link
4304 * status of the NIC is down or up. This is called by the Alarm
4305 * interrupt handler whenever a link change interrupt comes up.
4310 void s2io_link(nic_t * sp, int link)
4312 struct net_device *dev = (struct net_device *) sp->dev;
/* Only act on actual transitions to avoid log spam and redundant
 * carrier flaps. */
4314 if (link != sp->last_link_state) {
4315 if (link == LINK_DOWN) {
4316 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4317 netif_carrier_off(dev);
4319 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4320 netif_carrier_on(dev);
4323 sp->last_link_state = link;
4327 * get_xena_rev_id - to identify revision ID of xena.
4328 * @pdev : PCI Dev structure
4330 * Function to identify the Revision ID of xena.
4332 * returns the revision ID of the device.
4335 int get_xena_rev_id(struct pci_dev *pdev)
/* Reads PCI_REVISION_ID from config space; the pci_read_config_byte
 * return code is captured but the revision itself is what is returned. */
4339 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4344 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4345 * @sp : private member of the device structure, which is a pointer to the
4346 * s2io_nic structure.
4348 * This function initializes a few of the PCI and PCI-X configuration registers
4349 * with recommended values.
4354 static void s2io_init_pci(nic_t * sp)
4356 u16 pci_cmd = 0, pcix_cmd = 0;
4358 /* Enable Data Parity Error Recovery in PCI-X command register. */
4359 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4361 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* The read-back after each write flushes/verifies the config update. */
4363 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4366 /* Set the PErr Response bit in PCI command register. */
4367 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4368 pci_write_config_word(sp->pdev, PCI_COMMAND,
4369 (pci_cmd | PCI_COMMAND_PARITY));
4370 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4372 /* Set MMRB count to 1024 in PCI-X Command register. */
4374 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4375 (pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4376 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4379 /* Setting Maximum outstanding splits based on system type. */
4381 pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
4382 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4384 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4387 /* Forcibly disabling relaxed ordering capability of the card. */
4389 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4391 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Module metadata and loadable parameters (ring/FIFO sizing, pause and
 * utilization tuning). All are declared with permission 0: set at load
 * time only, not visible under /sys/module. */
4395 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4396 MODULE_LICENSE("GPL");
4397 module_param(tx_fifo_num, int, 0);
4398 module_param(rx_ring_num, int, 0);
4399 module_param_array(tx_fifo_len, uint, NULL, 0);
4400 module_param_array(rx_ring_sz, uint, NULL, 0);
4401 module_param(Stats_refresh_time, int, 0);
4402 module_param_array(rts_frm_len, uint, NULL, 0);
4403 module_param(rmac_pause_time, int, 0);
4404 module_param(mc_pause_threshold_q0q3, int, 0);
4405 module_param(mc_pause_threshold_q4q7, int, 0);
4406 module_param(shared_splits, int, 0);
4407 module_param(tmac_util_period, int, 0);
4408 module_param(rmac_util_period, int, 0);
/* indicate_max_pkts only exists in the non-NAPI receive path. */
4409 #ifndef CONFIG_S2IO_NAPI
4410 module_param(indicate_max_pkts, int, 0);
4414 * s2io_init_nic - Initialization of the adapter .
4415 * @pdev : structure containing the PCI related information of the device.
4416 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4418 * The function initializes an adapter identified by the pci_dev structure.
4419 * All OS related initialization including memory and device structure and
4420 * initialization of the device private variable is done. Also the swapper
4421 * control register is initialized to enable read and write into the I/O
4422 * registers of the device.
4424 * returns 0 on success and negative on failure.
4427 static int __devinit
4428 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4431 struct net_device *dev;
4433 int dma_flag = FALSE;
4434 u32 mac_up, mac_down;
4435 u64 val64 = 0, tmp64 = 0;
4436 XENA_dev_config_t __iomem *bar0 = NULL;
4438 mac_info_t *mac_control;
4439 struct config_param *config;
4441 #ifdef CONFIG_S2IO_NAPI
4442 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4445 if ((ret = pci_enable_device(pdev))) {
4447 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer 64-bit DMA; fall back to 32-bit; fail if neither is usable. */
4451 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4452 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4454 if (pci_set_consistent_dma_mask
4455 (pdev, DMA_64BIT_MASK)) {
4457 "Unable to obtain 64bit DMA for \
4458 consistent allocations\n");
4459 pci_disable_device(pdev);
4462 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4463 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4465 pci_disable_device(pdev);
4469 if (pci_request_regions(pdev, s2io_driver_name)) {
/* NOTE(review): comma operator at end of the next line (','), not ';' —
 * legal C but almost certainly a typo for a semicolon; confirm. */
4470 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4471 pci_disable_device(pdev);
4475 dev = alloc_etherdev(sizeof(nic_t));
4477 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4478 pci_disable_device(pdev);
4479 pci_release_regions(pdev);
4483 pci_set_master(pdev);
4484 pci_set_drvdata(pdev, dev);
4485 SET_MODULE_OWNER(dev);
4486 SET_NETDEV_DEV(dev, &pdev->dev);
4488 /* Private member variable initialized to s2io NIC structure */
4490 memset(sp, 0, sizeof(nic_t));
4493 sp->high_dma_flag = dma_flag;
4494 sp->device_enabled_once = FALSE;
4496 /* Initialize some PCI/PCI-X fields of the NIC. */
4500 * Setting the device configuration parameters.
4501 * Most of these parameters can be specified by the user during
4502 * module insertion as they are module loadable parameters. If
4503 * these parameters are not specified during load time, they
4504 * are initialized with default values.
4506 mac_control = &sp->mac_control;
4507 config = &sp->config;
4509 /* Tx side parameters. */
4510 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4511 config->tx_fifo_num = tx_fifo_num;
4512 for (i = 0; i < MAX_TX_FIFOS; i++) {
4513 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4514 config->tx_cfg[i].fifo_priority = i;
4517 /* mapping the QoS priority to the configured fifos */
4518 for (i = 0; i < MAX_TX_FIFOS; i++)
4519 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
/* Short FIFOs (<65 descriptors) switch to per-list Tx interrupts. */
4521 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4522 for (i = 0; i < config->tx_fifo_num; i++) {
4523 config->tx_cfg[i].f_no_snoop =
4524 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4525 if (config->tx_cfg[i].fifo_len < 65) {
4526 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4530 config->max_txds = MAX_SKB_FRAGS;
4532 /* Rx side parameters. */
4533 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4534 config->rx_ring_num = rx_ring_num;
4535 for (i = 0; i < MAX_RX_RINGS; i++) {
4536 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4537 (MAX_RXDS_PER_BLOCK + 1);
4538 config->rx_cfg[i].ring_priority = i;
4541 for (i = 0; i < rx_ring_num; i++) {
4542 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4543 config->rx_cfg[i].f_no_snoop =
4544 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4547 /* Setting Mac Control parameters */
4548 mac_control->rmac_pause_time = rmac_pause_time;
4549 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4550 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4553 /* Initialize Ring buffer parameters. */
4554 for (i = 0; i < config->rx_ring_num; i++)
4555 atomic_set(&sp->rx_bufs_left[i], 0);
4557 /* initialize the shared memory used by the NIC and the host */
4558 if (init_shared_mem(sp)) {
4559 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4562 goto mem_alloc_failed;
/* BAR0 = register space, BAR2 = Tx FIFO doorbell space. */
4565 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4566 pci_resource_len(pdev, 0));
4568 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4571 goto bar0_remap_failed;
4574 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4575 pci_resource_len(pdev, 2));
4577 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4580 goto bar1_remap_failed;
4583 dev->irq = pdev->irq;
4584 dev->base_addr = (unsigned long) sp->bar0;
4586 /* Initializing the BAR1 address as the start of the FIFO pointer. */
4587 for (j = 0; j < MAX_TX_FIFOS; j++) {
4588 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4589 (sp->bar1 + (j * 0x00020000));
4592 /* Driver entry points */
4593 dev->open = &s2io_open;
4594 dev->stop = &s2io_close;
4595 dev->hard_start_xmit = &s2io_xmit;
4596 dev->get_stats = &s2io_get_stats;
4597 dev->set_multicast_list = &s2io_set_multicast;
4598 dev->do_ioctl = &s2io_ioctl;
4599 dev->change_mtu = &s2io_change_mtu;
4600 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4603 * will use eth_mac_addr() for dev->set_mac_address
4604 * mac address will be set every time dev->open() is called
4606 #if defined(CONFIG_S2IO_NAPI)
4607 dev->poll = s2io_poll;
4611 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4612 if (sp->high_dma_flag == TRUE)
4613 dev->features |= NETIF_F_HIGHDMA;
4615 dev->features |= NETIF_F_TSO;
4618 dev->tx_timeout = &s2io_tx_watchdog;
4619 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4620 INIT_WORK(&sp->rst_timer_task,
4621 (void (*)(void *)) s2io_restart_nic, dev);
4622 INIT_WORK(&sp->set_link_task,
4623 (void (*)(void *)) s2io_set_link, sp);
4625 pci_save_state(sp->pdev);
4627 /* Setting swapper control on the NIC, for proper reset operation */
4628 if (s2io_set_swapper(sp)) {
4629 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4632 goto set_swap_failed;
4636 * Fix for all "FFs" MAC address problems observed on
4639 fix_mac_address(sp);
4643 * MAC address initialization.
4644 * For now only one mac address will be read and used.
4647 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4648 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4649 writeq(val64, &bar0->rmac_addr_cmd_mem);
4650 wait_for_cmd_complete(sp);
4652 tmp64 = readq(&bar0->rmac_addr_data0_mem);
4653 mac_down = (u32) tmp64;
4654 mac_up = (u32) (tmp64 >> 32);
/* NOTE(review): sizeof(ETH_ALEN) is sizeof(int) — likely 4 — so this
 * clears only 4 of the 6 MAC bytes. Should be plain ETH_ALEN. Harmless
 * here only because all 6 bytes are assigned below — confirm and fix. */
4656 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
4658 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4659 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4660 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4661 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4662 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4663 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4666 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4667 sp->def_mac_addr[0].mac_addr[0],
4668 sp->def_mac_addr[0].mac_addr[1],
4669 sp->def_mac_addr[0].mac_addr[2],
4670 sp->def_mac_addr[0].mac_addr[3],
4671 sp->def_mac_addr[0].mac_addr[4],
4672 sp->def_mac_addr[0].mac_addr[5]);
4674 /* Set the factory defined MAC address initially */
4675 dev->addr_len = ETH_ALEN;
4676 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4679 * Initialize the tasklet status and link state flags
4680 * and the card state parameter
4682 atomic_set(&(sp->card_state), 0);
4683 sp->tasklet_status = 0;
4686 /* Initialize spinlocks */
4687 spin_lock_init(&sp->tx_lock);
4688 #ifndef CONFIG_S2IO_NAPI
4689 spin_lock_init(&sp->put_lock);
4693 * SXE-002: Configure link and activity LED to init state
4696 subid = sp->pdev->subsystem_device;
4697 if ((subid & 0xFF) >= 0x07) {
4698 val64 = readq(&bar0->gpio_control);
4699 val64 |= 0x0000800000000000ULL;
4700 writeq(val64, &bar0->gpio_control);
4701 val64 = 0x0411040400000000ULL;
4702 writeq(val64, (void __iomem *) bar0 + 0x2700);
4703 val64 = readq(&bar0->gpio_control);
4706 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
4708 if (register_netdev(dev)) {
4709 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4711 goto register_failed;
4715 * Make Link state as off at this point, when the Link change
4716 * interrupt comes the state will be automatically changed to
4719 netif_carrier_off(dev);
4720 sp->last_link_state = LINK_DOWN;
/* Error-unwind tail (labels not visible in this fragment). */
4731 free_shared_mem(sp);
4732 pci_disable_device(pdev);
4733 pci_release_regions(pdev);
4734 pci_set_drvdata(pdev, NULL);
4741 * s2io_rem_nic - Free the PCI device
4742 * @pdev: structure containing the PCI related information of the device.
4743 * Description: This function is called by the Pci subsystem to release a
4744 * PCI device and free up all resource held up by the device. This could
4745 * be in response to a Hot plug event or when the driver is to be removed
4749 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
4751 struct net_device *dev =
4752 (struct net_device *) pci_get_drvdata(pdev);
4756 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Teardown mirrors s2io_init_nic: unregister, free shared DMA memory,
 * then release the PCI resources and clear drvdata. */
4761 unregister_netdev(dev);
4763 free_shared_mem(sp);
4766 pci_disable_device(pdev);
4767 pci_release_regions(pdev);
4768 pci_set_drvdata(pdev, NULL);
4773 * s2io_starter - Entry point for the driver
4774 * Description: This function is the entry point for the driver. It verifies
4775 * the module loadable parameters and initializes PCI configuration space.
4778 int __init s2io_starter(void)
4780 return pci_module_init(&s2io_driver);
4784 * s2io_closer - Cleanup routine for the driver
4785 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4788 void s2io_closer(void)
4790 pci_unregister_driver(&s2io_driver);
4791 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Register the module load/unload entry points. */
4794 module_init(s2io_starter);
4795 module_exit(s2io_closer);