1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_sz: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58 #include <linux/if_vlan.h>
60 #include <asm/system.h>
61 #include <asm/uaccess.h>
66 #include "s2io-regs.h"
68 /* S2io Driver name & version. */
/* NOTE(review): presumably copied into ethtool drvinfo strings elsewhere in
 * the file — keep these short, NUL-terminated literals. */
69 static char s2io_driver_name[] = "Neterion";
70 static char s2io_driver_version[] = "Version 2.0.8.1";
/*
 * RXD_IS_UP2DT - test whether a Rx descriptor has been filled by the NIC.
 * Non-zero when the adapter no longer owns the descriptor (RXD_OWN_XENA
 * clear in Control_1) and the marker field of Control_2 no longer holds
 * the driver-written sentinel THE_RXD_MARK.
 * NOTE(review): the declaration of 'ret' and the return statement are
 * elided from this extract.
 */
72 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
76 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
77 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
83 * Cards with following subsystem_id have a link state indication
84 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
85 * macro below identifies these cards given the subsystem_id.
/* Evaluates to 1 only for Xframe-I devices whose PCI subsystem id falls in
 * the faulty ranges listed above; 0 for everything else (incl. Xframe-II). */
87 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
88 (dev_type == XFRAME_I_DEVICE) ? \
89 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
90 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
/* Link is up when neither the remote- nor local-fault bit is set in the
 * adapter status register value. */
92 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
93 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; non-zero return means it was already busy.
 * Relies on a local 'sp' (nic_t *) being in scope at the expansion site. */
94 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how full ring 'ring' currently is.
 * @sp:       device private structure.
 * @rxb_size: number of Rx buffers currently posted (see callers).
 * @ring:     ring index to inspect.
 * Compares outstanding buffers against the ring's pkt_cnt to decide the
 * replenish urgency.  NOTE(review): the return statements and the PANIC/
 * LOW level constants are elided from this extract — verify against the
 * full source before relying on the thresholds.
 */
97 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
100 mac_info_t *mac_control;
102 mac_control = &sp->mac_control;
103 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
105 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
113 /* Ethtool related variables and Macros. */
/* Self-test names reported via ethtool; order must match the self-test
 * dispatch elsewhere in the driver. */
114 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
115 "Register test\t(offline)",
116 "Eeprom test\t(offline)",
117 "Link test\t(online)",
118 "RLDRAM test\t(offline)",
119 "BIST Test\t(offline)"
/* Statistic key names for ethtool -S; several entries are elided from this
 * extract.  Order must match the stats-block layout copied out by
 * get_ethtool_stats. */
122 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
124 {"tmac_data_octets"},
128 {"tmac_pause_ctrl_frms"},
129 {"tmac_any_err_frms"},
130 {"tmac_vld_ip_octets"},
138 {"rmac_data_octets"},
139 {"rmac_fcs_err_frms"},
141 {"rmac_vld_mcst_frms"},
142 {"rmac_vld_bcst_frms"},
143 {"rmac_in_rng_len_err_frms"},
145 {"rmac_pause_ctrl_frms"},
146 {"rmac_discarded_frms"},
147 {"rmac_usized_frms"},
148 {"rmac_osized_frms"},
150 {"rmac_jabber_frms"},
158 {"rmac_err_drp_udp"},
160 {"rmac_accepted_ip"},
162 {"\n DRIVER STATISTICS"},
163 {"single_bit_ecc_errs"},
164 {"double_bit_ecc_errs"},
/*
 * Ethtool sizing helpers.
 * S2IO_STAT_LEN / S2IO_TEST_LEN:     number of entries in the respective
 *                                    string tables above.
 * S2IO_*_STRINGS_LEN:                total byte length of those tables.
 * Each expansion is fully parenthesized so the macros compose safely in
 * larger expressions (the originals expanded unparenthesized, which is
 * precedence-fragile, e.g. in 'x % S2IO_STAT_LEN').
 */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * S2IO_TIMER_CONF - initialize and arm a kernel timer.
 * @timer:  struct timer_list, passed by name (macro takes its address).
 * @handle: timer callback function stored in timer.function.
 * @arg:    opaque value stored in timer.data for the callback.
 * @exp:    relative expiry, added to the current jiffies.
 *
 * Wrapped in do { } while (0) so the macro expands as a single statement;
 * the original three bare statements would misbehave inside an un-braced
 * if/else.  'arg' and 'exp' are parenthesized against operator-bearing
 * arguments.  Callers still terminate the invocation with a semicolon.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long)(arg);	\
		mod_timer(&timer, (jiffies + (exp)));	\
	} while (0)
/*
 * s2io_vlan_rx_register - attach a VLAN group to the device.
 * @dev: net device.
 * @grp: VLAN group supplied by the 8021q layer (NULL on unregister).
 * Takes tx_lock while updating driver VLAN state so it cannot race the
 * transmit path.  NOTE(review): the 'flags' declaration and the
 * 'nic->vlgrp = grp' store are elided from this extract.
 */
180 static void s2io_vlan_rx_register(struct net_device *dev,
181 struct vlan_group *grp)
183 nic_t *nic = dev->priv;
186 spin_lock_irqsave(&nic->tx_lock, flags);
188 spin_unlock_irqrestore(&nic->tx_lock, flags);
191 /* Unregister the vlan */
/*
 * s2io_vlan_rx_kill_vid - remove one VLAN id from the device's group.
 * @dev: net device.
 * @vid: VLAN id being deleted.
 * Clears the per-vid slot under tx_lock.  NOTE(review): the 'flags'
 * declaration and the 'if (nic->vlgrp)' guard around the store appear to
 * be elided from this extract — verify against the full source.
 */
192 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
194 nic_t *nic = dev->priv;
197 spin_lock_irqsave(&nic->tx_lock, flags);
199 nic->vlgrp->vlan_devices[vid] = NULL;
200 spin_unlock_irqrestore(&nic->tx_lock, flags);
204 * Constants to be programmed into the Xena's registers, to configure
/* Sentinel inside the xena_*_cfg arrays below: tells the programming loop
 * in init_nic() to switch from writing dtx_control to mdio_control (and
 * vice versa). */
208 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* XAUI configuration writes for Xframe-II (Herc): streamed into
 * bar0->dtx_control by init_nic().  The END_SIGN terminator entry is
 * elided from this extract. */
211 static u64 herc_act_dtx_cfg[] = {
213 0x8000051536750000ULL, 0x80000515367500E0ULL,
215 0x8000051536750004ULL, 0x80000515367500E4ULL,
217 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
219 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
221 0x801205150D440000ULL, 0x801205150D4400E0ULL,
223 0x801205150D440004ULL, 0x801205150D4400E4ULL,
225 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
227 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* MDIO programming sequence for Xframe-I: written to bar0->mdio_control
 * by init_nic() after the dtx sequence hits SWITCH_SIGN.  The END_SIGN
 * terminator is elided from this extract. */
232 static u64 xena_mdio_cfg[] = {
234 0xC001010000000000ULL, 0xC0010100000000E0ULL,
235 0xC0010100008000E4ULL,
236 /* Remove Reset from PMA PLL */
237 0xC001010000000000ULL, 0xC0010100000000E0ULL,
238 0xC0010100000000E4ULL,
/* XAUI dtx_control programming sequence for Xframe-I.  Interleaved
 * SWITCH_SIGN/END_SIGN sentinel entries are elided from this extract. */
242 static u64 xena_dtx_cfg[] = {
243 0x8000051500000000ULL, 0x80000515000000E0ULL,
244 0x80000515D93500E4ULL, 0x8001051500000000ULL,
245 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
246 0x8002051500000000ULL, 0x80020515000000E0ULL,
247 0x80020515F21000E4ULL,
248 /* Set PADLOOPBACKN */
249 0x8002051500000000ULL, 0x80020515000000E0ULL,
250 0x80020515B20000E4ULL, 0x8003051500000000ULL,
251 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
252 0x8004051500000000ULL, 0x80040515000000E0ULL,
253 0x80040515B20000E4ULL, 0x8005051500000000ULL,
254 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
256 /* Remove PADLOOPBACKN */
257 0x8002051500000000ULL, 0x80020515000000E0ULL,
258 0x80020515F20000E4ULL, 0x8003051500000000ULL,
259 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
260 0x8004051500000000ULL, 0x80040515000000E0ULL,
261 0x80040515F20000E4ULL, 0x8005051500000000ULL,
262 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
267 * Constants for Fixing the MacAddress problem seen mostly on
/* Register write sequence that works around the MAC-address read problem
 * (per the comment above).  The END_SIGN terminator is elided from this
 * extract. */
270 static u64 fix_mac[] = {
271 0x0060000000000000ULL, 0x0060600000000000ULL,
272 0x0040600000000000ULL, 0x0000600000000000ULL,
273 0x0020600000000000ULL, 0x0060600000000000ULL,
274 0x0020600000000000ULL, 0x0060600000000000ULL,
275 0x0020600000000000ULL, 0x0060600000000000ULL,
276 0x0020600000000000ULL, 0x0060600000000000ULL,
277 0x0020600000000000ULL, 0x0060600000000000ULL,
278 0x0020600000000000ULL, 0x0060600000000000ULL,
279 0x0020600000000000ULL, 0x0060600000000000ULL,
280 0x0020600000000000ULL, 0x0060600000000000ULL,
281 0x0020600000000000ULL, 0x0060600000000000ULL,
282 0x0020600000000000ULL, 0x0060600000000000ULL,
283 0x0020600000000000ULL, 0x0000600000000000ULL,
284 0x0040600000000000ULL, 0x0060600000000000ULL,
288 /* Module Loadable parameters. */
/* Defaults: one Tx FIFO and one Rx ring; per-FIFO/per-ring sizes of 0 mean
 * "use the driver default".  module_param declarations are presumably
 * elsewhere in the file (elided here). */
289 static unsigned int tx_fifo_num = 1;
290 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
291 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
292 static unsigned int rx_ring_num = 1;
293 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
294 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Per-ring receive frame length steering; 0 = not configured by the user. */
295 static unsigned int rts_frm_len[MAX_RX_RINGS] =
296 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
297 static unsigned int use_continuous_tx_intrs = 1;
/* Pause-frame time and per-queue pause thresholds for flow control. */
298 static unsigned int rmac_pause_time = 65535;
299 static unsigned int mc_pause_threshold_q0q3 = 187;
300 static unsigned int mc_pause_threshold_q4q7 = 187;
301 static unsigned int shared_splits;
302 static unsigned int tmac_util_period = 5;
303 static unsigned int rmac_util_period = 5;
304 static unsigned int bimodal = 0;
/* indicate_max_pkts only exists when NAPI is disabled; the matching #endif
 * is elided from this extract. */
305 #ifndef CONFIG_S2IO_NAPI
306 static unsigned int indicate_max_pkts;
308 /* Frequency of Rx desc syncs expressed as power of 2 */
309 static unsigned int rxsync_frequency = 3;
313 * This table lists all the devices that this driver supports.
/* PCI id table: Xframe-I (WIN/UNI) and Xframe-II "Herc" (WIN/UNI) parts,
 * matched on vendor+device with any subsystem ids.  The all-zero
 * terminator entry is elided from this extract. */
315 static struct pci_device_id s2io_tbl[] __devinitdata = {
316 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
317 PCI_ANY_ID, PCI_ANY_ID},
318 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
319 PCI_ANY_ID, PCI_ANY_ID},
320 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
321 PCI_ANY_ID, PCI_ANY_ID},
322 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
323 PCI_ANY_ID, PCI_ANY_ID},
327 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver glue: probe/remove entry points registered with the PCI core.
 * The .name member and closing brace are elided from this extract. */
329 static struct pci_driver s2io_driver = {
331 .id_table = s2io_tbl,
332 .probe = s2io_init_nic,
333 .remove = __devexit_p(s2io_rem_nic),
/*
 * TXD_MEM_PAGE_CNT - ceiling division: number of pages needed to hold
 * 'len' items at 'per_each' items per page.
 * Used by both init_shared_mem() and free_shared_mem().
 * Arguments are parenthesized (the original expanded them bare, so an
 * argument containing an operator of lower precedence — e.g. a ternary —
 * would have been grouped incorrectly).
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
340 * init_shared_mem - Allocation and Initialization of Memory
341 * @nic: Device private variable.
342 * Description: The function allocates all the memory areas shared
343 * between the NIC and the driver. This includes Tx descriptors,
344 * Rx descriptors and the statistics block.
 * NOTE(review): many lines of this function (error returns, closing
 * braces, some allocation arguments) are elided from this extract; the
 * comments below describe only what the visible code shows.
347 static int init_shared_mem(struct s2io_nic *nic)
350 void *tmp_v_addr, *tmp_v_addr_next;
351 dma_addr_t tmp_p_addr, tmp_p_addr_next;
352 RxD_block_t *pre_rxd_blk = NULL;
353 int i, j, blk_cnt, rx_sz, tx_sz;
354 int lst_size, lst_per_page;
355 struct net_device *dev = nic->dev;
356 #ifdef CONFIG_2BUFF_MODE
361 mac_info_t *mac_control;
362 struct config_param *config;
364 mac_control = &nic->mac_control;
365 config = &nic->config;
368 /* Allocation and initialization of TXDLs in FIFOs */
/* Total requested TxDs across all FIFOs is capped at MAX_AVAILABLE_TXDS. */
370 for (i = 0; i < config->tx_fifo_num; i++) {
371 size += config->tx_cfg[i].fifo_len;
373 if (size > MAX_AVAILABLE_TXDS) {
374 DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
376 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
/* One TxDL (descriptor list) is lst_size bytes; TxDLs are packed
 * lst_per_page to a PAGE_SIZE DMA page. */
380 lst_size = (sizeof(TxD_t) * config->max_txds);
381 tx_sz = lst_size * size;
382 lst_per_page = PAGE_SIZE / lst_size;
/* Per-FIFO bookkeeping array (one list_info_hold_t per TxDL). */
384 for (i = 0; i < config->tx_fifo_num; i++) {
385 int fifo_len = config->tx_cfg[i].fifo_len;
386 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
387 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
389 if (!mac_control->fifos[i].list_info) {
391 "Malloc failed for list_info\n");
394 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
/* Second pass: init put/get cursors and allocate the DMA pages that back
 * each FIFO's TxDLs. */
396 for (i = 0; i < config->tx_fifo_num; i++) {
397 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
399 mac_control->fifos[i].tx_curr_put_info.offset = 0;
400 mac_control->fifos[i].tx_curr_put_info.fifo_len =
401 config->tx_cfg[i].fifo_len - 1;
402 mac_control->fifos[i].tx_curr_get_info.offset = 0;
403 mac_control->fifos[i].tx_curr_get_info.fifo_len =
404 config->tx_cfg[i].fifo_len - 1;
405 mac_control->fifos[i].fifo_no = i;
406 mac_control->fifos[i].nic = nic;
407 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 1;
409 for (j = 0; j < page_num; j++) {
413 tmp_v = pci_alloc_consistent(nic->pdev,
417 "pci_alloc_consistent ");
418 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
421 /* If we got a zero DMA address(can happen on
422 * certain platforms like PPC), reallocate.
423 * Store virtual address of page we don't want,
427 mac_control->zerodma_virt_addr = tmp_v;
429 "%s: Zero DMA address for TxDL. ", dev->name);
431 "Virtual address %llx\n", (u64)tmp_v);
432 tmp_v = pci_alloc_consistent(nic->pdev,
436 "pci_alloc_consistent ");
437 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
/* Carve the page into TxDLs and record each one's virtual and bus
 * address in list_info; stop early at the FIFO's configured length. */
441 while (k < lst_per_page) {
442 int l = (j * lst_per_page) + k;
443 if (l == config->tx_cfg[i].fifo_len)
445 mac_control->fifos[i].list_info[l].list_virt_addr =
446 tmp_v + (k * lst_size);
447 mac_control->fifos[i].list_info[l].list_phy_addr =
448 tmp_p + (k * lst_size);
454 /* Allocation and initialization of RXDs in Rings */
/* Each ring's RxD count must be an exact multiple of the per-block
 * capacity; pkt_cnt excludes the one link-RxD consumed per block. */
456 for (i = 0; i < config->rx_ring_num; i++) {
457 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
458 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
459 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
461 DBG_PRINT(ERR_DBG, "RxDs per Block");
464 size += config->rx_cfg[i].num_rxd;
465 mac_control->rings[i].block_count =
466 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
467 mac_control->rings[i].pkt_cnt =
468 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
470 size = (size * (sizeof(RxD_t)));
473 for (i = 0; i < config->rx_ring_num; i++) {
474 mac_control->rings[i].rx_curr_get_info.block_index = 0;
475 mac_control->rings[i].rx_curr_get_info.offset = 0;
476 mac_control->rings[i].rx_curr_get_info.ring_len =
477 config->rx_cfg[i].num_rxd - 1;
478 mac_control->rings[i].rx_curr_put_info.block_index = 0;
479 mac_control->rings[i].rx_curr_put_info.offset = 0;
480 mac_control->rings[i].rx_curr_put_info.ring_len =
481 config->rx_cfg[i].num_rxd - 1;
482 mac_control->rings[i].nic = nic;
483 mac_control->rings[i].ring_no = i;
486 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
487 /* Allocating all the Rx blocks */
488 for (j = 0; j < blk_cnt; j++) {
489 #ifndef CONFIG_2BUFF_MODE
490 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
492 size = SIZE_OF_BLOCK;
494 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
496 if (tmp_v_addr == NULL) {
498 * In case of failure, free_shared_mem()
499 * is called, which should free any
500 * memory that was alloced till the
503 mac_control->rings[i].rx_blocks[j].block_virt_addr =
507 memset(tmp_v_addr, 0, size);
508 mac_control->rings[i].rx_blocks[j].block_virt_addr =
510 mac_control->rings[i].rx_blocks[j].block_dma_addr =
513 /* Interlinking all Rx Blocks */
/* Circular chain: block j's last RxD points at block (j+1) % blk_cnt,
 * in both virtual and bus-address terms. */
514 for (j = 0; j < blk_cnt; j++) {
516 mac_control->rings[i].rx_blocks[j].block_virt_addr;
518 mac_control->rings[i].rx_blocks[(j + 1) %
519 blk_cnt].block_virt_addr;
521 mac_control->rings[i].rx_blocks[j].block_dma_addr;
523 mac_control->rings[i].rx_blocks[(j + 1) %
524 blk_cnt].block_dma_addr;
526 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
527 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
530 #ifndef CONFIG_2BUFF_MODE
531 pre_rxd_blk->reserved_2_pNext_RxD_block =
532 (unsigned long) tmp_v_addr_next;
534 pre_rxd_blk->pNext_RxD_Blk_physical =
535 (u64) tmp_p_addr_next;
539 #ifdef CONFIG_2BUFF_MODE
541 * Allocation of Storages for buffer addresses in 2BUFF mode
542 * and the buffers as well.
544 for (i = 0; i < config->rx_ring_num; i++) {
546 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
547 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
549 if (!mac_control->rings[i].ba)
551 for (j = 0; j < blk_cnt; j++) {
553 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
554 (MAX_RXDS_PER_BLOCK + 1)),
556 if (!mac_control->rings[i].ba[j])
558 while (k != MAX_RXDS_PER_BLOCK) {
559 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE, then round the working pointer (ba_0/ba_1)
 * down to an aligned boundary; ba_*_org keeps the kfree()-able original. */
561 ba->ba_0_org = (void *) kmalloc
562 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
565 tmp = (unsigned long) ba->ba_0_org;
567 tmp &= ~((unsigned long) ALIGN_SIZE);
568 ba->ba_0 = (void *) tmp;
570 ba->ba_1_org = (void *) kmalloc
571 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
574 tmp = (unsigned long) ba->ba_1_org;
576 tmp &= ~((unsigned long) ALIGN_SIZE);
577 ba->ba_1 = (void *) tmp;
584 /* Allocation and initialization of Statistics block */
585 size = sizeof(StatInfo_t);
586 mac_control->stats_mem = pci_alloc_consistent
587 (nic->pdev, size, &mac_control->stats_mem_phy);
589 if (!mac_control->stats_mem) {
591 * In case of failure, free_shared_mem() is called, which
592 * should free any memory that was alloced till the
597 mac_control->stats_mem_sz = size;
599 tmp_v_addr = mac_control->stats_mem;
600 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
601 memset(tmp_v_addr, 0, size);
602 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
603 (unsigned long long) tmp_p_addr);
609 * free_shared_mem - Free the allocated Memory
610 * @nic: Device private variable.
611 * Description: This function is to free all memory locations allocated by
612 * the init_shared_mem() function and return it to the kernel.
 * NOTE(review): several lines (early NULL check on @nic, some continue/
 * break statements, closing braces) are elided from this extract.  Must
 * tolerate partially-initialized state since it is the error-unwind path
 * of init_shared_mem() as well.
615 static void free_shared_mem(struct s2io_nic *nic)
617 int i, j, blk_cnt, size;
619 dma_addr_t tmp_p_addr;
620 mac_info_t *mac_control;
621 struct config_param *config;
622 int lst_size, lst_per_page;
623 struct net_device *dev = nic->dev;
628 mac_control = &nic->mac_control;
629 config = &nic->config;
/* Mirror init_shared_mem()'s TxDL page layout to free page-by-page. */
631 lst_size = (sizeof(TxD_t) * config->max_txds);
632 lst_per_page = PAGE_SIZE / lst_size;
634 for (i = 0; i < config->tx_fifo_num; i++) {
635 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
637 for (j = 0; j < page_num; j++) {
638 int mem_blks = (j * lst_per_page);
639 if (!mac_control->fifos[i].list_info)
641 if (!mac_control->fifos[i].list_info[mem_blks].
644 pci_free_consistent(nic->pdev, PAGE_SIZE,
645 mac_control->fifos[i].
648 mac_control->fifos[i].
652 /* If we got a zero DMA address during allocation,
655 if (mac_control->zerodma_virt_addr) {
656 pci_free_consistent(nic->pdev, PAGE_SIZE,
657 mac_control->zerodma_virt_addr,
660 "%s: Freeing TxDL with zero DMA addr. ", dev->name);
661 DBG_PRINT(INIT_DBG, "Virtual address %llx\n",
662 (u64)(mac_control->zerodma_virt_addr));
664 kfree(mac_control->fifos[i].list_info);
/* Rx block size must match what init_shared_mem() allocated per mode. */
667 #ifndef CONFIG_2BUFF_MODE
668 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
670 size = SIZE_OF_BLOCK;
672 for (i = 0; i < config->rx_ring_num; i++) {
673 blk_cnt = mac_control->rings[i].block_count;
674 for (j = 0; j < blk_cnt; j++) {
675 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
677 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
679 if (tmp_v_addr == NULL)
681 pci_free_consistent(nic->pdev, size,
682 tmp_v_addr, tmp_p_addr);
686 #ifdef CONFIG_2BUFF_MODE
687 /* Freeing buffer storage addresses in 2BUFF mode. */
688 for (i = 0; i < config->rx_ring_num; i++) {
690 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
691 for (j = 0; j < blk_cnt; j++) {
693 if (!mac_control->rings[i].ba[j])
695 while (k != MAX_RXDS_PER_BLOCK) {
696 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
701 kfree(mac_control->rings[i].ba[j]);
/* kfree(NULL) is a no-op, so this guard is redundant but harmless. */
703 if (mac_control->rings[i].ba)
704 kfree(mac_control->rings[i].ba);
708 if (mac_control->stats_mem) {
709 pci_free_consistent(nic->pdev,
710 mac_control->stats_mem_sz,
711 mac_control->stats_mem,
712 mac_control->stats_mem_phy);
717 * s2io_verify_pci_mode -
 * Reads the adapter's pci_mode register and returns -1 if the hardware
 * reports an unrecognized PCI/PCI-X mode.  NOTE(review): the tail of the
 * function (presumably 'return mode;') is elided from this extract.
720 static int s2io_verify_pci_mode(nic_t *nic)
722 XENA_dev_config_t __iomem *bar0 = nic->bar0;
723 register u64 val64 = 0;
726 val64 = readq(&bar0->pci_mode);
727 mode = (u8)GET_PCI_MODE(val64);
729 if ( val64 & PCI_MODE_UNKNOWN_MODE)
730 return -1; /* Unknown PCI mode */
736 * s2io_print_pci_mode -
 * Logs the detected PCI/PCI-X bus width and frequency and records the
 * effective bus speed (MHz) in config->bus_speed.  Returns -1 for an
 * unknown mode or unsupported speed.  NOTE(review): the switch header,
 * break statements and final return are elided from this extract.
738 static int s2io_print_pci_mode(nic_t *nic)
740 XENA_dev_config_t __iomem *bar0 = nic->bar0;
741 register u64 val64 = 0;
743 struct config_param *config = &nic->config;
745 val64 = readq(&bar0->pci_mode);
746 mode = (u8)GET_PCI_MODE(val64);
748 if ( val64 & PCI_MODE_UNKNOWN_MODE)
749 return -1; /* Unknown PCI mode */
751 if (val64 & PCI_MODE_32_BITS) {
752 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
754 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
/* bus_speed is intentionally higher than the raw bus clock for several
 * PCI-X modes (see "Herc doubles the clock rate" note below). */
758 case PCI_MODE_PCI_33:
759 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
760 config->bus_speed = 33;
762 case PCI_MODE_PCI_66:
763 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
764 config->bus_speed = 133;
766 case PCI_MODE_PCIX_M1_66:
767 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
768 config->bus_speed = 133; /* Herc doubles the clock rate */
770 case PCI_MODE_PCIX_M1_100:
771 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
772 config->bus_speed = 200;
774 case PCI_MODE_PCIX_M1_133:
775 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
776 config->bus_speed = 266;
778 case PCI_MODE_PCIX_M2_66:
779 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
780 config->bus_speed = 133;
782 case PCI_MODE_PCIX_M2_100:
783 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
784 config->bus_speed = 200;
786 case PCI_MODE_PCIX_M2_133:
787 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
788 config->bus_speed = 266;
791 return -1; /* Unsupported bus speed */
798 * init_nic - Initialization of hardware
799 * @nic: device private variable
800 * Description: The function sequentially configures every block
801 * of the H/W from their reset values.
802 * Return Value: SUCCESS on success and
803 * '-1' on failure (endian settings incorrect).
806 static int init_nic(struct s2io_nic *nic)
808 XENA_dev_config_t __iomem *bar0 = nic->bar0;
809 struct net_device *dev = nic->dev;
810 register u64 val64 = 0;
814 mac_info_t *mac_control;
815 struct config_param *config;
816 int mdio_cnt = 0, dtx_cnt = 0;
817 unsigned long long mem_share;
820 mac_control = &nic->mac_control;
821 config = &nic->config;
823 /* to set the swapper control on the card */
824 if(s2io_set_swapper(nic)) {
825 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
830 * Herc requires EOI to be removed from reset before XGXS, so..
832 if (nic->device_type & XFRAME_II_DEVICE) {
833 val64 = 0xA500000000ULL;
834 writeq(val64, &bar0->sw_reset);
836 val64 = readq(&bar0->sw_reset);
839 /* Remove XGXS from reset state */
841 writeq(val64, &bar0->sw_reset);
843 val64 = readq(&bar0->sw_reset);
845 /* Enable Receiving broadcasts */
846 add = &bar0->mac_cfg;
847 val64 = readq(&bar0->mac_cfg);
848 val64 |= MAC_RMAC_BCAST_ENABLE;
849 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
850 writel((u32) val64, add);
851 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
852 writel((u32) (val64 >> 32), (add + 4));
854 /* Read registers in all blocks */
855 val64 = readq(&bar0->mac_int_mask);
856 val64 = readq(&bar0->mc_int_mask);
857 val64 = readq(&bar0->xgxs_int_mask);
861 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
864 * Configuring the XAUI Interface of Xena.
865 * ***************************************
866 * To Configure the Xena's XAUI, one has to write a series
867 * of 64 bit values into two registers in a particular
868 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
869 * which will be defined in the array of configuration values
870 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
871 * to switch writing from one register to another. We continue
872 * writing these values until we encounter the 'END_SIGN' macro.
873 * For example, After making a series of 21 writes into
874 * dtx_control register the 'SWITCH_SIGN' appears and hence we
875 * start writing into mdio_control until we encounter END_SIGN.
877 if (nic->device_type & XFRAME_II_DEVICE) {
878 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
879 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
880 &bar0->dtx_control, UF);
882 msleep(1); /* Necessary!! */
888 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
889 if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
893 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
894 &bar0->dtx_control, UF);
895 val64 = readq(&bar0->dtx_control);
899 while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
900 if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
904 SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
905 &bar0->mdio_control, UF);
906 val64 = readq(&bar0->mdio_control);
909 if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
910 (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
918 /* Tx DMA Initialization */
920 writeq(val64, &bar0->tx_fifo_partition_0);
921 writeq(val64, &bar0->tx_fifo_partition_1);
922 writeq(val64, &bar0->tx_fifo_partition_2);
923 writeq(val64, &bar0->tx_fifo_partition_3);
926 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
928 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
929 13) | vBIT(config->tx_cfg[i].fifo_priority,
932 if (i == (config->tx_fifo_num - 1)) {
939 writeq(val64, &bar0->tx_fifo_partition_0);
943 writeq(val64, &bar0->tx_fifo_partition_1);
947 writeq(val64, &bar0->tx_fifo_partition_2);
951 writeq(val64, &bar0->tx_fifo_partition_3);
956 /* Enable Tx FIFO partition 0. */
957 val64 = readq(&bar0->tx_fifo_partition_0);
958 val64 |= BIT(0); /* To enable the FIFO partition. */
959 writeq(val64, &bar0->tx_fifo_partition_0);
962 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
963 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
965 if ((nic->device_type == XFRAME_I_DEVICE) &&
966 (get_xena_rev_id(nic->pdev) < 4))
967 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
969 val64 = readq(&bar0->tx_fifo_partition_0);
970 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
971 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
974 * Initialization of Tx_PA_CONFIG register to ignore packet
975 * integrity checking.
977 val64 = readq(&bar0->tx_pa_cfg);
978 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
979 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
980 writeq(val64, &bar0->tx_pa_cfg);
982 /* Rx DMA initialization. */
984 for (i = 0; i < config->rx_ring_num; i++) {
986 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
989 writeq(val64, &bar0->rx_queue_priority);
992 * Allocating equal share of memory to all the
996 if (nic->device_type & XFRAME_II_DEVICE)
1001 for (i = 0; i < config->rx_ring_num; i++) {
1004 mem_share = (mem_size / config->rx_ring_num +
1005 mem_size % config->rx_ring_num);
1006 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1009 mem_share = (mem_size / config->rx_ring_num);
1010 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1013 mem_share = (mem_size / config->rx_ring_num);
1014 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1017 mem_share = (mem_size / config->rx_ring_num);
1018 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1021 mem_share = (mem_size / config->rx_ring_num);
1022 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1025 mem_share = (mem_size / config->rx_ring_num);
1026 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1029 mem_share = (mem_size / config->rx_ring_num);
1030 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1033 mem_share = (mem_size / config->rx_ring_num);
1034 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1038 writeq(val64, &bar0->rx_queue_cfg);
1041 * Filling Tx round robin registers
1042 * as per the number of FIFOs
1044 switch (config->tx_fifo_num) {
1046 val64 = 0x0000000000000000ULL;
1047 writeq(val64, &bar0->tx_w_round_robin_0);
1048 writeq(val64, &bar0->tx_w_round_robin_1);
1049 writeq(val64, &bar0->tx_w_round_robin_2);
1050 writeq(val64, &bar0->tx_w_round_robin_3);
1051 writeq(val64, &bar0->tx_w_round_robin_4);
1054 val64 = 0x0000010000010000ULL;
1055 writeq(val64, &bar0->tx_w_round_robin_0);
1056 val64 = 0x0100000100000100ULL;
1057 writeq(val64, &bar0->tx_w_round_robin_1);
1058 val64 = 0x0001000001000001ULL;
1059 writeq(val64, &bar0->tx_w_round_robin_2);
1060 val64 = 0x0000010000010000ULL;
1061 writeq(val64, &bar0->tx_w_round_robin_3);
1062 val64 = 0x0100000000000000ULL;
1063 writeq(val64, &bar0->tx_w_round_robin_4);
1066 val64 = 0x0001000102000001ULL;
1067 writeq(val64, &bar0->tx_w_round_robin_0);
1068 val64 = 0x0001020000010001ULL;
1069 writeq(val64, &bar0->tx_w_round_robin_1);
1070 val64 = 0x0200000100010200ULL;
1071 writeq(val64, &bar0->tx_w_round_robin_2);
1072 val64 = 0x0001000102000001ULL;
1073 writeq(val64, &bar0->tx_w_round_robin_3);
1074 val64 = 0x0001020000000000ULL;
1075 writeq(val64, &bar0->tx_w_round_robin_4);
1078 val64 = 0x0001020300010200ULL;
1079 writeq(val64, &bar0->tx_w_round_robin_0);
1080 val64 = 0x0100000102030001ULL;
1081 writeq(val64, &bar0->tx_w_round_robin_1);
1082 val64 = 0x0200010000010203ULL;
1083 writeq(val64, &bar0->tx_w_round_robin_2);
1084 val64 = 0x0001020001000001ULL;
1085 writeq(val64, &bar0->tx_w_round_robin_3);
1086 val64 = 0x0203000100000000ULL;
1087 writeq(val64, &bar0->tx_w_round_robin_4);
1090 val64 = 0x0001000203000102ULL;
1091 writeq(val64, &bar0->tx_w_round_robin_0);
1092 val64 = 0x0001020001030004ULL;
1093 writeq(val64, &bar0->tx_w_round_robin_1);
1094 val64 = 0x0001000203000102ULL;
1095 writeq(val64, &bar0->tx_w_round_robin_2);
1096 val64 = 0x0001020001030004ULL;
1097 writeq(val64, &bar0->tx_w_round_robin_3);
1098 val64 = 0x0001000000000000ULL;
1099 writeq(val64, &bar0->tx_w_round_robin_4);
1102 val64 = 0x0001020304000102ULL;
1103 writeq(val64, &bar0->tx_w_round_robin_0);
1104 val64 = 0x0304050001020001ULL;
1105 writeq(val64, &bar0->tx_w_round_robin_1);
1106 val64 = 0x0203000100000102ULL;
1107 writeq(val64, &bar0->tx_w_round_robin_2);
1108 val64 = 0x0304000102030405ULL;
1109 writeq(val64, &bar0->tx_w_round_robin_3);
1110 val64 = 0x0001000200000000ULL;
1111 writeq(val64, &bar0->tx_w_round_robin_4);
1114 val64 = 0x0001020001020300ULL;
1115 writeq(val64, &bar0->tx_w_round_robin_0);
1116 val64 = 0x0102030400010203ULL;
1117 writeq(val64, &bar0->tx_w_round_robin_1);
1118 val64 = 0x0405060001020001ULL;
1119 writeq(val64, &bar0->tx_w_round_robin_2);
1120 val64 = 0x0304050000010200ULL;
1121 writeq(val64, &bar0->tx_w_round_robin_3);
1122 val64 = 0x0102030000000000ULL;
1123 writeq(val64, &bar0->tx_w_round_robin_4);
1126 val64 = 0x0001020300040105ULL;
1127 writeq(val64, &bar0->tx_w_round_robin_0);
1128 val64 = 0x0200030106000204ULL;
1129 writeq(val64, &bar0->tx_w_round_robin_1);
1130 val64 = 0x0103000502010007ULL;
1131 writeq(val64, &bar0->tx_w_round_robin_2);
1132 val64 = 0x0304010002060500ULL;
1133 writeq(val64, &bar0->tx_w_round_robin_3);
1134 val64 = 0x0103020400000000ULL;
1135 writeq(val64, &bar0->tx_w_round_robin_4);
1139 /* Filling the Rx round robin registers as per the
1140 * number of Rings and steering based on QoS.
1142 switch (config->rx_ring_num) {
1144 val64 = 0x8080808080808080ULL;
1145 writeq(val64, &bar0->rts_qos_steering);
1148 val64 = 0x0000010000010000ULL;
1149 writeq(val64, &bar0->rx_w_round_robin_0);
1150 val64 = 0x0100000100000100ULL;
1151 writeq(val64, &bar0->rx_w_round_robin_1);
1152 val64 = 0x0001000001000001ULL;
1153 writeq(val64, &bar0->rx_w_round_robin_2);
1154 val64 = 0x0000010000010000ULL;
1155 writeq(val64, &bar0->rx_w_round_robin_3);
1156 val64 = 0x0100000000000000ULL;
1157 writeq(val64, &bar0->rx_w_round_robin_4);
1159 val64 = 0x8080808040404040ULL;
1160 writeq(val64, &bar0->rts_qos_steering);
1163 val64 = 0x0001000102000001ULL;
1164 writeq(val64, &bar0->rx_w_round_robin_0);
1165 val64 = 0x0001020000010001ULL;
1166 writeq(val64, &bar0->rx_w_round_robin_1);
1167 val64 = 0x0200000100010200ULL;
1168 writeq(val64, &bar0->rx_w_round_robin_2);
1169 val64 = 0x0001000102000001ULL;
1170 writeq(val64, &bar0->rx_w_round_robin_3);
1171 val64 = 0x0001020000000000ULL;
1172 writeq(val64, &bar0->rx_w_round_robin_4);
1174 val64 = 0x8080804040402020ULL;
1175 writeq(val64, &bar0->rts_qos_steering);
1178 val64 = 0x0001020300010200ULL;
1179 writeq(val64, &bar0->rx_w_round_robin_0);
1180 val64 = 0x0100000102030001ULL;
1181 writeq(val64, &bar0->rx_w_round_robin_1);
1182 val64 = 0x0200010000010203ULL;
1183 writeq(val64, &bar0->rx_w_round_robin_2);
1184 val64 = 0x0001020001000001ULL;
1185 writeq(val64, &bar0->rx_w_round_robin_3);
1186 val64 = 0x0203000100000000ULL;
1187 writeq(val64, &bar0->rx_w_round_robin_4);
1189 val64 = 0x8080404020201010ULL;
1190 writeq(val64, &bar0->rts_qos_steering);
1193 val64 = 0x0001000203000102ULL;
1194 writeq(val64, &bar0->rx_w_round_robin_0);
1195 val64 = 0x0001020001030004ULL;
1196 writeq(val64, &bar0->rx_w_round_robin_1);
1197 val64 = 0x0001000203000102ULL;
1198 writeq(val64, &bar0->rx_w_round_robin_2);
1199 val64 = 0x0001020001030004ULL;
1200 writeq(val64, &bar0->rx_w_round_robin_3);
1201 val64 = 0x0001000000000000ULL;
1202 writeq(val64, &bar0->rx_w_round_robin_4);
1204 val64 = 0x8080404020201008ULL;
1205 writeq(val64, &bar0->rts_qos_steering);
1208 val64 = 0x0001020304000102ULL;
1209 writeq(val64, &bar0->rx_w_round_robin_0);
1210 val64 = 0x0304050001020001ULL;
1211 writeq(val64, &bar0->rx_w_round_robin_1);
1212 val64 = 0x0203000100000102ULL;
1213 writeq(val64, &bar0->rx_w_round_robin_2);
1214 val64 = 0x0304000102030405ULL;
1215 writeq(val64, &bar0->rx_w_round_robin_3);
1216 val64 = 0x0001000200000000ULL;
1217 writeq(val64, &bar0->rx_w_round_robin_4);
1219 val64 = 0x8080404020100804ULL;
1220 writeq(val64, &bar0->rts_qos_steering);
1223 val64 = 0x0001020001020300ULL;
1224 writeq(val64, &bar0->rx_w_round_robin_0);
1225 val64 = 0x0102030400010203ULL;
1226 writeq(val64, &bar0->rx_w_round_robin_1);
1227 val64 = 0x0405060001020001ULL;
1228 writeq(val64, &bar0->rx_w_round_robin_2);
1229 val64 = 0x0304050000010200ULL;
1230 writeq(val64, &bar0->rx_w_round_robin_3);
1231 val64 = 0x0102030000000000ULL;
1232 writeq(val64, &bar0->rx_w_round_robin_4);
1234 val64 = 0x8080402010080402ULL;
1235 writeq(val64, &bar0->rts_qos_steering);
1238 val64 = 0x0001020300040105ULL;
1239 writeq(val64, &bar0->rx_w_round_robin_0);
1240 val64 = 0x0200030106000204ULL;
1241 writeq(val64, &bar0->rx_w_round_robin_1);
1242 val64 = 0x0103000502010007ULL;
1243 writeq(val64, &bar0->rx_w_round_robin_2);
1244 val64 = 0x0304010002060500ULL;
1245 writeq(val64, &bar0->rx_w_round_robin_3);
1246 val64 = 0x0103020400000000ULL;
1247 writeq(val64, &bar0->rx_w_round_robin_4);
1249 val64 = 0x8040201008040201ULL;
1250 writeq(val64, &bar0->rts_qos_steering);
1256 for (i = 0; i < 8; i++)
1257 writeq(val64, &bar0->rts_frm_len_n[i]);
1259 /* Set the default rts frame length for the rings configured */
1260 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1261 for (i = 0 ; i < config->rx_ring_num ; i++)
1262 writeq(val64, &bar0->rts_frm_len_n[i]);
1264 /* Set the frame length for the configured rings
1265 * desired by the user
1267 for (i = 0; i < config->rx_ring_num; i++) {
1268 /* If rts_frm_len[i] == 0 then it is assumed that user not
1269 * specified frame length steering.
1270 * If the user provides the frame length then program
1271 * the rts_frm_len register for those values or else
1272 * leave it as it is.
1274 if (rts_frm_len[i] != 0) {
1275 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1276 &bar0->rts_frm_len_n[i]);
1280 /* Program statistics memory */
1281 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1283 if (nic->device_type == XFRAME_II_DEVICE) {
1284 val64 = STAT_BC(0x320);
1285 writeq(val64, &bar0->stat_byte_cnt);
1289 * Initializing the sampling rate for the device to calculate the
1290 * bandwidth utilization.
1292 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1293 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1294 writeq(val64, &bar0->mac_link_util);
1298 * Initializing the Transmit and Receive Traffic Interrupt
1302 * TTI Initialization. Default Tx timer gets us about
1303 * 250 interrupts per sec. Continuous interrupts are enabled
1306 if (nic->device_type == XFRAME_II_DEVICE) {
1307 int count = (nic->config.bus_speed * 125)/2;
1308 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1311 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1313 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1314 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1315 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1316 if (use_continuous_tx_intrs)
1317 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1318 writeq(val64, &bar0->tti_data1_mem);
1320 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1321 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1322 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1323 writeq(val64, &bar0->tti_data2_mem);
1325 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1326 writeq(val64, &bar0->tti_command_mem);
1329 * Once the operation completes, the Strobe bit of the command
1330 * register will be reset. We poll for this particular condition
1331 * We wait for a maximum of 500ms for the operation to complete,
1332 * if it's not complete by then we return error.
1336 val64 = readq(&bar0->tti_command_mem);
1337 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1341 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1349 if (nic->config.bimodal) {
1351 for (k = 0; k < config->rx_ring_num; k++) {
1352 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1353 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1354 writeq(val64, &bar0->tti_command_mem);
1357 * Once the operation completes, the Strobe bit of the command
1358 * register will be reset. We poll for this particular condition
1359 * We wait for a maximum of 500ms for the operation to complete,
1360 * if it's not complete by then we return error.
1364 val64 = readq(&bar0->tti_command_mem);
1365 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1370 "%s: TTI init Failed\n",
1380 /* RTI Initialization */
1381 if (nic->device_type == XFRAME_II_DEVICE) {
1383 * Programmed to generate Apprx 500 Intrs per
1386 int count = (nic->config.bus_speed * 125)/4;
1387 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1389 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1391 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1392 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1393 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1395 writeq(val64, &bar0->rti_data1_mem);
1397 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1398 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1399 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1400 writeq(val64, &bar0->rti_data2_mem);
1402 for (i = 0; i < config->rx_ring_num; i++) {
1403 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1404 | RTI_CMD_MEM_OFFSET(i);
1405 writeq(val64, &bar0->rti_command_mem);
1408 * Once the operation completes, the Strobe bit of the
1409 * command register will be reset. We poll for this
1410 * particular condition. We wait for a maximum of 500ms
1411 * for the operation to complete, if it's not complete
1412 * by then we return error.
1416 val64 = readq(&bar0->rti_command_mem);
1417 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1421 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1432 * Initializing proper values as Pause threshold into all
1433 * the 8 Queues on Rx side.
1435 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1436 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1438 /* Disable RMAC PAD STRIPPING */
1439 add = &bar0->mac_cfg;
1440 val64 = readq(&bar0->mac_cfg);
1441 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1442 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1443 writel((u32) (val64), add);
1444 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1445 writel((u32) (val64 >> 32), (add + 4));
1446 val64 = readq(&bar0->mac_cfg);
1449 * Set the time value to be inserted in the pause frame
1450 * generated by xena.
1452 val64 = readq(&bar0->rmac_pause_cfg);
1453 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1454 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1455 writeq(val64, &bar0->rmac_pause_cfg);
1458 * Set the Threshold Limit for Generating the pause frame
1459 * If the amount of data in any Queue exceeds ratio of
1460 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1461 * pause frame is generated
1464 for (i = 0; i < 4; i++) {
1466 (((u64) 0xFF00 | nic->mac_control.
1467 mc_pause_threshold_q0q3)
1470 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1473 for (i = 0; i < 4; i++) {
1475 (((u64) 0xFF00 | nic->mac_control.
1476 mc_pause_threshold_q4q7)
1479 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1482 * TxDMA will stop Read request if the number of read split has
1483 * exceeded the limit pointed by shared_splits
1485 val64 = readq(&bar0->pic_control);
1486 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1487 writeq(val64, &bar0->pic_control);
1490 * Programming the Herc to split every write transaction
1491 * that does not start on an ADB to reduce disconnects.
1493 if (nic->device_type == XFRAME_II_DEVICE) {
1494 val64 = WREQ_SPLIT_MASK_SET_MASK(255);
1495 writeq(val64, &bar0->wreq_split_mask);
1498 /* Setting Link stability period to 64 ms */
1499 if (nic->device_type == XFRAME_II_DEVICE) {
1500 val64 = MISC_LINK_STABILITY_PRD(3);
1501 writeq(val64, &bar0->misc_control);
1506 #define LINK_UP_DOWN_INTERRUPT 1
1507 #define MAC_RMAC_ERR_TIMER 2
/*
 * s2io_link_fault_indication - select the link-fault detection method.
 * Xframe-II adapters report link changes via the GPIO link up/down
 * interrupt; every other adapter uses the MAC RMAC error timer.  When
 * MSI/MSI-X is configured the function is replaced by a macro that
 * unconditionally selects the timer method.
 */
1509 #if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
1510 #define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
1512 int s2io_link_fault_indication(nic_t *nic)
1514 if (nic->device_type == XFRAME_II_DEVICE)
1515 return LINK_UP_DOWN_INTERRUPT;
1517 return MAC_RMAC_ERR_TIMER;
1522 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1523 * @nic: device private variable.
1524 * @mask: A mask indicating which Intr block must be modified.
1525 * @flag: A flag indicating whether to enable or disable the Intrs.
1526 * Description: This function will either disable or enable the interrupts
1527 * depending on the flag argument. The mask argument can be used to
1528 * enable/disable any Intr block.
1529 * Return Value: NONE.
1532 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1534 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1535 register u64 val64 = 0, temp64 = 0;
/*
 * For each interrupt block selected in @mask: on ENABLE_INTRS, clear the
 * block's bit in general_int_mask and program the block's own mask
 * register; on DISABLE_INTRS, mask the block's register entirely and set
 * its bit back in general_int_mask.
 */
1537 /* Top level interrupt classification */
1538 /* PIC Interrupts */
1539 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1540 /* Enable PIC Intrs in the general intr mask register */
1541 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1542 if (flag == ENABLE_INTRS) {
1543 temp64 = readq(&bar0->general_int_mask);
1544 temp64 &= ~((u64) val64);
1545 writeq(temp64, &bar0->general_int_mask);
1547 * If Hercules adapter enable GPIO otherwise
1548 * disabled all PCIX, Flash, MDIO, IIC and GPIO
1549 * interrupts for now.
/* GPIO link up/down interrupt is only unmasked when the adapter
 * reports link faults through it (Xframe-II, non-MSI). */
1552 if (s2io_link_fault_indication(nic) ==
1553 LINK_UP_DOWN_INTERRUPT ) {
1554 temp64 = readq(&bar0->pic_int_mask);
1555 temp64 &= ~((u64) PIC_INT_GPIO);
1556 writeq(temp64, &bar0->pic_int_mask);
1557 temp64 = readq(&bar0->gpio_int_mask);
1558 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1559 writeq(temp64, &bar0->gpio_int_mask);
1561 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1564 * No MSI Support is available presently, so TTI and
1565 * RTI interrupts are also disabled.
1567 } else if (flag == DISABLE_INTRS) {
1569 * Disable PIC Intrs in the general
1570 * intr mask register
1572 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1573 temp64 = readq(&bar0->general_int_mask);
/* NOTE(review): temp64 is read above but val64 is written below;
 * confirm val64 carries the merged (val64 | temp64) mask at this
 * point — the same read/merge/write pattern recurs in every
 * DISABLE_INTRS branch of this function. */
1575 writeq(val64, &bar0->general_int_mask);
1579 /* DMA Interrupts */
1580 /* Enabling/Disabling Tx DMA interrupts */
1581 if (mask & TX_DMA_INTR) {
1582 /* Enable TxDMA Intrs in the general intr mask register */
1583 val64 = TXDMA_INT_M;
1584 if (flag == ENABLE_INTRS) {
1585 temp64 = readq(&bar0->general_int_mask);
1586 temp64 &= ~((u64) val64);
1587 writeq(temp64, &bar0->general_int_mask);
1589 * Keep all interrupts other than PFC interrupt
1590 * and PCC interrupt disabled in DMA level.
1592 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1594 writeq(val64, &bar0->txdma_int_mask);
1596 * Enable only the MISC error 1 interrupt in PFC block
1598 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1599 writeq(val64, &bar0->pfc_err_mask);
1601 * Enable only the FB_ECC error interrupt in PCC block
1603 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1604 writeq(val64, &bar0->pcc_err_mask);
1605 } else if (flag == DISABLE_INTRS) {
1607 * Disable TxDMA Intrs in the general intr mask
1610 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1611 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1612 temp64 = readq(&bar0->general_int_mask);
1614 writeq(val64, &bar0->general_int_mask);
1618 /* Enabling/Disabling Rx DMA interrupts */
1619 if (mask & RX_DMA_INTR) {
1620 /* Enable RxDMA Intrs in the general intr mask register */
1621 val64 = RXDMA_INT_M;
1622 if (flag == ENABLE_INTRS) {
1623 temp64 = readq(&bar0->general_int_mask);
1624 temp64 &= ~((u64) val64);
1625 writeq(temp64, &bar0->general_int_mask);
1627 * All RxDMA block interrupts are disabled for now
1630 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1631 } else if (flag == DISABLE_INTRS) {
1633 * Disable RxDMA Intrs in the general intr mask
1636 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1637 temp64 = readq(&bar0->general_int_mask);
1639 writeq(val64, &bar0->general_int_mask);
1643 /* MAC Interrupts */
1644 /* Enabling/Disabling MAC interrupts */
1645 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1646 val64 = TXMAC_INT_M | RXMAC_INT_M;
1647 if (flag == ENABLE_INTRS) {
1648 temp64 = readq(&bar0->general_int_mask);
1649 temp64 &= ~((u64) val64);
1650 writeq(temp64, &bar0->general_int_mask);
1652 * All MAC block error interrupts are disabled for now
1655 } else if (flag == DISABLE_INTRS) {
1657 * Disable MAC Intrs in the general intr mask register
1659 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1660 writeq(DISABLE_ALL_INTRS,
1661 &bar0->mac_rmac_err_mask);
1663 temp64 = readq(&bar0->general_int_mask);
1665 writeq(val64, &bar0->general_int_mask);
1669 /* XGXS Interrupts */
1670 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1671 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1672 if (flag == ENABLE_INTRS) {
1673 temp64 = readq(&bar0->general_int_mask);
1674 temp64 &= ~((u64) val64);
1675 writeq(temp64, &bar0->general_int_mask);
1677 * All XGXS block error interrupts are disabled for now
1680 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1681 } else if (flag == DISABLE_INTRS) {
1683 * Disable MC Intrs in the general intr mask register
1685 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1686 temp64 = readq(&bar0->general_int_mask);
1688 writeq(val64, &bar0->general_int_mask);
1692 /* Memory Controller(MC) interrupts */
1693 if (mask & MC_INTR) {
1695 if (flag == ENABLE_INTRS) {
1696 temp64 = readq(&bar0->general_int_mask);
1697 temp64 &= ~((u64) val64);
1698 writeq(temp64, &bar0->general_int_mask);
1700 * Enable all MC Intrs.
1702 writeq(0x0, &bar0->mc_int_mask);
1703 writeq(0x0, &bar0->mc_err_mask);
1704 } else if (flag == DISABLE_INTRS) {
1706 * Disable MC Intrs in the general intr mask register
1708 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1709 temp64 = readq(&bar0->general_int_mask);
1711 writeq(val64, &bar0->general_int_mask);
1716 /* Tx traffic interrupts */
1717 if (mask & TX_TRAFFIC_INTR) {
1718 val64 = TXTRAFFIC_INT_M;
1719 if (flag == ENABLE_INTRS) {
1720 temp64 = readq(&bar0->general_int_mask);
1721 temp64 &= ~((u64) val64);
1722 writeq(temp64, &bar0->general_int_mask);
1724 * Enable all the Tx side interrupts
1725 * writing 0 Enables all 64 TX interrupt levels
1727 writeq(0x0, &bar0->tx_traffic_mask);
1728 } else if (flag == DISABLE_INTRS) {
1730 * Disable Tx Traffic Intrs in the general intr mask
1733 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1734 temp64 = readq(&bar0->general_int_mask);
1736 writeq(val64, &bar0->general_int_mask);
1740 /* Rx traffic interrupts */
1741 if (mask & RX_TRAFFIC_INTR) {
1742 val64 = RXTRAFFIC_INT_M;
1743 if (flag == ENABLE_INTRS) {
1744 temp64 = readq(&bar0->general_int_mask);
1745 temp64 &= ~((u64) val64);
1746 writeq(temp64, &bar0->general_int_mask);
1747 /* writing 0 Enables all 8 RX interrupt levels */
1748 writeq(0x0, &bar0->rx_traffic_mask);
1749 } else if (flag == DISABLE_INTRS) {
1751 * Disable Rx Traffic Intrs in the general intr mask
1754 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1755 temp64 = readq(&bar0->general_int_mask);
1757 writeq(val64, &bar0->general_int_mask);
/*
 * check_prc_pcc_state - helper for verify_xena_quiescence().
 * Tests the RMAC PCC idle and RC PRC quiescent bits of the adapter
 * status word.  Which PCC bit pattern applies depends on the silicon:
 * Herc, and Xena revisions >= 4, use ADAPTER_STATUS_RMAC_PCC_IDLE,
 * older Xena revisions use ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE.
 * @flag == FALSE means the adapter-enable bit was never written
 * (PCC must NOT be idle yet); otherwise the inverted test applies.
 */
1762 static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
1766 if (flag == FALSE) {
1767 if ((!herc && (rev_id >= 4)) || herc) {
1768 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1769 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1770 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1774 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1775 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1776 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
/* Adapter-enable has been written at least once: PCC must now
 * report idle, and PRC either quiescent or not asserted at all. */
1781 if ((!herc && (rev_id >= 4)) || herc) {
1782 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1783 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1784 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1785 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1786 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1790 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1791 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1792 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1793 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1794 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1803 * verify_xena_quiescence - Checks whether the H/W is ready
1804 * @val64 : Value read from adapter status register.
1805 * @flag : indicates if the adapter enable bit was ever written once
1807 * Description: Returns whether the H/W is ready to go or not. Depending
1808 * on whether adapter enable bit was written or not the comparison
1809 * differs and the calling function passes the input argument flag to
1811 * Return: 1 if Xena is quiescent
1812 * 0 if Xena is not quiescent
1815 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
/* Complement of the status word: "ready" bits can then be tested as
 * zeroes in a single mask comparison. */
1818 u64 tmp64 = ~((u64) val64);
1819 int rev_id = get_xena_rev_id(sp->pdev);
1821 herc = (sp->device_type == XFRAME_II_DEVICE);
/* All of the DMA, PFC, TMAC, PIC, MC-DRAM/queue and PLL-lock ready
 * bits must be set before the PRC/PCC state is examined by the
 * check_prc_pcc_state() helper. */
1824 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1825 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1826 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1827 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1828 ADAPTER_STATUS_P_PLL_LOCK))) {
1829 ret = check_prc_pcc_state(val64, flag, rev_id, herc);
1836 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1837 * @sp: Pointer to device specifc structure
1839 * New procedure to clear mac address reading problems on Alpha platforms
1843 void fix_mac_address(nic_t * sp)
1845 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Replay the canned fix_mac[] value sequence into gpio_control until
 * the END_SIGN terminator is reached. */
1849 while (fix_mac[i] != END_SIGN) {
1850 writeq(fix_mac[i++], &bar0->gpio_control);
/* Read back after each write — presumably to flush the posted PCI
 * write before the next one; TODO confirm. */
1852 val64 = readq(&bar0->gpio_control);
1857 * start_nic - Turns the device on
1858 * @nic : device private variable.
1860 * This function actually turns the device on. Before this function is
1861 * called,all Registers are configured from their reset states
1862 * and shared memory is allocated but the NIC is still quiescent. On
1863 * calling this function, the device interrupts are cleared and the NIC is
1864 * literally switched on by writing into the adapter control register.
1866 * SUCCESS on success and -1 on failure.
1869 static int start_nic(struct s2io_nic *nic)
1871 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1872 struct net_device *dev = nic->dev;
1873 register u64 val64 = 0;
1876 mac_info_t *mac_control;
1877 struct config_param *config;
1879 mac_control = &nic->mac_control;
1880 config = &nic->config;
1882 /* PRC Initialization and configuration */
/* Point each PRC at the first RxD block of its ring and enable it. */
1883 for (i = 0; i < config->rx_ring_num; i++) {
1884 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1885 &bar0->prc_rxd0_n[i]);
1887 val64 = readq(&bar0->prc_ctrl_n[i]);
1888 if (nic->config.bimodal)
1889 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1890 #ifndef CONFIG_2BUFF_MODE
1891 val64 |= PRC_CTRL_RC_ENABLED;
1893 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1895 writeq(val64, &bar0->prc_ctrl_n[i]);
1898 #ifdef CONFIG_2BUFF_MODE
1899 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1900 val64 = readq(&bar0->rx_pa_cfg);
1901 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1902 writeq(val64, &bar0->rx_pa_cfg);
1906 * Enabling MC-RLDRAM. After enabling the device, we timeout
1907 * for around 100ms, which is approximately the time required
1908 * for the device to be ready for operation.
1910 val64 = readq(&bar0->mc_rldram_mrs);
1911 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1912 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1913 val64 = readq(&bar0->mc_rldram_mrs);
1915 msleep(100); /* Delay by around 100 ms. */
1917 /* Enabling ECC Protection. */
/* NOTE(review): the comment above says "Enabling" but the code CLEARS
 * ADAPTER_ECC_EN — confirm the polarity of this bit against the
 * hardware manual before touching this sequence. */
1918 val64 = readq(&bar0->adapter_control);
1919 val64 &= ~ADAPTER_ECC_EN;
1920 writeq(val64, &bar0->adapter_control);
1923 * Clearing any possible Link state change interrupts that
1924 * could have popped up just before Enabling the card.
1926 val64 = readq(&bar0->mac_rmac_err_reg);
/* Write-1-to-clear: writing the value back acknowledges the bits. */
1928 writeq(val64, &bar0->mac_rmac_err_reg);
1931 * Verify if the device is ready to be enabled, if so enable
1934 val64 = readq(&bar0->adapter_status);
1935 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1936 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1937 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1938 (unsigned long long) val64);
1942 /* Enable select interrupts */
1943 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
1944 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
1945 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
1947 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1950 * With some switches, link might be already up at this point.
1951 * Because of this weird behavior, when we enable laser,
1952 * we may not get link. We need to handle this. We cannot
1953 * figure out which switch is misbehaving. So we are forced to
1954 * make a global change.
1957 /* Enabling Laser. */
1958 val64 = readq(&bar0->adapter_control);
1959 val64 |= ADAPTER_EOI_TX_ON;
1960 writeq(val64, &bar0->adapter_control);
1962 /* SXE-002: Initialize link and activity LED */
1963 subid = nic->pdev->subsystem_device;
1964 if (((subid & 0xFF) >= 0x07) &&
1965 (nic->device_type == XFRAME_I_DEVICE)) {
1966 val64 = readq(&bar0->gpio_control);
1967 val64 |= 0x0000800000000000ULL;
1968 writeq(val64, &bar0->gpio_control);
/* Magic LED init value written at raw BAR0 offset 0x2700 (errata
 * workaround, no named register field). */
1969 val64 = 0x0411040400000000ULL;
1970 writeq(val64, (void __iomem *)bar0 + 0x2700);
1974 * Don't see link state interrupts on certain switches, so
1975 * directly scheduling a link state task from here.
1977 schedule_work(&nic->set_link_task);
1983 * free_tx_buffers - Free all queued Tx buffers
1984 * @nic : device private variable.
1986 * Free all queued Tx buffers.
1987 * Return Value: void
1990 static void free_tx_buffers(struct s2io_nic *nic)
1992 struct net_device *dev = nic->dev;
1993 struct sk_buff *skb;
1996 mac_info_t *mac_control;
1997 struct config_param *config;
1998 int cnt = 0, frg_cnt;
2000 mac_control = &nic->mac_control;
2001 config = &nic->config;
/* Walk every TxD of every FIFO: unmap the DMA buffers, free the skb
 * still attached to the descriptor, and zero the descriptor(s). */
2003 for (i = 0; i < config->tx_fifo_num; i++) {
2004 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2005 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2008 (struct sk_buff *) ((unsigned long) txdp->
2011 memset(txdp, 0, sizeof(TxD_t) *
2015 frg_cnt = skb_shinfo(skb)->nr_frags;
/* First buffer holds the linear part of the skb. */
2016 pci_unmap_single(nic->pdev, (dma_addr_t)
2017 txdp->Buffer_Pointer,
2018 skb->len - skb->data_len,
/* Remaining TxDs (if any) map the paged fragments. */
2024 for (j = 0; j < frg_cnt; j++, txdp++) {
2026 &skb_shinfo(skb)->frags[j];
2027 pci_unmap_page(nic->pdev,
2037 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
2041 "%s:forcibly freeing %d skbs on FIFO%d\n",
/* Reset the FIFO's producer/consumer offsets for reuse. */
2043 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2044 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2049 * stop_nic - To stop the nic
2050 * @nic : device private variable.
2052 * This function does exactly the opposite of what the start_nic()
2053 * function does. This function is called to stop the device.
2058 static void stop_nic(struct s2io_nic *nic)
2060 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2061 register u64 val64 = 0;
2062 u16 interruptible, i;
2063 mac_info_t *mac_control;
2064 struct config_param *config;
2066 mac_control = &nic->mac_control;
2067 config = &nic->config;
2069 /* Disable all interrupts */
/* Same interrupt set that start_nic() enables, now disabled. */
2070 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2071 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2072 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2073 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Stop each ring's PRC by clearing its receive-control enable bit. */
2076 for (i = 0; i < config->rx_ring_num; i++) {
2077 val64 = readq(&bar0->prc_ctrl_n[i]);
2078 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
2079 writeq(val64, &bar0->prc_ctrl_n[i]);
2084 * fill_rx_buffers - Allocates the Rx side skbs
2085 * @nic: device private variable
2086 * @ring_no: ring number
2088 * The function allocates Rx side skbs and puts the physical
2089 * address of these buffers into the RxD buffer pointers, so that the NIC
2090 * can DMA the received frame into these locations.
2091 * The NIC supports 3 receive modes, viz
2093 * 2. three buffer and
2094 * 3. Five buffer modes.
2095 * Each mode defines how many fragments the received frame will be split
2096 * up into by the NIC. The frame is split into L3 header, L4 Header,
2097 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2098 * is split into 3 fragments. As of now only single buffer mode is
2101 * SUCCESS on success or an appropriate -ve value on failure.
2104 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2106 struct net_device *dev = nic->dev;
2107 struct sk_buff *skb;
2109 int off, off1, size, block_no, block_no1;
2110 int offset, offset1;
2113 mac_info_t *mac_control;
2114 struct config_param *config;
2115 #ifdef CONFIG_2BUFF_MODE
2120 dma_addr_t rxdpphys;
2122 #ifndef CONFIG_S2IO_NAPI
2123 unsigned long flags;
/* First descriptor touched in this pass; its ownership is handed to
 * the adapter last, after a memory barrier (see end of function). */
2125 RxD_t *first_rxdp = NULL;
2127 mac_control = &nic->mac_control;
2128 config = &nic->config;
/* Number of RxDs to replenish = ring capacity minus buffers already
 * posted to the adapter. */
2129 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2130 atomic_read(&nic->rx_bufs_left[ring_no]);
2131 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2132 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2134 while (alloc_tab < alloc_cnt) {
2135 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2137 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
2139 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2140 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2141 #ifndef CONFIG_2BUFF_MODE
2142 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
2143 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
2145 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
2146 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
2149 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2150 block_virt_addr + off;
/* Put pointer caught up with get pointer while the descriptor is
 * still holding an skb: ring is full, stop replenishing. */
2151 if ((offset == offset1) && (rxdp->Host_Control)) {
2152 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
2153 DBG_PRINT(INTR_DBG, " info equated\n");
2156 #ifndef CONFIG_2BUFF_MODE
/* End-of-block marker: advance to the next RxD block. */
2157 if (rxdp->Control_1 == END_OF_BLOCK) {
2158 mac_control->rings[ring_no].rx_curr_put_info.
2160 mac_control->rings[ring_no].rx_curr_put_info.
2161 block_index %= mac_control->rings[ring_no].block_count;
2162 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2165 off %= (MAX_RXDS_PER_BLOCK + 1);
2166 mac_control->rings[ring_no].rx_curr_put_info.offset =
2168 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
2169 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2172 #ifndef CONFIG_S2IO_NAPI
/* put_pos is shared with the ISR path, so update under put_lock. */
2173 spin_lock_irqsave(&nic->put_lock, flags);
2174 mac_control->rings[ring_no].put_pos =
2175 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
2176 spin_unlock_irqrestore(&nic->put_lock, flags);
2179 if (rxdp->Host_Control == END_OF_BLOCK) {
2180 mac_control->rings[ring_no].rx_curr_put_info.
2182 mac_control->rings[ring_no].rx_curr_put_info.block_index
2183 %= mac_control->rings[ring_no].block_count;
2184 block_no = mac_control->rings[ring_no].rx_curr_put_info
2187 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
2188 dev->name, block_no,
2189 (unsigned long long) rxdp->Control_1);
2190 mac_control->rings[ring_no].rx_curr_put_info.offset =
2192 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
2195 #ifndef CONFIG_S2IO_NAPI
2196 spin_lock_irqsave(&nic->put_lock, flags);
2197 mac_control->rings[ring_no].put_pos = (block_no *
2198 (MAX_RXDS_PER_BLOCK + 1)) + off;
2199 spin_unlock_irqrestore(&nic->put_lock, flags);
2203 #ifndef CONFIG_2BUFF_MODE
/* Descriptor still owned by the adapter: nothing to do here. */
2204 if (rxdp->Control_1 & RXD_OWN_XENA)
2206 if (rxdp->Control_2 & BIT(0))
2209 mac_control->rings[ring_no].rx_curr_put_info.
2213 #ifdef CONFIG_2BUFF_MODE
2215 * RxDs Spanning cache lines will be replenished only
2216 * if the succeeding RxD is also owned by Host. It
2217 * will always be the ((8*i)+3) and ((8*i)+6)
2218 * descriptors for the 48 byte descriptor. The offending
2219 * decsriptor is of-course the 3rd descriptor.
2221 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
2222 block_dma_addr + (off * sizeof(RxD_t));
2223 if (((u64) (rxdpphys)) % 128 > 80) {
2224 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
2225 block_virt_addr + (off + 1);
2226 if (rxdpnext->Host_Control == END_OF_BLOCK) {
2227 nextblk = (block_no + 1) %
2228 (mac_control->rings[ring_no].block_count);
2229 rxdpnext = mac_control->rings[ring_no].rx_blocks
2230 [nextblk].block_virt_addr;
2232 if (rxdpnext->Control_2 & BIT(0))
2237 #ifndef CONFIG_2BUFF_MODE
2238 skb = dev_alloc_skb(size + NET_IP_ALIGN);
2240 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
/* Allocation failed: hand over the first descriptor (if any) so the
 * adapter can use what was replenished so far, then bail out. */
2243 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2244 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
2247 first_rxdp->Control_1 |= RXD_OWN_XENA;
2251 #ifndef CONFIG_2BUFF_MODE
2252 skb_reserve(skb, NET_IP_ALIGN);
2253 memset(rxdp, 0, sizeof(RxD_t));
2254 rxdp->Buffer0_ptr = pci_map_single
2255 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
2256 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
2257 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
/* Stash the skb pointer in the descriptor for the completion path. */
2258 rxdp->Host_Control = (unsigned long) (skb);
2259 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2260 rxdp->Control_1 |= RXD_OWN_XENA;
2262 off %= (MAX_RXDS_PER_BLOCK + 1);
2263 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2265 ba = &mac_control->rings[ring_no].ba[block_no][off];
2266 skb_reserve(skb, BUF0_LEN);
2267 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
2269 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
2271 memset(rxdp, 0, sizeof(RxD_t));
/* 2-buffer mode: buffer 2 receives the payload, buffers 0/1 come
 * from the per-descriptor ba scratch area. */
2272 rxdp->Buffer2_ptr = pci_map_single
2273 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
2274 PCI_DMA_FROMDEVICE);
2276 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2277 PCI_DMA_FROMDEVICE);
2279 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
2280 PCI_DMA_FROMDEVICE);
2282 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
2283 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
2284 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
2285 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
2286 rxdp->Host_Control = (u64) ((unsigned long) (skb));
2287 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2288 rxdp->Control_1 |= RXD_OWN_XENA;
2290 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2292 rxdp->Control_2 |= SET_RXD_MARKER;
/* Every (1 << rxsync_frequency) descriptors, flush ownership of the
 * batch to the adapter and start tracking a new first descriptor. */
2294 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2297 first_rxdp->Control_1 |= RXD_OWN_XENA;
2301 atomic_inc(&nic->rx_bufs_left[ring_no]);
2306 /* Transfer ownership of first descriptor to adapter just before
2307 * exiting. Before that, use memory barrier so that ownership
2308 * and other fields are seen by adapter correctly.
2312 first_rxdp->Control_1 |= RXD_OWN_XENA;
2319 * free_rx_buffers - Frees all Rx buffers
2320 * @sp: device private variable.
2322 * This function will free all Rx buffers allocated by host.
2327 static void free_rx_buffers(struct s2io_nic *sp)
2329 struct net_device *dev = sp->dev;
2330 int i, j, blk = 0, off, buf_cnt = 0;
2332 struct sk_buff *skb;
2333 mac_info_t *mac_control;
2334 struct config_param *config;
2335 #ifdef CONFIG_2BUFF_MODE
2339 mac_control = &sp->mac_control;
2340 config = &sp->config;
/* For every ring, walk all RxDs: unmap DMA buffers, free any skb still
 * attached, zero the descriptor, and finally reset ring bookkeeping. */
2342 for (i = 0; i < config->rx_ring_num; i++) {
2343 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2344 off = j % (MAX_RXDS_PER_BLOCK + 1);
2345 rxdp = mac_control->rings[i].rx_blocks[blk].
2346 block_virt_addr + off;
2348 #ifndef CONFIG_2BUFF_MODE
/* End-of-block sentinel: step into the next descriptor block. */
2349 if (rxdp->Control_1 == END_OF_BLOCK) {
2351 (RxD_t *) ((unsigned long) rxdp->
2357 if (rxdp->Host_Control == END_OF_BLOCK) {
2363 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2364 memset(rxdp, 0, sizeof(RxD_t));
2369 (struct sk_buff *) ((unsigned long) rxdp->
2372 #ifndef CONFIG_2BUFF_MODE
2373 pci_unmap_single(sp->pdev, (dma_addr_t)
2376 HEADER_ETHERNET_II_802_3_SIZE
2377 + HEADER_802_2_SIZE +
2379 PCI_DMA_FROMDEVICE);
/* 2-buffer mode: three mappings per descriptor (ba_0, ba_1 and
 * the payload buffer) must each be unmapped. */
2381 ba = &mac_control->rings[i].ba[blk][off];
2382 pci_unmap_single(sp->pdev, (dma_addr_t)
2385 PCI_DMA_FROMDEVICE);
2386 pci_unmap_single(sp->pdev, (dma_addr_t)
2389 PCI_DMA_FROMDEVICE);
2390 pci_unmap_single(sp->pdev, (dma_addr_t)
2392 dev->mtu + BUF0_LEN + 4,
2393 PCI_DMA_FROMDEVICE);
2396 atomic_dec(&sp->rx_bufs_left[i]);
2399 memset(rxdp, 0, sizeof(RxD_t));
/* Ring is empty again: rewind put/get cursors and the counter. */
2401 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2402 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2403 mac_control->rings[i].rx_curr_put_info.offset = 0;
2404 mac_control->rings[i].rx_curr_get_info.offset = 0;
2405 atomic_set(&sp->rx_bufs_left[i], 0);
2406 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2407 dev->name, buf_cnt, i);
2412 * s2io_poll - Rx interrupt handler for NAPI support
2413 * @dev : pointer to the device structure.
2414 * @budget : The number of packets that were budgeted to be processed
2415 * during one pass through the 'Poll' function.
2417 * Comes into picture only if NAPI support has been incorporated. It does
2418 * the same thing that rx_intr_handler does, but not in an interrupt context;
2419 * also it will process only a given number of packets.
2421 * 0 on success and 1 if there are No Rx packets to be processed.
2424 #if defined(CONFIG_S2IO_NAPI)
/*
 * s2io_poll - NAPI poll routine.
 * @dev: network device.
 * @budget: in/out packet budget for this poll pass (old-style NAPI API).
 *
 * Clamps the per-pass quota to min(*budget, dev->quota), acknowledges the
 * Rx traffic interrupt by writing back rx_traffic_int, then runs
 * rx_intr_handler() on every ring.  On the "quota exhausted" path the
 * rings are replenished and the function returns with interrupts still
 * disabled; on the "all work done" path netif_rx_complete() is called,
 * rings are replenished and Rx interrupts are re-enabled.
 *
 * NOTE(review): lines are missing from this extract (return statements,
 * braces, the adjustment of *budget), so the two exit paths below appear
 * merged; consult the full source.
 */
2425 static int s2io_poll(struct net_device *dev, int *budget)
2427 nic_t *nic = dev->priv;
2428 int pkt_cnt = 0, org_pkts_to_process;
2429 mac_info_t *mac_control;
2430 struct config_param *config;
2431 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* isr_cnt tracks in-flight interrupt/poll work (paired with the
 * atomic_dec on both exit paths below). */
2435 atomic_inc(&nic->isr_cnt);
2436 mac_control = &nic->mac_control;
2437 config = &nic->config;
2439 nic->pkts_to_process = *budget;
2440 if (nic->pkts_to_process > dev->quota)
2441 nic->pkts_to_process = dev->quota;
2442 org_pkts_to_process = nic->pkts_to_process;
/* Read-then-write-back acknowledges the pending Rx traffic interrupt. */
2444 val64 = readq(&bar0->rx_traffic_int);
2445 writeq(val64, &bar0->rx_traffic_int);
2447 for (i = 0; i < config->rx_ring_num; i++) {
2448 rx_intr_handler(&mac_control->rings[i]);
/* rx_intr_handler decrements nic->pkts_to_process as it consumes
 * packets, so the difference is the work done so far. */
2449 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2450 if (!nic->pkts_to_process) {
2451 /* Quota for the current iteration has been met */
2458 dev->quota -= pkt_cnt;
/* All Rx work done within budget: leave polling mode... */
2460 netif_rx_complete(dev);
/* ...and refill each ring before re-enabling interrupts. */
2462 for (i = 0; i < config->rx_ring_num; i++) {
2463 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2464 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2465 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2469 /* Re enable the Rx interrupts. */
2470 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2471 atomic_dec(&nic->isr_cnt);
/* Quota-exhausted path: account for the work done and replenish, but
 * stay in polling mode (no netif_rx_complete / intr re-enable here). */
2475 dev->quota -= pkt_cnt;
2478 for (i = 0; i < config->rx_ring_num; i++) {
2479 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2480 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2481 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2485 atomic_dec(&nic->isr_cnt);
2491 * rx_intr_handler - Rx interrupt handler
2492 * @nic: device private variable.
2494 * If the interrupt is because of a received frame or if the
2495 * receive ring contains fresh as yet un-processed frames,this function is
2496 * called. It picks out the RxD at which place the last Rx processing had
2497 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * rx_intr_handler - process received frames on one ring.
 * @ring_data: per-ring state (descriptor blocks, get/put positions).
 *
 * Resumes where the previous Rx processing stopped: walks host-owned,
 * up-to-date descriptors from the "get" position toward the "put"
 * position, unmaps each frame's DMA buffer(s) and hands the skb to
 * rx_osm_handler(), advancing the get index across block boundaries.
 * Under NAPI it also decrements nic->pkts_to_process and stops when the
 * poll budget is consumed; in the non-NAPI build it stops after
 * indicate_max_pkts packets instead.
 *
 * NOTE(review): lines are missing from this extract (braces, #else/#endif,
 * several right-hand sides of assignments), so some statements below
 * appear truncated; consult the full source.
 */
2502 static void rx_intr_handler(ring_info_t *ring_data)
2504 nic_t *nic = ring_data->nic;
2505 struct net_device *dev = (struct net_device *) nic->dev;
2506 int get_block, get_offset, put_block, put_offset, ring_bufs;
2507 rx_curr_get_info_t get_info, put_info;
2509 struct sk_buff *skb;
2510 #ifndef CONFIG_S2IO_NAPI
/* rx_lock serializes Rx processing against reset/teardown paths. */
2513 spin_lock(&nic->rx_lock);
2514 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2515 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2516 __FUNCTION__, dev->name);
2517 spin_unlock(&nic->rx_lock);
/* Snapshot the consumer ("get") and producer ("put") positions. */
2521 get_info = ring_data->rx_curr_get_info;
2522 get_block = get_info.block_index;
2523 put_info = ring_data->rx_curr_put_info;
2524 put_block = put_info.block_index;
2525 ring_bufs = get_info.ring_len+1;
2526 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2528 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2530 #ifndef CONFIG_S2IO_NAPI
/* Non-NAPI: the producer position is updated concurrently by the
 * buffer-refill path, so read it under put_lock. */
2531 spin_lock(&nic->put_lock);
2532 put_offset = ring_data->put_pos;
2533 spin_unlock(&nic->put_lock);
2535 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Consume descriptors that the adapter has handed back, never
 * catching up to the producer position. */
2538 while (RXD_IS_UP2DT(rxdp) &&
2539 (((get_offset + 1) % ring_bufs) != put_offset)) {
2540 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2542 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2544 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2545 spin_unlock(&nic->rx_lock);
2548 #ifndef CONFIG_2BUFF_MODE
/* 1-buffer mode: a single mapping covering MTU + header overhead. */
2549 pci_unmap_single(nic->pdev, (dma_addr_t)
2552 HEADER_ETHERNET_II_802_3_SIZE +
2555 PCI_DMA_FROMDEVICE);
/* 2-buffer mode: BUF0, BUF1 and the data buffer were mapped
 * separately, so each must be unmapped separately. */
2557 pci_unmap_single(nic->pdev, (dma_addr_t)
2559 BUF0_LEN, PCI_DMA_FROMDEVICE);
2560 pci_unmap_single(nic->pdev, (dma_addr_t)
2562 BUF1_LEN, PCI_DMA_FROMDEVICE);
2563 pci_unmap_single(nic->pdev, (dma_addr_t)
2565 dev->mtu + BUF0_LEN + 4,
2566 PCI_DMA_FROMDEVICE);
/* Hand the completed frame up the stack. */
2568 rx_osm_handler(ring_data, rxdp);
2570 ring_data->rx_curr_get_info.offset =
2572 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
/* Crossed a block boundary: wrap the offset and move to the next
 * descriptor block (modulo the ring's block count). */
2574 if (get_info.offset &&
2575 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2576 get_info.offset = 0;
2577 ring_data->rx_curr_get_info.offset
2580 get_block %= ring_data->block_count;
2581 ring_data->rx_curr_get_info.block_index
2583 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2586 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2588 #ifdef CONFIG_S2IO_NAPI
/* NAPI budget accounting: stop once the poll quota is used up. */
2589 nic->pkts_to_process -= 1;
2590 if (!nic->pkts_to_process)
2594 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2598 spin_unlock(&nic->rx_lock);
2602 * tx_intr_handler - Transmit interrupt handler
2603 * @nic : device private variable
2605 * If an interrupt was raised to indicate DMA complete of the
2606 * Tx packet, this function is called. It identifies the last TxD
2607 * whose buffer was freed and frees all skbs whose data have already
2608 * DMA'ed into the NICs internal memory.
/*
 * tx_intr_handler - reclaim completed Tx descriptors on one FIFO.
 * @fifo_data: per-FIFO state (descriptor list, get/put positions).
 *
 * Walks host-owned TxD lists from the "get" position up to the "put"
 * position, logging any T_CODE completion errors, unmapping the head
 * buffer and every fragment's page mapping, freeing the skb and updating
 * tx_bytes.  Finally wakes the transmit queue if it had been stopped.
 *
 * NOTE(review): lines are missing from this extract (braces, some
 * statement halves), so the loop body below appears truncated; consult
 * the full source.
 */
2613 static void tx_intr_handler(fifo_info_t *fifo_data)
2615 nic_t *nic = fifo_data->nic;
2616 struct net_device *dev = (struct net_device *) nic->dev;
2617 tx_curr_get_info_t get_info, put_info;
2618 struct sk_buff *skb;
2622 get_info = fifo_data->tx_curr_get_info;
2623 put_info = fifo_data->tx_curr_put_info;
2624 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Process only descriptors the NIC has released back to the host
 * (OWN bit clear) that actually carry an skb, without passing the
 * producer position. */
2626 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2627 (get_info.offset != put_info.offset) &&
2628 (txdlp->Host_Control)) {
2629 /* Check for TxD errors */
2630 if (txdlp->Control_1 & TXD_T_CODE) {
2631 unsigned long long err;
2632 err = txdlp->Control_1 & TXD_T_CODE;
/* T_CODE 0xA (bits 48+) denotes completion due to link loss --
 * debug-level only; anything else is a real TxD error. */
2633 if ((err >> 48) == 0xA) {
2634 DBG_PRINT(TX_DBG, "TxD returned due \
2635 to loss of link\n");
2638 DBG_PRINT(ERR_DBG, "***TxD error \
2643 skb = (struct sk_buff *) ((unsigned long)
2644 txdlp->Host_Control);
2646 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2648 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2652 frg_cnt = skb_shinfo(skb)->nr_frags;
2653 nic->tx_pkt_count++;
/* Unmap the linear part of the skb (total length minus paged data). */
2655 pci_unmap_single(nic->pdev, (dma_addr_t)
2656 txdlp->Buffer_Pointer,
2657 skb->len - skb->data_len,
/* Then unmap each paged fragment; txdlp advances through the TxDs
 * that describe the fragments. */
2663 for (j = 0; j < frg_cnt; j++, txdlp++) {
2665 &skb_shinfo(skb)->frags[j];
2666 if (!txdlp->Buffer_Pointer)
2668 pci_unmap_page(nic->pdev,
2678 (sizeof(TxD_t) * fifo_data->max_txds));
2680 /* Updating the statistics block */
2681 nic->stats.tx_bytes += skb->len;
/* _irq variant: this runs in interrupt context. */
2682 dev_kfree_skb_irq(skb);
/* Advance the consumer index with wrap-around on the FIFO length. */
2685 get_info.offset %= get_info.fifo_len + 1;
2686 txdlp = (TxD_t *) fifo_data->list_info
2687 [get_info.offset].list_virt_addr;
2688 fifo_data->tx_curr_get_info.offset =
/* Descriptors were freed, so transmission may resume. */
2692 spin_lock(&nic->tx_lock);
2693 if (netif_queue_stopped(dev))
2694 netif_wake_queue(dev);
2695 spin_unlock(&nic->tx_lock);
2699 * alarm_intr_handler - Alarm Interrupt handler
2700 * @nic: device private variable
2701 * Description: If the interrupt was neither because of Rx packet or Tx
2702 * complete, this function is called. If the interrupt was to indicate
2703 * a loss of link, the OSM link status handler is invoked; for any other
2704 * alarm interrupt, the block that raised the interrupt is displayed
2705 * and a H/W reset is issued.
/*
 * alarm_intr_handler - service alarm (non-traffic) interrupts.
 * @nic: device private structure.
 *
 * Checks, in order: link-state-change (schedules set_link_task), memory
 * controller ECC errors (double-bit ECC may schedule a reset on Xframe I),
 * any serious-error source (stops the queue and schedules rst_timer_task),
 * and the PCC_FB_ECC double-bit error workaround that recycles the
 * adapter-enable bit.  Each error register is acknowledged by writing the
 * read value back.
 *
 * NOTE(review): lines are missing from this extract (closing braces,
 * single-bit ECC counter update), so some branches appear truncated.
 */
2710 static void alarm_intr_handler(struct s2io_nic *nic)
2712 struct net_device *dev = (struct net_device *) nic->dev;
2713 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2714 register u64 val64 = 0, err_reg = 0;
2716 /* Handling link status change error Intr */
2717 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
/* Write-back of the read value acknowledges the RMAC error bits. */
2718 err_reg = readq(&bar0->mac_rmac_err_reg);
2719 writeq(err_reg, &bar0->mac_rmac_err_reg);
2720 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
/* Defer link handling to process context. */
2721 schedule_work(&nic->set_link_task);
2725 /* Handling Ecc errors */
2726 val64 = readq(&bar0->mc_err_reg);
2727 writeq(val64, &bar0->mc_err_reg);
2728 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2729 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2730 nic->mac_control.stats_info->sw_stat.
2732 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
2734 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
2735 if (nic->device_type != XFRAME_II_DEVICE) {
2736 /* Reset XframeI only if critical error */
2737 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
2738 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
2739 netif_stop_queue(dev);
2740 schedule_work(&nic->rst_timer_task);
2744 nic->mac_control.stats_info->sw_stat.
2749 /* In case of a serious error, the device will be Reset. */
2750 val64 = readq(&bar0->serr_source);
2751 if (val64 & SERR_SOURCE_ANY) {
2752 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2753 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
2754 (unsigned long long)val64);
2755 netif_stop_queue(dev);
2756 schedule_work(&nic->rst_timer_task);
2760 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2761 * Error occurs, the adapter will be recycled by disabling the
2762 * adapter enable bit and enabling it again after the device
2763 * becomes Quiescent.
2765 val64 = readq(&bar0->pcc_err_reg);
2766 writeq(val64, &bar0->pcc_err_reg);
2767 if (val64 & PCC_FB_ECC_DB_ERR) {
2768 u64 ac = readq(&bar0->adapter_control);
2769 ac &= ~(ADAPTER_CNTL_EN);
2770 writeq(ac, &bar0->adapter_control);
/* Read back to flush the posted write before handing off. */
2771 ac = readq(&bar0->adapter_control);
2772 schedule_work(&nic->set_link_task);
2775 /* Other type of interrupts are not being handled now, TODO */
2779 * wait_for_cmd_complete - waits for a command to complete.
2780 * @sp : private member of the device structure, which is a pointer to the
2781 * s2io_nic structure.
2782 * Description: Function that waits for a command to Write into RMAC
2783 * ADDR DATA registers to be completed and returns either success or
2784 * error depending on whether the command was complete or not.
2786 * SUCCESS on success and FAILURE on failure.
/*
 * wait_for_cmd_complete - poll until an RMAC address command finishes.
 * @sp: device private structure.
 *
 * Polls rmac_addr_cmd_mem until the STROBE_CMD_EXECUTING bit clears.
 * Per the kernel-doc above, returns SUCCESS or FAILURE; 'cnt' is
 * presumably a bounded-retry counter -- the loop body and return are in
 * lines missing from this extract, so confirm against the full source.
 */
2789 int wait_for_cmd_complete(nic_t * sp)
2791 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2792 int ret = FAILURE, cnt = 0;
2796 val64 = readq(&bar0->rmac_addr_cmd_mem);
2797 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2810 * s2io_reset - Resets the card.
2811 * @sp : private member of the device structure.
2812 * Description: Function to Reset the card. This function then also
2813 * restores the previously saved PCI configuration space registers as
2814 * the card reset also resets the configuration space.
/*
 * s2io_reset - software-reset the card and restore its PCI config space.
 * @sp: device private structure.
 *
 * Saves the PCI-X command register, issues SW_RESET_ALL, restores the PCI
 * state (the reset clobbers config space), re-establishes the byte-swapper
 * setting, clears post-reset error/status bits (Xframe II specifics),
 * zeroes the OS-side statistics, and applies the SXE-002 LED workaround.
 *
 * NOTE(review): lines are missing from this extract (e.g. the delay after
 * the reset write mentioned in the comment, and closing braces), so some
 * statements below appear truncated.
 */
2819 void s2io_reset(nic_t * sp)
2821 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2825 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
2826 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
2828 val64 = SW_RESET_ALL;
2829 writeq(val64, &bar0->sw_reset);
2832 * At this stage, if the PCI write is indeed completed, the
2833 * card is reset and so is the PCI Config space of the device.
2834 * So a read cannot be issued at this stage on any of the
2835 * registers to ensure the write into "sw_reset" register
2837 * Question: Is there any system call that will explicitly force
2838 * all the write commands still pending on the bus to be pushed
2840 * As of now I'am just giving a 250ms delay and hoping that the
2841 * PCI write to sw_reset register is done by this time.
2845 /* Restore the PCI state saved during initialization. */
2846 pci_restore_state(sp->pdev);
2847 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
2853 /* Set swapper to enable I/O register access */
2854 s2io_set_swapper(sp);
2856 /* Clear certain PCI/PCI-X fields after reset */
2857 if (sp->device_type == XFRAME_II_DEVICE) {
2858 /* Clear parity err detect bit */
2859 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
2861 /* Clearing PCIX Ecc status register */
2862 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
2864 /* Clearing PCI_STATUS error reflected here */
2865 writeq(BIT(62), &bar0->txpic_int_reg);
2868 /* Reset device statistics maintained by OS */
2869 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2871 /* SXE-002: Configure link and activity LED to turn it off */
2872 subid = sp->pdev->subsystem_device;
2873 if (((subid & 0xFF) >= 0x07) &&
2874 (sp->device_type == XFRAME_I_DEVICE)) {
2875 val64 = readq(&bar0->gpio_control);
2876 val64 |= 0x0000800000000000ULL;
2877 writeq(val64, &bar0->gpio_control);
/* Magic value written at BAR0 offset 0x2700 as part of the SXE-002
 * LED workaround -- meaning not documented here. */
2878 val64 = 0x0411040400000000ULL;
2879 writeq(val64, (void __iomem *)bar0 + 0x2700);
2883 * Clear spurious ECC interrupts that would have occured on
2884 * XFRAME II cards after reset.
2886 if (sp->device_type == XFRAME_II_DEVICE) {
2887 val64 = readq(&bar0->pcc_err_reg);
2888 writeq(val64, &bar0->pcc_err_reg);
2891 sp->device_enabled_once = FALSE;
2895 * s2io_set_swapper - to set the swapper control on the card
2896 * @sp : private member of the device structure,
2897 * pointer to the s2io_nic structure.
2898 * Description: Function to set the swapper control on the card
2899 * correctly depending on the 'endianness' of the system.
2901 * SUCCESS on success and FAILURE on failure.
2904 int s2io_set_swapper(nic_t * sp)
2906 struct net_device *dev = sp->dev;
2907 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2908 u64 val64, valt, valr;
2911 * Set proper endian settings and verify the same by reading
2912 * the PIF Feed-back register.
2915 val64 = readq(&bar0->pif_rd_swapper_fb);
2916 if (val64 != 0x0123456789ABCDEFULL) {
2918 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2919 0x8100008181000081ULL, /* FE=1, SE=0 */
2920 0x4200004242000042ULL, /* FE=0, SE=1 */
2921 0}; /* FE=0, SE=0 */
2924 writeq(value[i], &bar0->swapper_ctrl);
2925 val64 = readq(&bar0->pif_rd_swapper_fb);
2926 if (val64 == 0x0123456789ABCDEFULL)
2931 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2933 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2934 (unsigned long long) val64);
2939 valr = readq(&bar0->swapper_ctrl);
2942 valt = 0x0123456789ABCDEFULL;
2943 writeq(valt, &bar0->xmsi_address);
2944 val64 = readq(&bar0->xmsi_address);
2948 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2949 0x0081810000818100ULL, /* FE=1, SE=0 */
2950 0x0042420000424200ULL, /* FE=0, SE=1 */
2951 0}; /* FE=0, SE=0 */
2954 writeq((value[i] | valr), &bar0->swapper_ctrl);
2955 writeq(valt, &bar0->xmsi_address);
2956 val64 = readq(&bar0->xmsi_address);
2962 unsigned long long x = val64;
2963 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2964 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2968 val64 = readq(&bar0->swapper_ctrl);
2969 val64 &= 0xFFFF000000000000ULL;
2973 * The device by default set to a big endian format, so a
2974 * big endian driver need not set anything.
2976 val64 |= (SWAPPER_CTRL_TXP_FE |
2977 SWAPPER_CTRL_TXP_SE |
2978 SWAPPER_CTRL_TXD_R_FE |
2979 SWAPPER_CTRL_TXD_W_FE |
2980 SWAPPER_CTRL_TXF_R_FE |
2981 SWAPPER_CTRL_RXD_R_FE |
2982 SWAPPER_CTRL_RXD_W_FE |
2983 SWAPPER_CTRL_RXF_W_FE |
2984 SWAPPER_CTRL_XMSI_FE |
2985 SWAPPER_CTRL_XMSI_SE |
2986 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2987 writeq(val64, &bar0->swapper_ctrl);
2990 * Initially we enable all bits to make it accessible by the
2991 * driver, then we selectively enable only those bits that
2994 val64 |= (SWAPPER_CTRL_TXP_FE |
2995 SWAPPER_CTRL_TXP_SE |
2996 SWAPPER_CTRL_TXD_R_FE |
2997 SWAPPER_CTRL_TXD_R_SE |
2998 SWAPPER_CTRL_TXD_W_FE |
2999 SWAPPER_CTRL_TXD_W_SE |
3000 SWAPPER_CTRL_TXF_R_FE |
3001 SWAPPER_CTRL_RXD_R_FE |
3002 SWAPPER_CTRL_RXD_R_SE |
3003 SWAPPER_CTRL_RXD_W_FE |
3004 SWAPPER_CTRL_RXD_W_SE |
3005 SWAPPER_CTRL_RXF_W_FE |
3006 SWAPPER_CTRL_XMSI_FE |
3007 SWAPPER_CTRL_XMSI_SE |
3008 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3009 writeq(val64, &bar0->swapper_ctrl);
3011 val64 = readq(&bar0->swapper_ctrl);
3014 * Verifying if endian settings are accurate by reading a
3015 * feedback register.
3017 val64 = readq(&bar0->pif_rd_swapper_fb);
3018 if (val64 != 0x0123456789ABCDEFULL) {
3019 /* Endian settings are incorrect, calls for another dekko. */
3020 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3022 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3023 (unsigned long long) val64);