[PATCH] S2io: Code cleanup
drivers/net/s2io.c
1 /************************************************************************
2  * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3  * Copyright(c) 2002-2005 Neterion Inc.
4
5  * This software may be used and distributed according to the terms of
6  * the GNU General Public License (GPL), incorporated herein by reference.
7  * Drivers based on or derived from this code fall under the GPL and must
8  * retain the authorship, copyright and license notice.  This file is not
9  * a complete program and may only be used when the entire operating
10  * system is licensed under the GPL.
11  * See the file COPYING in this distribution for more information.
12  *
13  * Credits:
14  * Jeff Garzik          : For pointing out the improper error condition
15  *                        check in the s2io_xmit routine and also some
16  *                        issues in the Tx watchdog function. Also for
17  *                        patiently answering all those innumerable
18  *                        questions regarding the 2.6 porting issues.
19  * Stephen Hemminger    : Providing proper 2.6 porting mechanism for some
20  *                        macros available only in 2.6 Kernel.
21  * Francois Romieu      : For pointing out all the code parts that were
22  *                        deprecated and also for styling related comments.
23  * Grant Grundler       : For helping me get rid of some Architecture
24  *                        dependent code.
25  * Christopher Hellwig  : Some more 2.6 specific issues in the driver.
26  *
27  * The module loadable parameters that are supported by the driver and a brief
28  * explanation of all the variables (see the usage example below).
29  * rx_ring_num : This can be used to program the number of receive rings used
30  * in the driver.
31  * rx_ring_len: This defines the number of descriptors each ring can have. This
32  * is also an array of size 8.
33  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34  * tx_fifo_len: This too is an array of size 8. Each element defines the number
35  * of Tx descriptors that can be associated with each corresponding FIFO.
36  ************************************************************************/
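/*
 * Illustrative usage, assuming the parameters above are exported with the
 * names used in this comment (the values below are arbitrary): load the
 * driver with two Tx FIFOs of 1024 descriptors each and two Rx rings:
 *
 *   modprobe s2io tx_fifo_num=2 tx_fifo_len=1024,1024 rx_ring_num=2
 */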
37
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
58
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
61 #include <asm/io.h>
62
63 /* local include */
64 #include "s2io.h"
65 #include "s2io-regs.h"
66
67 /* S2io Driver name & version. */
68 static char s2io_driver_name[] = "Neterion";
69 static char s2io_driver_version[] = "Version 1.7.7";
70
71 /*
72  * Cards with the following subsystem_ids have a link state indication
73  * problem: 600B, 600C, 600D, 640B, 640C and 640D.
74  * The macro below identifies these cards given the subsystem_id.
75  */
76 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
77                 (((subid >= 0x600B) && (subid <= 0x600D)) || \
78                  ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
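/*
 * For example (illustrative values), CARDS_WITH_FAULTY_LINK_INDICATORS(0x640C)
 * evaluates to 1, while a subsystem_id outside both ranges evaluates to 0.
 */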
79
80 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
81                                       ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
82 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
83 #define PANIC   1
84 #define LOW     2
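/*
 * Note: rx_buffer_level() returns 0, LOW or PANIC depending on how far
 * rxb_size has fallen below the ring's packet count (pkt_cnt), giving
 * callers an indication of how urgently the Rx buffers need replenishing.
 */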
85 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
86 {
87         int level = 0;
88         mac_info_t *mac_control;
89
90         mac_control = &sp->mac_control;
91         if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
92                 level = LOW;
93                 if ((mac_control->rings[ring].pkt_cnt - rxb_size) <
94                                 MAX_RXDS_PER_BLOCK) {
95                         level = PANIC;
96                 }
97         }
98
99         return level;
100 }
101
102 /* Ethtool related variables and Macros. */
103 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
104         "Register test\t(offline)",
105         "Eeprom test\t(offline)",
106         "Link test\t(online)",
107         "RLDRAM test\t(offline)",
108         "BIST Test\t(offline)"
109 };
110
111 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
112         {"tmac_frms"},
113         {"tmac_data_octets"},
114         {"tmac_drop_frms"},
115         {"tmac_mcst_frms"},
116         {"tmac_bcst_frms"},
117         {"tmac_pause_ctrl_frms"},
118         {"tmac_any_err_frms"},
119         {"tmac_vld_ip_octets"},
120         {"tmac_vld_ip"},
121         {"tmac_drop_ip"},
122         {"tmac_icmp"},
123         {"tmac_rst_tcp"},
124         {"tmac_tcp"},
125         {"tmac_udp"},
126         {"rmac_vld_frms"},
127         {"rmac_data_octets"},
128         {"rmac_fcs_err_frms"},
129         {"rmac_drop_frms"},
130         {"rmac_vld_mcst_frms"},
131         {"rmac_vld_bcst_frms"},
132         {"rmac_in_rng_len_err_frms"},
133         {"rmac_long_frms"},
134         {"rmac_pause_ctrl_frms"},
135         {"rmac_discarded_frms"},
136         {"rmac_usized_frms"},
137         {"rmac_osized_frms"},
138         {"rmac_frag_frms"},
139         {"rmac_jabber_frms"},
140         {"rmac_ip"},
141         {"rmac_ip_octets"},
142         {"rmac_hdr_err_ip"},
143         {"rmac_drop_ip"},
144         {"rmac_icmp"},
145         {"rmac_tcp"},
146         {"rmac_udp"},
147         {"rmac_err_drp_udp"},
148         {"rmac_pause_cnt"},
149         {"rmac_accepted_ip"},
150         {"rmac_err_tcp"},
151 };
152
153 #define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
154 #define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
155
156 #define S2IO_TEST_LEN   (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
157 #define S2IO_STRINGS_LEN        (S2IO_TEST_LEN * ETH_GSTRING_LEN)
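/*
 * For example, with the five test strings defined above, S2IO_TEST_LEN
 * evaluates to 5 and S2IO_STRINGS_LEN to 5 * ETH_GSTRING_LEN bytes.
 */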
158
159 /*
160  * Constants to be programmed into the Xena's registers, to configure
161  * the XAUI.
162  */
163
164 #define SWITCH_SIGN     0xA5A5A5A5A5A5A5A5ULL
165 #define END_SIGN        0x0
166
167 static u64 default_mdio_cfg[] = {
168         /* Reset PMA PLL */
169         0xC001010000000000ULL, 0xC0010100000000E0ULL,
170         0xC0010100008000E4ULL,
171         /* Remove Reset from PMA PLL */
172         0xC001010000000000ULL, 0xC0010100000000E0ULL,
173         0xC0010100000000E4ULL,
174         END_SIGN
175 };
176
177 static u64 default_dtx_cfg[] = {
178         0x8000051500000000ULL, 0x80000515000000E0ULL,
179         0x80000515D93500E4ULL, 0x8001051500000000ULL,
180         0x80010515000000E0ULL, 0x80010515001E00E4ULL,
181         0x8002051500000000ULL, 0x80020515000000E0ULL,
182         0x80020515F21000E4ULL,
183         /* Set PADLOOPBACKN */
184         0x8002051500000000ULL, 0x80020515000000E0ULL,
185         0x80020515B20000E4ULL, 0x8003051500000000ULL,
186         0x80030515000000E0ULL, 0x80030515B20000E4ULL,
187         0x8004051500000000ULL, 0x80040515000000E0ULL,
188         0x80040515B20000E4ULL, 0x8005051500000000ULL,
189         0x80050515000000E0ULL, 0x80050515B20000E4ULL,
190         SWITCH_SIGN,
191         /* Remove PADLOOPBACKN */
192         0x8002051500000000ULL, 0x80020515000000E0ULL,
193         0x80020515F20000E4ULL, 0x8003051500000000ULL,
194         0x80030515000000E0ULL, 0x80030515F20000E4ULL,
195         0x8004051500000000ULL, 0x80040515000000E0ULL,
196         0x80040515F20000E4ULL, 0x8005051500000000ULL,
197         0x80050515000000E0ULL, 0x80050515F20000E4ULL,
198         END_SIGN
199 };
200
201 /*
202  * Constants for fixing the MAC address problem seen mostly on
203  * Alpha machines.
204  */
205 static u64 fix_mac[] = {
206         0x0060000000000000ULL, 0x0060600000000000ULL,
207         0x0040600000000000ULL, 0x0000600000000000ULL,
208         0x0020600000000000ULL, 0x0060600000000000ULL,
209         0x0020600000000000ULL, 0x0060600000000000ULL,
210         0x0020600000000000ULL, 0x0060600000000000ULL,
211         0x0020600000000000ULL, 0x0060600000000000ULL,
212         0x0020600000000000ULL, 0x0060600000000000ULL,
213         0x0020600000000000ULL, 0x0060600000000000ULL,
214         0x0020600000000000ULL, 0x0060600000000000ULL,
215         0x0020600000000000ULL, 0x0060600000000000ULL,
216         0x0020600000000000ULL, 0x0060600000000000ULL,
217         0x0020600000000000ULL, 0x0060600000000000ULL,
218         0x0020600000000000ULL, 0x0000600000000000ULL,
219         0x0040600000000000ULL, 0x0060600000000000ULL,
220         END_SIGN
221 };
222
223 /* Module Loadable parameters. */
224 static unsigned int tx_fifo_num = 1;
225 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
226     {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
227 static unsigned int rx_ring_num = 1;
228 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
229     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
230 static unsigned int Stats_refresh_time = 4;
231 static unsigned int rts_frm_len[MAX_RX_RINGS] =
232     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
233 static unsigned int rmac_pause_time = 65535;
234 static unsigned int mc_pause_threshold_q0q3 = 187;
235 static unsigned int mc_pause_threshold_q4q7 = 187;
236 static unsigned int shared_splits;
237 static unsigned int tmac_util_period = 5;
238 static unsigned int rmac_util_period = 5;
239 #ifndef CONFIG_S2IO_NAPI
240 static unsigned int indicate_max_pkts;
241 #endif
242
243 /*
244  * S2IO device table.
245  * This table lists all the devices that this driver supports.
246  */
247 static struct pci_device_id s2io_tbl[] __devinitdata = {
248         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
249          PCI_ANY_ID, PCI_ANY_ID},
250         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
251          PCI_ANY_ID, PCI_ANY_ID},
252         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
253          PCI_ANY_ID, PCI_ANY_ID},
254         {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
255          PCI_ANY_ID, PCI_ANY_ID},
256         {0,}
257 };
258
259 MODULE_DEVICE_TABLE(pci, s2io_tbl);
260
261 static struct pci_driver s2io_driver = {
262       .name = "S2IO",
263       .id_table = s2io_tbl,
264       .probe = s2io_init_nic,
265       .remove = __devexit_p(s2io_rem_nic),
266 };
267
268 /* A simplifier macro used by both the init and free shared_mem functions. */
269 #define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
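/*
 * For example, TXD_MEM_PAGE_CNT(100, 32) evaluates to 4: 100 TxD lists at
 * 32 lists per page need four pages, since the division rounds up.
 */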
270
271 /**
272  * init_shared_mem - Allocation and Initialization of Memory
273  * @nic: Device private variable.
274  * Description: The function allocates all the memory areas shared
275  * between the NIC and the driver. This includes Tx descriptors,
276  * Rx descriptors and the statistics block.
277  */
278
279 static int init_shared_mem(struct s2io_nic *nic)
280 {
281         u32 size;
282         void *tmp_v_addr, *tmp_v_addr_next;
283         dma_addr_t tmp_p_addr, tmp_p_addr_next;
284         RxD_block_t *pre_rxd_blk = NULL;
285         int i, j, blk_cnt, rx_sz, tx_sz;
286         int lst_size, lst_per_page;
287         struct net_device *dev = nic->dev;
288 #ifdef CONFIG_2BUFF_MODE
289         u64 tmp;
290         buffAdd_t *ba;
291 #endif
292
293         mac_info_t *mac_control;
294         struct config_param *config;
295
296         mac_control = &nic->mac_control;
297         config = &nic->config;
298
299
300         /* Allocation and initialization of TXDLs in FIFOs */
301         size = 0;
302         for (i = 0; i < config->tx_fifo_num; i++) {
303                 size += config->tx_cfg[i].fifo_len;
304         }
305         if (size > MAX_AVAILABLE_TXDS) {
306                 DBG_PRINT(ERR_DBG, "%s: Total number of Tx descriptors ",
307                           dev->name);
308                 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
309                 DBG_PRINT(ERR_DBG, "that can be used\n");
310                 return FAILURE;
311         }
312
313         lst_size = (sizeof(TxD_t) * config->max_txds);
314         tx_sz = lst_size * size;
315         lst_per_page = PAGE_SIZE / lst_size;
316
317         for (i = 0; i < config->tx_fifo_num; i++) {
318                 int fifo_len = config->tx_cfg[i].fifo_len;
319                 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
320                 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
321                                                           GFP_KERNEL);
322                 if (!mac_control->fifos[i].list_info) {
323                         DBG_PRINT(ERR_DBG,
324                                   "Malloc failed for list_info\n");
325                         return -ENOMEM;
326                 }
327                 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
328         }
329         for (i = 0; i < config->tx_fifo_num; i++) {
330                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
331                                                 lst_per_page);
332                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
333                 mac_control->fifos[i].tx_curr_put_info.fifo_len =
334                     config->tx_cfg[i].fifo_len - 1;
335                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
336                 mac_control->fifos[i].tx_curr_get_info.fifo_len =
337                     config->tx_cfg[i].fifo_len - 1;
338                 mac_control->fifos[i].fifo_no = i;
339                 mac_control->fifos[i].nic = nic;
340                 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
341
342                 for (j = 0; j < page_num; j++) {
343                         int k = 0;
344                         dma_addr_t tmp_p;
345                         void *tmp_v;
346                         tmp_v = pci_alloc_consistent(nic->pdev,
347                                                      PAGE_SIZE, &tmp_p);
348                         if (!tmp_v) {
349                                 DBG_PRINT(ERR_DBG,
350                                           "pci_alloc_consistent ");
351                                 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
352                                 return -ENOMEM;
353                         }
354                         while (k < lst_per_page) {
355                                 int l = (j * lst_per_page) + k;
356                                 if (l == config->tx_cfg[i].fifo_len)
357                                         break;
358                                 mac_control->fifos[i].list_info[l].list_virt_addr =
359                                     tmp_v + (k * lst_size);
360                                 mac_control->fifos[i].list_info[l].list_phy_addr =
361                                     tmp_p + (k * lst_size);
362                                 k++;
363                         }
364                 }
365         }
366
367         /* Allocation and initialization of RXDs in Rings */
368         size = 0;
369         for (i = 0; i < config->rx_ring_num; i++) {
370                 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
371                         DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
372                         DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
373                                   i);
374                         DBG_PRINT(ERR_DBG, "RxDs per Block");
375                         return FAILURE;
376                 }
377                 size += config->rx_cfg[i].num_rxd;
378                 mac_control->rings[i].block_count =
379                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
380                 mac_control->rings[i].pkt_cnt =
381                     config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
382         }
383         size = (size * (sizeof(RxD_t)));
384         rx_sz = size;
385
386         for (i = 0; i < config->rx_ring_num; i++) {
387                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
388                 mac_control->rings[i].rx_curr_get_info.offset = 0;
389                 mac_control->rings[i].rx_curr_get_info.ring_len =
390                     config->rx_cfg[i].num_rxd - 1;
391                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
392                 mac_control->rings[i].rx_curr_put_info.offset = 0;
393                 mac_control->rings[i].rx_curr_put_info.ring_len =
394                     config->rx_cfg[i].num_rxd - 1;
395                 mac_control->rings[i].nic = nic;
396                 mac_control->rings[i].ring_no = i;
397
398                 blk_cnt =
399                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
400                 /*  Allocating all the Rx blocks */
401                 for (j = 0; j < blk_cnt; j++) {
402 #ifndef CONFIG_2BUFF_MODE
403                         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
404 #else
405                         size = SIZE_OF_BLOCK;
406 #endif
407                         tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
408                                                           &tmp_p_addr);
409                         if (tmp_v_addr == NULL) {
410                                 /*
411                                  * In case of failure, free_shared_mem()
412                                  * is called, which should free any
413                                  * memory that was allocated until the
414                                  * failure happened.
415                                  */
416                                 mac_control->rings[i].rx_blocks[j].block_virt_addr =
417                                     tmp_v_addr;
418                                 return -ENOMEM;
419                         }
420                         memset(tmp_v_addr, 0, size);
421                         mac_control->rings[i].rx_blocks[j].block_virt_addr =
422                                 tmp_v_addr;
423                         mac_control->rings[i].rx_blocks[j].block_dma_addr =
424                                 tmp_p_addr;
425                 }
426                 /* Interlinking all Rx Blocks */
427                 for (j = 0; j < blk_cnt; j++) {
428                         tmp_v_addr =
429                                 mac_control->rings[i].rx_blocks[j].block_virt_addr;
430                         tmp_v_addr_next =
431                                 mac_control->rings[i].rx_blocks[(j + 1) %
432                                               blk_cnt].block_virt_addr;
433                         tmp_p_addr =
434                                 mac_control->rings[i].rx_blocks[j].block_dma_addr;
435                         tmp_p_addr_next =
436                                 mac_control->rings[i].rx_blocks[(j + 1) %
437                                               blk_cnt].block_dma_addr;
438
439                         pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
440                         pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
441                                                                  * marker.
442                                                                  */
443 #ifndef CONFIG_2BUFF_MODE
444                         pre_rxd_blk->reserved_2_pNext_RxD_block =
445                             (unsigned long) tmp_v_addr_next;
446 #endif
447                         pre_rxd_blk->pNext_RxD_Blk_physical =
448                             (u64) tmp_p_addr_next;
449                 }
450         }
451
452 #ifdef CONFIG_2BUFF_MODE
453         /*
454          * Allocation of Storages for buffer addresses in 2BUFF mode
455          * and the buffers as well.
456          */
457         for (i = 0; i < config->rx_ring_num; i++) {
458                 blk_cnt =
459                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
460                 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
461                                      GFP_KERNEL);
462                 if (!mac_control->rings[i].ba)
463                         return -ENOMEM;
464                 for (j = 0; j < blk_cnt; j++) {
465                         int k = 0;
466                         mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
467                                                  (MAX_RXDS_PER_BLOCK + 1)),
468                                                 GFP_KERNEL);
469                         if (!mac_control->rings[i].ba[j])
470                                 return -ENOMEM;
471                         while (k != MAX_RXDS_PER_BLOCK) {
472                                 ba = &mac_control->rings[i].ba[j][k];
473
474                                 ba->ba_0_org = (void *) kmalloc
475                                     (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
476                                 if (!ba->ba_0_org)
477                                         return -ENOMEM;
478                                 tmp = (u64) ba->ba_0_org;
479                                 tmp += ALIGN_SIZE;
480                                 tmp &= ~((u64) ALIGN_SIZE);
481                                 ba->ba_0 = (void *) tmp;
482
483                                 ba->ba_1_org = (void *) kmalloc
484                                     (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
485                                 if (!ba->ba_1_org)
486                                         return -ENOMEM;
487                                 tmp = (u64) ba->ba_1_org;
488                                 tmp += ALIGN_SIZE;
489                                 tmp &= ~((u64) ALIGN_SIZE);
490                                 ba->ba_1 = (void *) tmp;
491                                 k++;
492                         }
493                 }
494         }
495 #endif
496
497         /* Allocation and initialization of Statistics block */
498         size = sizeof(StatInfo_t);
499         mac_control->stats_mem = pci_alloc_consistent
500             (nic->pdev, size, &mac_control->stats_mem_phy);
501
502         if (!mac_control->stats_mem) {
503                 /*
504                  * In case of failure, free_shared_mem() is called, which
505                  * should free any memory that was allocated until the
506                  * failure happened.
507                  */
508                 return -ENOMEM;
509         }
510         mac_control->stats_mem_sz = size;
511
512         tmp_v_addr = mac_control->stats_mem;
513         mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
514         memset(tmp_v_addr, 0, size);
515         DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
516                   (unsigned long long) tmp_p_addr);
517
518         return SUCCESS;
519 }
520
521 /**
522  * free_shared_mem - Free the allocated Memory
523  * @nic:  Device private variable.
524  * Description: This function frees all memory locations allocated by
525  * the init_shared_mem() function and returns them to the kernel.
526  */
527
528 static void free_shared_mem(struct s2io_nic *nic)
529 {
530         int i, j, blk_cnt, size;
531         void *tmp_v_addr;
532         dma_addr_t tmp_p_addr;
533         mac_info_t *mac_control;
534         struct config_param *config;
535         int lst_size, lst_per_page;
536
537
538         if (!nic)
539                 return;
540
541         mac_control = &nic->mac_control;
542         config = &nic->config;
543
544         lst_size = (sizeof(TxD_t) * config->max_txds);
545         lst_per_page = PAGE_SIZE / lst_size;
546
547         for (i = 0; i < config->tx_fifo_num; i++) {
548                 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
549                                                 lst_per_page);
550                 for (j = 0; j < page_num; j++) {
551                         int mem_blks = (j * lst_per_page);
552                         if (!mac_control->fifos[i].list_info[mem_blks].
553                             list_virt_addr)
554                                 break;
555                         pci_free_consistent(nic->pdev, PAGE_SIZE,
556                                             mac_control->fifos[i].
557                                             list_info[mem_blks].
558                                             list_virt_addr,
559                                             mac_control->fifos[i].
560                                             list_info[mem_blks].
561                                             list_phy_addr);
562                 }
563                 kfree(mac_control->fifos[i].list_info);
564         }
565
566 #ifndef CONFIG_2BUFF_MODE
567         size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
568 #else
569         size = SIZE_OF_BLOCK;
570 #endif
571         for (i = 0; i < config->rx_ring_num; i++) {
572                 blk_cnt = mac_control->rings[i].block_count;
573                 for (j = 0; j < blk_cnt; j++) {
574                         tmp_v_addr = mac_control->rings[i].rx_blocks[j].
575                                 block_virt_addr;
576                         tmp_p_addr = mac_control->rings[i].rx_blocks[j].
577                                 block_dma_addr;
578                         if (tmp_v_addr == NULL)
579                                 break;
580                         pci_free_consistent(nic->pdev, size,
581                                             tmp_v_addr, tmp_p_addr);
582                 }
583         }
584
585 #ifdef CONFIG_2BUFF_MODE
586         /* Freeing buffer storage addresses in 2BUFF mode. */
587         for (i = 0; i < config->rx_ring_num; i++) {
588                 blk_cnt =
589                     config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
590                 for (j = 0; j < blk_cnt; j++) {
591                         int k = 0;
592                         if (!mac_control->rings[i].ba[j])
593                                 continue;
594                         while (k != MAX_RXDS_PER_BLOCK) {
595                                 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
596                                 kfree(ba->ba_0_org);
597                                 kfree(ba->ba_1_org);
598                                 k++;
599                         }
600                         kfree(mac_control->rings[i].ba[j]);
601                 }
602                 if (mac_control->rings[i].ba)
603                         kfree(mac_control->rings[i].ba);
604         }
605 #endif
606
607         if (mac_control->stats_mem) {
608                 pci_free_consistent(nic->pdev,
609                                     mac_control->stats_mem_sz,
610                                     mac_control->stats_mem,
611                                     mac_control->stats_mem_phy);
612         }
613 }
614
615 /**
616  *  init_nic - Initialization of hardware
617  *  @nic: device private variable
618  *  Description: The function sequentially configures every block
619  *  of the H/W from their reset values.
620  *  Return Value:  SUCCESS on success and
621  *  '-1' on failure (endian settings incorrect).
622  */
623
624 static int init_nic(struct s2io_nic *nic)
625 {
626         XENA_dev_config_t __iomem *bar0 = nic->bar0;
627         struct net_device *dev = nic->dev;
628         register u64 val64 = 0;
629         void __iomem *add;
630         u32 time;
631         int i, j;
632         mac_info_t *mac_control;
633         struct config_param *config;
634         int mdio_cnt = 0, dtx_cnt = 0;
635         unsigned long long mem_share;
636         int mem_size;
637
638         mac_control = &nic->mac_control;
639         config = &nic->config;
640
641         /* Set the swapper control on the card */
642         if (s2io_set_swapper(nic)) {
643                 DBG_PRINT(ERR_DBG, "ERROR: Setting Swapper failed\n");
644                 return -1;
645         }
646
647         /* Remove XGXS from reset state */
648         val64 = 0;
649         writeq(val64, &bar0->sw_reset);
650         msleep(500);
651         val64 = readq(&bar0->sw_reset);
652
653         /*  Enable Receiving broadcasts */
654         add = &bar0->mac_cfg;
655         val64 = readq(&bar0->mac_cfg);
656         val64 |= MAC_RMAC_BCAST_ENABLE;
657         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
658         writel((u32) val64, add);
659         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
660         writel((u32) (val64 >> 32), (add + 4));
661
662         /* Read registers in all blocks */
663         val64 = readq(&bar0->mac_int_mask);
664         val64 = readq(&bar0->mc_int_mask);
665         val64 = readq(&bar0->xgxs_int_mask);
666
667         /*  Set MTU */
668         val64 = dev->mtu;
669         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
670
671         /*
672          * Configuring the XAUI Interface of Xena.
673          * ***************************************
674          * To configure the Xena's XAUI, one has to write a series
675          * of 64 bit values into two registers in a particular
676          * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
677          * which is placed in the arrays of configuration values
678          * (default_dtx_cfg & default_mdio_cfg) at appropriate places
679          * to switch writing from one register to another. We continue
680          * writing these values until we encounter the 'END_SIGN' macro.
681          * For example, after making a series of 21 writes into the
682          * dtx_control register, the 'SWITCH_SIGN' appears and hence we
683          * start writing into mdio_control until we encounter END_SIGN.
684          */
685         while (1) {
686               dtx_cfg:
687                 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
688                         if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
689                                 dtx_cnt++;
690                                 goto mdio_cfg;
691                         }
692                         SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
693                                           &bar0->dtx_control, UF);
694                         val64 = readq(&bar0->dtx_control);
695                         dtx_cnt++;
696                 }
697               mdio_cfg:
698                 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
699                         if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
700                                 mdio_cnt++;
701                                 goto dtx_cfg;
702                         }
703                         SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
704                                           &bar0->mdio_control, UF);
705                         val64 = readq(&bar0->mdio_control);
706                         mdio_cnt++;
707                 }
708                 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
709                     (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
710                         break;
711                 } else {
712                         goto dtx_cfg;
713                 }
714         }
715
716         /*  Tx DMA Initialization */
717         val64 = 0;
718         writeq(val64, &bar0->tx_fifo_partition_0);
719         writeq(val64, &bar0->tx_fifo_partition_1);
720         writeq(val64, &bar0->tx_fifo_partition_2);
721         writeq(val64, &bar0->tx_fifo_partition_3);
722
723
724         for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
725                 val64 |=
726                     vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
727                          13) | vBIT(config->tx_cfg[i].fifo_priority,
728                                     ((i * 32) + 5), 3);
729
730                 if (i == (config->tx_fifo_num - 1)) {
731                         if (i % 2 == 0)
732                                 i++;
733                 }
734
735                 switch (i) {
736                 case 1:
737                         writeq(val64, &bar0->tx_fifo_partition_0);
738                         val64 = 0;
739                         break;
740                 case 3:
741                         writeq(val64, &bar0->tx_fifo_partition_1);
742                         val64 = 0;
743                         break;
744                 case 5:
745                         writeq(val64, &bar0->tx_fifo_partition_2);
746                         val64 = 0;
747                         break;
748                 case 7:
749                         writeq(val64, &bar0->tx_fifo_partition_3);
750                         break;
751                 }
752         }
753
754         /* Enable Tx FIFO partition 0. */
755         val64 = readq(&bar0->tx_fifo_partition_0);
756         val64 |= BIT(0);        /* To enable the FIFO partition. */
757         writeq(val64, &bar0->tx_fifo_partition_0);
758
759         val64 = readq(&bar0->tx_fifo_partition_0);
760         DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
761                   &bar0->tx_fifo_partition_0, (unsigned long long) val64);
762
763         /*
764          * Initialization of Tx_PA_CONFIG register to ignore packet
765          * integrity checking.
766          */
767         val64 = readq(&bar0->tx_pa_cfg);
768         val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
769             TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
770         writeq(val64, &bar0->tx_pa_cfg);
771
772         /* Rx DMA initialization. */
773         val64 = 0;
774         for (i = 0; i < config->rx_ring_num; i++) {
775                 val64 |=
776                     vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
777                          3);
778         }
779         writeq(val64, &bar0->rx_queue_priority);
780
781         /*
782          * Allocating equal share of memory to all the
783          * configured Rings.
784          */
785         val64 = 0;
786         mem_size = 64;
787         for (i = 0; i < config->rx_ring_num; i++) {
788                 switch (i) {
789                 case 0:
790                         mem_share = (mem_size / config->rx_ring_num +
791                                      mem_size % config->rx_ring_num);
792                         val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
793                         continue;
794                 case 1:
795                         mem_share = (mem_size / config->rx_ring_num);
796                         val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
797                         continue;
798                 case 2:
799                         mem_share = (mem_size / config->rx_ring_num);
800                         val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
801                         continue;
802                 case 3:
803                         mem_share = (mem_size / config->rx_ring_num);
804                         val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
805                         continue;
806                 case 4:
807                         mem_share = (mem_size / config->rx_ring_num);
808                         val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
809                         continue;
810                 case 5:
811                         mem_share = (mem_size / config->rx_ring_num);
812                         val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
813                         continue;
814                 case 6:
815                         mem_share = (mem_size / config->rx_ring_num);
816                         val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
817                         continue;
818                 case 7:
819                         mem_share = (mem_size / config->rx_ring_num);
820                         val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
821                         continue;
822                 }
823         }
824         writeq(val64, &bar0->rx_queue_cfg);
825
826         /* Initializing the Tx round robin registers to 0.
827          * Filling the Tx and Rx round robin registers as per
828          * the number of FIFOs and Rings is still TODO.
829          */
830         writeq(0, &bar0->tx_w_round_robin_0);
831         writeq(0, &bar0->tx_w_round_robin_1);
832         writeq(0, &bar0->tx_w_round_robin_2);
833         writeq(0, &bar0->tx_w_round_robin_3);
834         writeq(0, &bar0->tx_w_round_robin_4);
835
836         /*
837          * TODO
838          * Disable Rx steering. Hard coding all packets to be steered to
839          * Queue 0 for now.
840          */
841         val64 = 0x8080808080808080ULL;
842         writeq(val64, &bar0->rts_qos_steering);
843
844         /* UDP Fix */
845         val64 = 0;
846         for (i = 0; i < 8; i++)
847                 writeq(val64, &bar0->rts_frm_len_n[i]);
848
849         /* Set the default rts frame length for ring0 */
850         writeq(MAC_RTS_FRM_LEN_SET(dev->mtu+22),
851                 &bar0->rts_frm_len_n[0]);
852
853         /* Program statistics memory */
854         writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
855         val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
856             STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
857         writeq(val64, &bar0->stat_cfg);
858
859         /*
860          * Initializing the sampling rate for the device to calculate the
861          * bandwidth utilization.
862          */
863         val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
864             MAC_RX_LINK_UTIL_VAL(rmac_util_period);
865         writeq(val64, &bar0->mac_link_util);
866
867
868         /*
869          * Initializing the Transmit and Receive Traffic Interrupt
870          * Scheme.
871          */
872         /*
873          * TTI Initialization. Default Tx timer gets us about
874          * 250 interrupts per sec. Continuous interrupts are enabled
875          * by default.
876          */
877         val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
878             TTI_DATA1_MEM_TX_URNG_A(0xA) |
879             TTI_DATA1_MEM_TX_URNG_B(0x10) |
880             TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
881                 TTI_DATA1_MEM_TX_TIMER_CI_EN;
882         writeq(val64, &bar0->tti_data1_mem);
883
884         val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
885             TTI_DATA2_MEM_TX_UFC_B(0x20) |
886             TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
887         writeq(val64, &bar0->tti_data2_mem);
888
889         val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
890         writeq(val64, &bar0->tti_command_mem);
891
892         /*
893          * Once the operation completes, the Strobe bit of the command
894          * register will be reset. We poll for this particular condition.
895          * We wait for a maximum of 500ms for the operation to complete;
896          * if it's not complete by then, we return an error.
897          */
898         time = 0;
899         while (TRUE) {
900                 val64 = readq(&bar0->tti_command_mem);
901                 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
902                         break;
903                 }
904                 if (time > 10) {
905                         DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
906                                   dev->name);
907                         return -1;
908                 }
909                 msleep(50);
910                 time++;
911         }
912
913         /* RTI Initialization */
914         val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
915             RTI_DATA1_MEM_RX_URNG_A(0xA) |
916             RTI_DATA1_MEM_RX_URNG_B(0x10) |
917             RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
918
919         writeq(val64, &bar0->rti_data1_mem);
920
921         val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
922             RTI_DATA2_MEM_RX_UFC_B(0x2) |
923             RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
924         writeq(val64, &bar0->rti_data2_mem);
925
926         val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
927         writeq(val64, &bar0->rti_command_mem);
928
929         /*
930          * Once the operation completes, the Strobe bit of the command
931          * register will be reset. We poll for this particular condition.
932          * We wait for a maximum of 500ms for the operation to complete;
933          * if it's not complete by then, we return an error.
934          */
935         time = 0;
936         while (TRUE) {
937                 val64 = readq(&bar0->rti_command_mem);
938                 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
939                         break;
940                 }
941                 if (time > 10) {
942                         DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
943                                   dev->name);
944                         return -1;
945                 }
946                 time++;
947                 msleep(50);
948         }
949
950         /*
951          * Initializing proper values as the pause threshold for all
952          * the 8 queues on the Rx side.
953          */
954         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
955         writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
956
957         /* Disable RMAC PAD STRIPPING */
958         add = (void *) &bar0->mac_cfg;
959         val64 = readq(&bar0->mac_cfg);
960         val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
961         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
962         writel((u32) (val64), add);
963         writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
964         writel((u32) (val64 >> 32), (add + 4));
965         val64 = readq(&bar0->mac_cfg);
966
967         /*
968          * Set the time value to be inserted in the pause frame
969          * generated by xena.
970          */
971         val64 = readq(&bar0->rmac_pause_cfg);
972         val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
973         val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
974         writeq(val64, &bar0->rmac_pause_cfg);
975
976         /*
977          * Set the threshold limit for generating the pause frame. If the
978          * amount of data in any queue exceeds the ratio
979          * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 of its capacity,
980          * a pause frame is generated (with the default of 187, ~73% full).
981          */
982         val64 = 0;
983         for (i = 0; i < 4; i++) {
984                 val64 |=
985                     (((u64) 0xFF00 | nic->mac_control.
986                       mc_pause_threshold_q0q3)
987                      << (i * 2 * 8));
988         }
989         writeq(val64, &bar0->mc_pause_thresh_q0q3);
990
991         val64 = 0;
992         for (i = 0; i < 4; i++) {
993                 val64 |=
994                     (((u64) 0xFF00 | nic->mac_control.
995                       mc_pause_threshold_q4q7)
996                      << (i * 2 * 8));
997         }
998         writeq(val64, &bar0->mc_pause_thresh_q4q7);
999
1000         /*
1001          * TxDMA will stop issuing read requests if the number of read
1002          * splits has exceeded the limit specified by shared_splits.
1003          */
1004         val64 = readq(&bar0->pic_control);
1005         val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1006         writeq(val64, &bar0->pic_control);
1007
1008         return SUCCESS;
1009 }
1010
1011 /**
1012  *  en_dis_able_nic_intrs - Enable or Disable the interrupts
1013  *  @nic: device private variable,
1014  *  @mask: A mask indicating which Intr block must be modified, and
1015  *  @flag: A flag indicating whether to enable or disable the Intrs.
1016  *  Description: This function will either disable or enable the interrupts
1017  *  depending on the flag argument. The mask argument can be used to
1018  *  enable/disable any Intr block.
1019  *  Return Value: NONE.
1020  */
1021
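/*
 * Illustrative call (mask value chosen only for illustration): enable just
 * the Tx and Rx traffic interrupt blocks:
 *
 *   en_dis_able_nic_intrs(nic, TX_TRAFFIC_INTR | RX_TRAFFIC_INTR,
 *                         ENABLE_INTRS);
 */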
1022 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1023 {
1024         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1025         register u64 val64 = 0, temp64 = 0;
1026
1027         /*  Top level interrupt classification */
1028         /*  PIC Interrupts */
1029         if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1030                 /*  Enable PIC Intrs in the general intr mask register */
1031                 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1032                 if (flag == ENABLE_INTRS) {
1033                         temp64 = readq(&bar0->general_int_mask);
1034                         temp64 &= ~((u64) val64);
1035                         writeq(temp64, &bar0->general_int_mask);
1036                         /*
1037                          * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1038                          * interrupts for now.
1039                          * TODO
1040                          */
1041                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1042                         /*
1043                          * No MSI Support is available presently, so TTI and
1044                          * RTI interrupts are also disabled.
1045                          */
1046                 } else if (flag == DISABLE_INTRS) {
1047                         /*
1048                          * Disable PIC Intrs in the general
1049                          * intr mask register
1050                          */
1051                         writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1052                         temp64 = readq(&bar0->general_int_mask);
1053                         val64 |= temp64;
1054                         writeq(val64, &bar0->general_int_mask);
1055                 }
1056         }
1057
1058         /*  DMA Interrupts */
1059         /*  Enabling/Disabling Tx DMA interrupts */
1060         if (mask & TX_DMA_INTR) {
1061                 /* Enable TxDMA Intrs in the general intr mask register */
1062                 val64 = TXDMA_INT_M;
1063                 if (flag == ENABLE_INTRS) {
1064                         temp64 = readq(&bar0->general_int_mask);
1065                         temp64 &= ~((u64) val64);
1066                         writeq(temp64, &bar0->general_int_mask);
1067                         /*
1068                          * Keep all interrupts other than PFC interrupt
1069                          * and PCC interrupt disabled in DMA level.
1070                          */
1071                         val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1072                                                       TXDMA_PCC_INT_M);
1073                         writeq(val64, &bar0->txdma_int_mask);
1074                         /*
1075                          * Enable only the MISC error 1 interrupt in PFC block
1076                          */
1077                         val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1078                         writeq(val64, &bar0->pfc_err_mask);
1079                         /*
1080                          * Enable only the FB_ECC error interrupt in PCC block
1081                          */
1082                         val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1083                         writeq(val64, &bar0->pcc_err_mask);
1084                 } else if (flag == DISABLE_INTRS) {
1085                         /*
1086                          * Disable TxDMA Intrs in the general intr mask
1087                          * register
1088                          */
1089                         writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1090                         writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1091                         temp64 = readq(&bar0->general_int_mask);
1092                         val64 |= temp64;
1093                         writeq(val64, &bar0->general_int_mask);
1094                 }
1095         }
1096
1097         /*  Enabling/Disabling Rx DMA interrupts */
1098         if (mask & RX_DMA_INTR) {
1099                 /*  Enable RxDMA Intrs in the general intr mask register */
1100                 val64 = RXDMA_INT_M;
1101                 if (flag == ENABLE_INTRS) {
1102                         temp64 = readq(&bar0->general_int_mask);
1103                         temp64 &= ~((u64) val64);
1104                         writeq(temp64, &bar0->general_int_mask);
1105                         /*
1106                          * All RxDMA block interrupts are disabled for now
1107                          * TODO
1108                          */
1109                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1110                 } else if (flag == DISABLE_INTRS) {
1111                         /*
1112                          * Disable RxDMA Intrs in the general intr mask
1113                          * register
1114                          */
1115                         writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1116                         temp64 = readq(&bar0->general_int_mask);
1117                         val64 |= temp64;
1118                         writeq(val64, &bar0->general_int_mask);
1119                 }
1120         }
1121
1122         /*  MAC Interrupts */
1123         /*  Enabling/Disabling MAC interrupts */
1124         if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1125                 val64 = TXMAC_INT_M | RXMAC_INT_M;
1126                 if (flag == ENABLE_INTRS) {
1127                         temp64 = readq(&bar0->general_int_mask);
1128                         temp64 &= ~((u64) val64);
1129                         writeq(temp64, &bar0->general_int_mask);
1130                         /*
1131                          * All MAC block error interrupts are disabled for now
1132                          * except the link status change interrupt.
1133                          * TODO
1134                          */
1135                         val64 = MAC_INT_STATUS_RMAC_INT;
1136                         temp64 = readq(&bar0->mac_int_mask);
1137                         temp64 &= ~((u64) val64);
1138                         writeq(temp64, &bar0->mac_int_mask);
1139
1140                         val64 = readq(&bar0->mac_rmac_err_mask);
1141                         val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1142                         writeq(val64, &bar0->mac_rmac_err_mask);
1143                 } else if (flag == DISABLE_INTRS) {
1144                         /*
1145                          * Disable MAC Intrs in the general intr mask register
1146                          */
1147                         writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1148                         writeq(DISABLE_ALL_INTRS,
1149                                &bar0->mac_rmac_err_mask);
1150
1151                         temp64 = readq(&bar0->general_int_mask);
1152                         val64 |= temp64;
1153                         writeq(val64, &bar0->general_int_mask);
1154                 }
1155         }
1156
1157         /*  XGXS Interrupts */
1158         if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1159                 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1160                 if (flag == ENABLE_INTRS) {
1161                         temp64 = readq(&bar0->general_int_mask);
1162                         temp64 &= ~((u64) val64);
1163                         writeq(temp64, &bar0->general_int_mask);
1164                         /*
1165                          * All XGXS block error interrupts are disabled for now
1166                          * TODO
1167                          */
1168                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1169                 } else if (flag == DISABLE_INTRS) {
1170                         /*
1171                          * Disable XGXS Intrs in the general intr mask register
1172                          */
1173                         writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1174                         temp64 = readq(&bar0->general_int_mask);
1175                         val64 |= temp64;
1176                         writeq(val64, &bar0->general_int_mask);
1177                 }
1178         }
1179
1180         /*  Memory Controller(MC) interrupts */
1181         if (mask & MC_INTR) {
1182                 val64 = MC_INT_M;
1183                 if (flag == ENABLE_INTRS) {
1184                         temp64 = readq(&bar0->general_int_mask);
1185                         temp64 &= ~((u64) val64);
1186                         writeq(temp64, &bar0->general_int_mask);
1187                         /*
1188                          * All MC block error interrupts are disabled for now.
1189                          * TODO
1190                          */
1191                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1192                 } else if (flag == DISABLE_INTRS) {
1193                         /*
1194                          * Disable MC Intrs in the general intr mask register
1195                          */
1196                         writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1197                         temp64 = readq(&bar0->general_int_mask);
1198                         val64 |= temp64;
1199                         writeq(val64, &bar0->general_int_mask);
1200                 }
1201         }
1202
1203
1204         /*  Tx traffic interrupts */
1205         if (mask & TX_TRAFFIC_INTR) {
1206                 val64 = TXTRAFFIC_INT_M;
1207                 if (flag == ENABLE_INTRS) {
1208                         temp64 = readq(&bar0->general_int_mask);
1209                         temp64 &= ~((u64) val64);
1210                         writeq(temp64, &bar0->general_int_mask);
1211                         /*
1212                          * Enable all the Tx side interrupts;
1213                          * writing 0 enables all 64 TX interrupt levels.
1214                          */
1215                         writeq(0x0, &bar0->tx_traffic_mask);
1216                 } else if (flag == DISABLE_INTRS) {
1217                         /*
1218                          * Disable Tx Traffic Intrs in the general intr mask
1219                          * register.
1220                          */
1221                         writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1222                         temp64 = readq(&bar0->general_int_mask);
1223                         val64 |= temp64;
1224                         writeq(val64, &bar0->general_int_mask);
1225                 }
1226         }
1227
1228         /*  Rx traffic interrupts */
1229         if (mask & RX_TRAFFIC_INTR) {
1230                 val64 = RXTRAFFIC_INT_M;
1231                 if (flag == ENABLE_INTRS) {
1232                         temp64 = readq(&bar0->general_int_mask);
1233                         temp64 &= ~((u64) val64);
1234                         writeq(temp64, &bar0->general_int_mask);
1235                         /* writing 0 enables all 8 RX interrupt levels */
1236                         writeq(0x0, &bar0->rx_traffic_mask);
1237                 } else if (flag == DISABLE_INTRS) {
1238                         /*
1239                          * Disable Rx Traffic Intrs in the general intr mask
1240                          * register.
1241                          */
1242                         writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1243                         temp64 = readq(&bar0->general_int_mask);
1244                         val64 |= temp64;
1245                         writeq(val64, &bar0->general_int_mask);
1246                 }
1247         }
1248 }
1249
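/*
 * Note: check_prc_pcc_state() is a helper for verify_xena_quiescence(). It
 * checks the RMAC PCC idle and RC PRC quiescent bits of the adapter status
 * value; the expected state differs depending on whether the adapter enable
 * bit has already been written once ('flag'). Returns 1 when the bits are in
 * the expected state, 0 otherwise.
 */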
1250 static int check_prc_pcc_state(u64 val64, int flag)
1251 {
1252         int ret = 0;
1253
1254         if (flag == FALSE) {
1255                 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1256                     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1257                      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1258                         ret = 1;
1259                 }
1260         } else {
1261                 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1262                      ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1263                     (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1264                      ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1265                       ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1266                         ret = 1;
1267                 }
1268         }
1269
1270         return ret;
1271 }
1272 /**
1273  *  verify_xena_quiescence - Checks whether the H/W is ready
1274  *  @val64 :  Value read from adapter status register.
1275  *  @flag : indicates if the adapter enable bit was ever written once
1276  *  before.
1277  *  Description: Returns whether the H/W is ready to go or not. Depending
1278  *  on whether the adapter enable bit was written or not, the comparison
1279  *  differs and the calling function passes the input argument flag to
1280  *  indicate this.
1281  *  Return: 1 if Xena is quiescent
1282  *          0 if Xena is not quiescent
1283  */
1284
1285 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1286 {
1287         int ret = 0;
1288         u64 tmp64 = ~((u64) val64);
1289
1290         if (!
1291             (tmp64 &
1292              (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1293               ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1294               ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1295               ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1296               ADAPTER_STATUS_P_PLL_LOCK))) {
1297                 ret = check_prc_pcc_state(val64, flag);
1298         }
1299
1300         return ret;
1301 }
1302
1303 /**
1304  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
1305  * @sp: Pointer to device specific structure
1306  * Description :
1307  * New procedure to clear MAC address reading problems on Alpha platforms
1308  *
1309  */
1310
1311 void fix_mac_address(nic_t * sp)
1312 {
1313         XENA_dev_config_t __iomem *bar0 = sp->bar0;
1314         u64 val64;
1315         int i = 0;
1316
1317         while (fix_mac[i] != END_SIGN) {
1318                 writeq(fix_mac[i++], &bar0->gpio_control);
1319                 udelay(10);
1320                 val64 = readq(&bar0->gpio_control);
1321         }
1322 }
1323
1324 /**
1325  *  start_nic - Turns the device on
1326  *  @nic : device private variable.
1327  *  Description:
1328  *  This function actually turns the device on. Before this function is
1329  *  called, all registers are configured from their reset states
1330  *  and shared memory is allocated but the NIC is still quiescent. On
1331  *  calling this function, the device interrupts are cleared and the NIC is
1332  *  literally switched on by writing into the adapter control register.
1333  *  Return Value:
1334  *  SUCCESS on success and -1 on failure.
1335  */
1336
1337 static int start_nic(struct s2io_nic *nic)
1338 {
1339         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1340         struct net_device *dev = nic->dev;
1341         register u64 val64 = 0;
1342         u16 interruptible;
1343         u16 subid, i;
1344         mac_info_t *mac_control;
1345         struct config_param *config;
1346
1347         mac_control = &nic->mac_control;
1348         config = &nic->config;
1349
1350         /*  PRC Initialization and configuration */
1351         for (i = 0; i < config->rx_ring_num; i++) {
1352                 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1353                        &bar0->prc_rxd0_n[i]);
1354
1355                 val64 = readq(&bar0->prc_ctrl_n[i]);
1356 #ifndef CONFIG_2BUFF_MODE
1357                 val64 |= PRC_CTRL_RC_ENABLED;
1358 #else
1359                 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1360 #endif
1361                 writeq(val64, &bar0->prc_ctrl_n[i]);
1362         }
1363
1364 #ifdef CONFIG_2BUFF_MODE
1365         /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1366         val64 = readq(&bar0->rx_pa_cfg);
1367         val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1368         writeq(val64, &bar0->rx_pa_cfg);
1369 #endif
1370
1371         /*
1372          * Enabling MC-RLDRAM. After enabling the device, we wait
1373          * for around 100ms, which is approximately the time required
1374          * for the device to be ready for operation.
1375          */
1376         val64 = readq(&bar0->mc_rldram_mrs);
1377         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1378         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1379         val64 = readq(&bar0->mc_rldram_mrs);
1380
1381         msleep(100);    /* Delay by around 100 ms. */
1382
1383         /* Enabling ECC Protection. */
1384         val64 = readq(&bar0->adapter_control);
1385         val64 &= ~ADAPTER_ECC_EN;
1386         writeq(val64, &bar0->adapter_control);
1387
1388         /*
1389          * Clearing any possible Link state change interrupts that
1390          * could have popped up just before Enabling the card.
1391          */
1392         val64 = readq(&bar0->mac_rmac_err_reg);
1393         if (val64)
1394                 writeq(val64, &bar0->mac_rmac_err_reg);
1395
1396         /*
1397          * Verify if the device is ready to be enabled, if so enable
1398          * it.
1399          */
1400         val64 = readq(&bar0->adapter_status);
1401         if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1402                 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1403                 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1404                           (unsigned long long) val64);
1405                 return FAILURE;
1406         }
1407
1408         /*  Enable select interrupts */
1409         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1410             RX_MAC_INTR;
1411         en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1412
1413         /*
1414          * With some switches, link might be already up at this point.
1415          * Because of this weird behavior, when we enable laser,
1416          * we may not get link. We need to handle this. We cannot
1417          * figure out which switch is misbehaving. So we are forced to
1418          * make a global change.
1419          */
1420
1421         /* Enabling Laser. */
1422         val64 = readq(&bar0->adapter_control);
1423         val64 |= ADAPTER_EOI_TX_ON;
1424         writeq(val64, &bar0->adapter_control);
1425
1426         /* SXE-002: Initialize link and activity LED */
1427         subid = nic->pdev->subsystem_device;
1428         if ((subid & 0xFF) >= 0x07) {
1429                 val64 = readq(&bar0->gpio_control);
1430                 val64 |= 0x0000800000000000ULL;
1431                 writeq(val64, &bar0->gpio_control);
1432                 val64 = 0x0411040400000000ULL;
1433                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1434         }
1435
1436         /*
1437          * We don't see link state interrupts on certain switches, so we
1438          * directly schedule a link state task from here.
1439          */
1440         schedule_work(&nic->set_link_task);
1441
1442         /*
1443          * Here we are performing soft reset on XGXS to
1444          * force link down. Since link is already up, we will get
1445          * link state change interrupt after this reset
1446          */
1447         SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1448         val64 = readq(&bar0->dtx_control);
1449         udelay(50);
1450         SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1451         val64 = readq(&bar0->dtx_control);
1452         udelay(50);
1453         SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1454         val64 = readq(&bar0->dtx_control);
1455         udelay(50);
1456
1457         return SUCCESS;
1458 }
1459
1460 /**
1461  *  free_tx_buffers - Free all queued Tx buffers
1462  *  @nic : device private variable.
1463  *  Description:
1464  *  Free all queued Tx buffers.
1465  *  Return Value: void
1466 */
1467
1468 static void free_tx_buffers(struct s2io_nic *nic)
1469 {
1470         struct net_device *dev = nic->dev;
1471         struct sk_buff *skb;
1472         TxD_t *txdp;
1473         int i, j;
1474         mac_info_t *mac_control;
1475         struct config_param *config;
1476         int cnt = 0;
1477
1478         mac_control = &nic->mac_control;
1479         config = &nic->config;
1480
1481         for (i = 0; i < config->tx_fifo_num; i++) {
1482                 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1483                         txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1484                             list_virt_addr;
1485                         skb =
1486                             (struct sk_buff *) ((unsigned long) txdp->
1487                                                 Host_Control);
1488                         if (skb == NULL) {
1489                                 memset(txdp, 0, sizeof(TxD_t));
1490                                 continue;
1491                         }
1492                         dev_kfree_skb(skb);
1493                         memset(txdp, 0, sizeof(TxD_t));
1494                         cnt++;
1495                 }
1496                 DBG_PRINT(INTR_DBG,
1497                           "%s:forcibly freeing %d skbs on FIFO%d\n",
1498                           dev->name, cnt, i);
1499                 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1500                 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1501         }
1502 }
1503
1504 /**
1505  *   stop_nic -  To stop the nic
1506  *   @nic : device private variable.
1507  *   Description:
1508  *   This function does exactly the opposite of what the start_nic()
1509  *   function does. This function is called to stop the device.
1510  *   Return Value:
1511  *   void.
1512  */
1513
1514 static void stop_nic(struct s2io_nic *nic)
1515 {
1516         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1517         register u64 val64 = 0;
1518         u16 interruptible, i;
1519         mac_info_t *mac_control;
1520         struct config_param *config;
1521
1522         mac_control = &nic->mac_control;
1523         config = &nic->config;
1524
1525         /*  Disable all interrupts */
1526         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1527             RX_MAC_INTR;
1528         en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1529
1530         /*  Disable PRCs */
1531         for (i = 0; i < config->rx_ring_num; i++) {
1532                 val64 = readq(&bar0->prc_ctrl_n[i]);
1533                 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1534                 writeq(val64, &bar0->prc_ctrl_n[i]);
1535         }
1536 }
1537
1538 /**
1539  *  fill_rx_buffers - Allocates the Rx side skbs
1540  *  @nic:  device private variable
1541  *  @ring_no: ring number
1542  *  Description:
1543  *  The function allocates Rx side skbs and puts the physical
1544  *  address of these buffers into the RxD buffer pointers, so that the NIC
1545  *  can DMA the received frame into these locations.
1546  *  The NIC supports 3 receive modes, viz
1547  *  1. single buffer,
1548  *  2. three buffer and
1549  *  3. five buffer modes.
1550  *  Each mode defines how many fragments the received frame will be split
1551  *  up into by the NIC. In three buffer mode the frame is split into L3
1552  *  header, L4 header and L4 payload, while in five buffer mode the L4
1553  *  payload itself is further split into 3 fragments. As of now only
1554  *  single buffer mode is supported.
1555  *   Return Value:
1556  *  SUCCESS on success or an appropriate -ve value on failure.
1557  */
1558
1559 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1560 {
1561         struct net_device *dev = nic->dev;
1562         struct sk_buff *skb;
1563         RxD_t *rxdp;
1564         int off, off1, size, block_no, block_no1;
1565         int offset, offset1;
1566         u32 alloc_tab = 0;
1567         u32 alloc_cnt;
1568         mac_info_t *mac_control;
1569         struct config_param *config;
1570 #ifdef CONFIG_2BUFF_MODE
1571         RxD_t *rxdpnext;
1572         int nextblk;
1573         u64 tmp;
1574         buffAdd_t *ba;
1575         dma_addr_t rxdpphys;
1576 #endif
1577 #ifndef CONFIG_S2IO_NAPI
1578         unsigned long flags;
1579 #endif
1580
1581         mac_control = &nic->mac_control;
1582         config = &nic->config;
1583         alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1584             atomic_read(&nic->rx_bufs_left[ring_no]);
1585         size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1586             HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1587
1588         while (alloc_tab < alloc_cnt) {
1589                 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1590                     block_index;
1591                 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1592                     block_index;
1593                 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1594                 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1595 #ifndef CONFIG_2BUFF_MODE
1596                 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1597                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1598 #else
1599                 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1600                 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1601 #endif
1602
1603                 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1604                     block_virt_addr + off;
1605                 if ((offset == offset1) && (rxdp->Host_Control)) {
1606                         DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1607                         DBG_PRINT(INTR_DBG, " info equated\n");
1608                         goto end;
1609                 }
1610 #ifndef CONFIG_2BUFF_MODE
1611                 if (rxdp->Control_1 == END_OF_BLOCK) {
1612                         mac_control->rings[ring_no].rx_curr_put_info.
1613                             block_index++;
1614                         mac_control->rings[ring_no].rx_curr_put_info.
1615                             block_index %= mac_control->rings[ring_no].block_count;
1616                         block_no = mac_control->rings[ring_no].rx_curr_put_info.
1617                                 block_index;
1618                         off++;
1619                         off %= (MAX_RXDS_PER_BLOCK + 1);
1620                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1621                             off;
1622                         rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1623                         DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1624                                   dev->name, rxdp);
1625                 }
1626 #ifndef CONFIG_S2IO_NAPI
1627                 spin_lock_irqsave(&nic->put_lock, flags);
1628                 mac_control->rings[ring_no].put_pos =
1629                     (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1630                 spin_unlock_irqrestore(&nic->put_lock, flags);
1631 #endif
1632 #else
1633                 if (rxdp->Host_Control == END_OF_BLOCK) {
1634                         mac_control->rings[ring_no].rx_curr_put_info.
1635                             block_index++;
1636                         mac_control->rings[ring_no].rx_curr_put_info.block_index
1637                             %= mac_control->rings[ring_no].block_count;
1638                         block_no = mac_control->rings[ring_no].rx_curr_put_info
1639                             .block_index;
1640                         off = 0;
1641                         DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1642                                   dev->name, block_no,
1643                                   (unsigned long long) rxdp->Control_1);
1644                         mac_control->rings[ring_no].rx_curr_put_info.offset =
1645                             off;
1646                         rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1647                             block_virt_addr;
1648                 }
1649 #ifndef CONFIG_S2IO_NAPI
1650                 spin_lock_irqsave(&nic->put_lock, flags);
1651                 mac_control->rings[ring_no].put_pos = (block_no *
1652                                          (MAX_RXDS_PER_BLOCK + 1)) + off;
1653                 spin_unlock_irqrestore(&nic->put_lock, flags);
1654 #endif
1655 #endif
1656
1657 #ifndef CONFIG_2BUFF_MODE
1658                 if (rxdp->Control_1 & RXD_OWN_XENA)
1659 #else
1660                 if (rxdp->Control_2 & BIT(0))
1661 #endif
1662                 {
1663                         mac_control->rings[ring_no].rx_curr_put_info.
1664                             offset = off;
1665                         goto end;
1666                 }
1667 #ifdef  CONFIG_2BUFF_MODE
1668                 /*
1669                  * RxDs spanning cache lines will be replenished only
1670                  * if the succeeding RxD is also owned by Host. It
1671                  * will always be the ((8*i)+3) and ((8*i)+6)
1672                  * descriptors for the 48 byte descriptor. The offending
1673                  * descriptor is, of course, the 3rd descriptor.
1674                  */
1675                 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1676                     block_dma_addr + (off * sizeof(RxD_t));
1677                 if (((u64) (rxdpphys)) % 128 > 80) {
1678                         rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1679                             block_virt_addr + (off + 1);
1680                         if (rxdpnext->Host_Control == END_OF_BLOCK) {
1681                                 nextblk = (block_no + 1) %
1682                                     (mac_control->rings[ring_no].block_count);
1683                                 rxdpnext = mac_control->rings[ring_no].rx_blocks
1684                                     [nextblk].block_virt_addr;
1685                         }
1686                         if (rxdpnext->Control_2 & BIT(0))
1687                                 goto end;
1688                 }
1689 #endif
1690
1691 #ifndef CONFIG_2BUFF_MODE
1692                 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1693 #else
1694                 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1695 #endif
1696                 if (!skb) {
1697                         DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1698                         DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1699                         return -ENOMEM;
1700                 }
1701 #ifndef CONFIG_2BUFF_MODE
1702                 skb_reserve(skb, NET_IP_ALIGN);
1703                 memset(rxdp, 0, sizeof(RxD_t));
1704                 rxdp->Buffer0_ptr = pci_map_single
1705                     (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1706                 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1707                 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1708                 rxdp->Host_Control = (unsigned long) (skb);
1709                 rxdp->Control_1 |= RXD_OWN_XENA;
1710                 off++;
1711                 off %= (MAX_RXDS_PER_BLOCK + 1);
1712                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1713 #else
1714                 ba = &mac_control->rings[ring_no].ba[block_no][off];
1715                 skb_reserve(skb, BUF0_LEN);
1716                 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1717                 if (tmp)
1718                         skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1719
1720                 memset(rxdp, 0, sizeof(RxD_t));
1721                 rxdp->Buffer2_ptr = pci_map_single
1722                     (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1723                      PCI_DMA_FROMDEVICE);
1724                 rxdp->Buffer0_ptr =
1725                     pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1726                                    PCI_DMA_FROMDEVICE);
1727                 rxdp->Buffer1_ptr =
1728                     pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1729                                    PCI_DMA_FROMDEVICE);
1730
1731                 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1732                 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1733                 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1734                 rxdp->Control_2 |= BIT(0);      /* Set Buffer_Empty bit. */
1735                 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1736                 rxdp->Control_1 |= RXD_OWN_XENA;
1737                 off++;
1738                 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1739 #endif
1740
1741                 atomic_inc(&nic->rx_bufs_left[ring_no]);
1742                 alloc_tab++;
1743         }
1744
1745       end:
1746         return SUCCESS;
1747 }
1748
1749 /**
1750  *  free_rx_buffers - Frees all Rx buffers
1751  *  @sp: device private variable.
1752  *  Description:
1753  *  This function will free all Rx buffers allocated by host.
1754  *  Return Value:
1755  *  NONE.
1756  */
1757
1758 static void free_rx_buffers(struct s2io_nic *sp)
1759 {
1760         struct net_device *dev = sp->dev;
1761         int i, j, blk = 0, off, buf_cnt = 0;
1762         RxD_t *rxdp;
1763         struct sk_buff *skb;
1764         mac_info_t *mac_control;
1765         struct config_param *config;
1766 #ifdef CONFIG_2BUFF_MODE
1767         buffAdd_t *ba;
1768 #endif
1769
1770         mac_control = &sp->mac_control;
1771         config = &sp->config;
1772
1773         for (i = 0; i < config->rx_ring_num; i++) {
1774                 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1775                         off = j % (MAX_RXDS_PER_BLOCK + 1);
1776                         rxdp = mac_control->rings[i].rx_blocks[blk].
1777                                 block_virt_addr + off;
1778
1779 #ifndef CONFIG_2BUFF_MODE
1780                         if (rxdp->Control_1 == END_OF_BLOCK) {
1781                                 rxdp =
1782                                     (RxD_t *) ((unsigned long) rxdp->
1783                                                Control_2);
1784                                 j++;
1785                                 blk++;
1786                         }
1787 #else
1788                         if (rxdp->Host_Control == END_OF_BLOCK) {
1789                                 blk++;
1790                                 continue;
1791                         }
1792 #endif
1793
1794                         if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
1795                                 memset(rxdp, 0, sizeof(RxD_t));
1796                                 continue;
1797                         }
1798
1799                         skb =
1800                             (struct sk_buff *) ((unsigned long) rxdp->
1801                                                 Host_Control);
1802                         if (skb) {
1803 #ifndef CONFIG_2BUFF_MODE
1804                                 pci_unmap_single(sp->pdev, (dma_addr_t)
1805                                                  rxdp->Buffer0_ptr,
1806                                                  dev->mtu +
1807                                                  HEADER_ETHERNET_II_802_3_SIZE
1808                                                  + HEADER_802_2_SIZE +
1809                                                  HEADER_SNAP_SIZE,
1810                                                  PCI_DMA_FROMDEVICE);
1811 #else
1812                                 ba = &mac_control->rings[i].ba[blk][off];
1813                                 pci_unmap_single(sp->pdev, (dma_addr_t)
1814                                                  rxdp->Buffer0_ptr,
1815                                                  BUF0_LEN,
1816                                                  PCI_DMA_FROMDEVICE);
1817                                 pci_unmap_single(sp->pdev, (dma_addr_t)
1818                                                  rxdp->Buffer1_ptr,
1819                                                  BUF1_LEN,
1820                                                  PCI_DMA_FROMDEVICE);
1821                                 pci_unmap_single(sp->pdev, (dma_addr_t)
1822                                                  rxdp->Buffer2_ptr,
1823                                                  dev->mtu + BUF0_LEN + 4,
1824                                                  PCI_DMA_FROMDEVICE);
1825 #endif
1826                                 dev_kfree_skb(skb);
1827                                 atomic_dec(&sp->rx_bufs_left[i]);
1828                                 buf_cnt++;
1829                         }
1830                         memset(rxdp, 0, sizeof(RxD_t));
1831                 }
1832                 mac_control->rings[i].rx_curr_put_info.block_index = 0;
1833                 mac_control->rings[i].rx_curr_get_info.block_index = 0;
1834                 mac_control->rings[i].rx_curr_put_info.offset = 0;
1835                 mac_control->rings[i].rx_curr_get_info.offset = 0;
1836                 atomic_set(&sp->rx_bufs_left[i], 0);
1837                 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1838                           dev->name, buf_cnt, i);
1839         }
1840 }
1841
1842 /**
1843  * s2io_poll - Rx interrupt handler for NAPI support
1844  * @dev : pointer to the device structure.
1845  * @budget : The number of packets that were budgeted to be processed
1846  * during one pass through the "poll" function.
1847  * Description:
1848  * Comes into picture only if NAPI support has been incorporated. It does
1849  * the same thing that rx_intr_handler does, but not in an interrupt
1850  * context, and it will process only a given number of packets.
1851  * Return value:
1852  * 0 on success and 1 if there are No Rx packets to be processed.
1853  */
1854
1855 #if defined(CONFIG_S2IO_NAPI)
1856 static int s2io_poll(struct net_device *dev, int *budget)
1857 {
1858         nic_t *nic = dev->priv;
1859         int pkt_cnt = 0, org_pkts_to_process;
1860         mac_info_t *mac_control;
1861         struct config_param *config;
1862         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1863         u64 val64;
1864         int i;
1865
1866         mac_control = &nic->mac_control;
1867         config = &nic->config;
1868
1869         nic->pkts_to_process = *budget;
1870         if (nic->pkts_to_process > dev->quota)
1871                 nic->pkts_to_process = dev->quota;
1872         org_pkts_to_process = nic->pkts_to_process;
1873
1874         val64 = readq(&bar0->rx_traffic_int);
1875         writeq(val64, &bar0->rx_traffic_int);
1876
1877         for (i = 0; i < config->rx_ring_num; i++) {
1878                 rx_intr_handler(&mac_control->rings[i]);
1879                 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
1880                 if (!nic->pkts_to_process) {
1881                         /* Quota for the current iteration has been met */
1882                         goto no_rx;
1883                 }
1884         }
1885         if (!pkt_cnt)
1886                 pkt_cnt = 1;
1887
1888         dev->quota -= pkt_cnt;
1889         *budget -= pkt_cnt;
1890         netif_rx_complete(dev);
1891
1892         for (i = 0; i < config->rx_ring_num; i++) {
1893                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
1894                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
1895                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
1896                         break;
1897                 }
1898         }
1899         /* Re enable the Rx interrupts. */
1900         en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
1901         return 0;
1902
1903 no_rx:
1904         dev->quota -= pkt_cnt;
1905         *budget -= pkt_cnt;
1906
1907         for (i = 0; i < config->rx_ring_num; i++) {
1908                 if (fill_rx_buffers(nic, i) == -ENOMEM) {
1909                         DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
1910                         DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
1911                         break;
1912                 }
1913         }
1914         return 1;
1915 }
1916 #endif
1917
1918 /**
1919  *  rx_intr_handler - Rx interrupt handler
1920  *  @ring_data: per-ring data structure of the ring to be serviced.
1921  *  Description:
1922  *  If the interrupt is because of a received frame or if the
1923  *  receive ring contains fresh, as yet un-processed frames, this function is
1924  *  called. It picks out the RxD at which place the last Rx processing had
1925  *  stopped and sends the skb to the OSM's Rx handler and then increments
1926  *  the offset.
1927  *  Return Value:
1928  *  NONE.
1929  */
1930 static void rx_intr_handler(ring_info_t *ring_data)
1931 {
1932         nic_t *nic = ring_data->nic;
1933         struct net_device *dev = (struct net_device *) nic->dev;
1934         XENA_dev_config_t __iomem *bar0 = nic->bar0;
1935         int get_block, get_offset, put_block, put_offset, ring_bufs;
1936         rx_curr_get_info_t get_info, put_info;
1937         RxD_t *rxdp;
1938         struct sk_buff *skb;
1939 #ifndef CONFIG_S2IO_NAPI
1940         int pkt_cnt = 0;
1941 #endif
1942         register u64 val64;
1943
1944         /*
1945          * rx_traffic_int reg is an R1 register, hence we read and write
1946          * back the same value in the register to clear it
1947          */
1948         val64 = readq(&bar0->rx_traffic_int);
1949         writeq(val64, &bar0->rx_traffic_int);
1950
1951         get_info = ring_data->rx_curr_get_info;
1952         get_block = get_info.block_index;
1953         put_info = ring_data->rx_curr_put_info;
1954         put_block = put_info.block_index;
1955         ring_bufs = get_info.ring_len+1;
1956         rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
1957                     get_info.offset;
1958         get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1959                 get_info.offset;
1960 #ifndef CONFIG_S2IO_NAPI
1961         spin_lock(&nic->put_lock);
1962         put_offset = ring_data->put_pos;
1963         spin_unlock(&nic->put_lock);
1964 #else
1965         put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
1966                 put_info.offset;
1967 #endif
1968         while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1969 #ifdef CONFIG_2BUFF_MODE
1970                 (!(rxdp->Control_2 & BIT(0))) &&
1971 #endif
1972                 (((get_offset + 1) % ring_bufs) != put_offset)) {
1973                 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
1974                 if (skb == NULL) {
1975                         DBG_PRINT(ERR_DBG, "%s: The skb is ",
1976                                   dev->name);
1977                         DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
1978                         return;
1979                 }
1980 #ifndef CONFIG_2BUFF_MODE
1981                 pci_unmap_single(nic->pdev, (dma_addr_t)
1982                                  rxdp->Buffer0_ptr,
1983                                  dev->mtu +
1984                                  HEADER_ETHERNET_II_802_3_SIZE +
1985                                  HEADER_802_2_SIZE +
1986                                  HEADER_SNAP_SIZE,
1987                                  PCI_DMA_FROMDEVICE);
1988 #else
1989                 pci_unmap_single(nic->pdev, (dma_addr_t)
1990                                  rxdp->Buffer0_ptr,
1991                                  BUF0_LEN, PCI_DMA_FROMDEVICE);
1992                 pci_unmap_single(nic->pdev, (dma_addr_t)
1993                                  rxdp->Buffer1_ptr,
1994                                  BUF1_LEN, PCI_DMA_FROMDEVICE);
1995                 pci_unmap_single(nic->pdev, (dma_addr_t)
1996                                  rxdp->Buffer2_ptr,
1997                                  dev->mtu + BUF0_LEN + 4,
1998                                  PCI_DMA_FROMDEVICE);
1999 #endif
2000                 rx_osm_handler(ring_data, rxdp);
2001                 get_info.offset++;
2002                 ring_data->rx_curr_get_info.offset =
2003                     get_info.offset;
2004                 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2005                     get_info.offset;
2006                 if (get_info.offset &&
2007                     (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2008                         get_info.offset = 0;
2009                         ring_data->rx_curr_get_info.offset
2010                             = get_info.offset;
2011                         get_block++;
2012                         get_block %= ring_data->block_count;
2013                         ring_data->rx_curr_get_info.block_index
2014                             = get_block;
2015                         rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2016                 }
2017
2018                 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2019                             get_info.offset;
2020 #ifdef CONFIG_S2IO_NAPI
2021                 nic->pkts_to_process -= 1;
2022                 if (!nic->pkts_to_process)
2023                         break;
2024 #else
2025                 pkt_cnt++;
2026                 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2027                         break;
2028 #endif
2029         }
2030 }
2031
2032 /**
2033  *  tx_intr_handler - Transmit interrupt handler
2034  *  @fifo_data : per-FIFO data structure of the FIFO to be serviced
2035  *  Description:
2036  *  If an interrupt was raised to indicate DMA complete of the
2037  *  Tx packet, this function is called. It identifies the last TxD
2038  *  whose buffer was freed and frees all skbs whose data have already been
2039  *  DMA'ed into the NIC's internal memory.
2040  *  Return Value:
2041  *  NONE
2042  */
2043
2044 static void tx_intr_handler(fifo_info_t *fifo_data)
2045 {
2046         nic_t *nic = fifo_data->nic;
2047         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2048         struct net_device *dev = (struct net_device *) nic->dev;
2049         tx_curr_get_info_t get_info, put_info;
2050         struct sk_buff *skb;
2051         TxD_t *txdlp;
2052         u16 j, frg_cnt;
2053         register u64 val64 = 0;
2054
2055         /*
2056          * tx_traffic_int reg is an R1 register, hence we read and write
2057          * back the same value in the register to clear it
2058          */
2059         val64 = readq(&bar0->tx_traffic_int);
2060         writeq(val64, &bar0->tx_traffic_int);
2061
2062         get_info = fifo_data->tx_curr_get_info;
2063         put_info = fifo_data->tx_curr_put_info;
2064         txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2065             list_virt_addr;
2066         while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2067                (get_info.offset != put_info.offset) &&
2068                (txdlp->Host_Control)) {
2069                 /* Check for TxD errors */
2070                 if (txdlp->Control_1 & TXD_T_CODE) {
2071                         unsigned long long err;
2072                         err = txdlp->Control_1 & TXD_T_CODE;
2073                         DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2074                                   err);
2075                 }
2076
2077                 skb = (struct sk_buff *) ((unsigned long)
2078                                 txdlp->Host_Control);
2079                 if (skb == NULL) {
2080                         DBG_PRINT(ERR_DBG, "%s: Null skb ",
2081                         __FUNCTION__);
2082                         DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2083                         return;
2084                 }
2085
2086                 frg_cnt = skb_shinfo(skb)->nr_frags;
2087                 nic->tx_pkt_count++;
2088
2089                 pci_unmap_single(nic->pdev, (dma_addr_t)
2090                                  txdlp->Buffer_Pointer,
2091                                  skb->len - skb->data_len,
2092                                  PCI_DMA_TODEVICE);
2093                 if (frg_cnt) {
2094                         TxD_t *temp;
2095                         temp = txdlp;
2096                         txdlp++;
2097                         for (j = 0; j < frg_cnt; j++, txdlp++) {
2098                                 skb_frag_t *frag =
2099                                     &skb_shinfo(skb)->frags[j];
2100                                 pci_unmap_page(nic->pdev,
2101                                                (dma_addr_t)
2102                                                txdlp->
2103                                                Buffer_Pointer,
2104                                                frag->size,
2105                                                PCI_DMA_TODEVICE);
2106                         }
2107                         txdlp = temp;
2108                 }
2109                 memset(txdlp, 0,
2110                        (sizeof(TxD_t) * fifo_data->max_txds));
2111
2112                 /* Updating the statistics block */
2113                 nic->stats.tx_packets++;
2114                 nic->stats.tx_bytes += skb->len;
2115                 dev_kfree_skb_irq(skb);
2116
2117                 get_info.offset++;
2118                 get_info.offset %= get_info.fifo_len + 1;
2119                 txdlp = (TxD_t *) fifo_data->list_info
2120                     [get_info.offset].list_virt_addr;
2121                 fifo_data->tx_curr_get_info.offset =
2122                     get_info.offset;
2123         }
2124
2125         spin_lock(&nic->tx_lock);
2126         if (netif_queue_stopped(dev))
2127                 netif_wake_queue(dev);
2128         spin_unlock(&nic->tx_lock);
2129 }
2130
2131 /**
2132  *  alarm_intr_handler - Alarm Interrupt handler
2133  *  @nic: device private variable
2134  *  Description: If the interrupt was caused neither by an Rx packet nor a
2135  *  Tx complete, this function is called. If the interrupt indicated a loss
2136  *  of link, the OSM link status handler is invoked; for any other alarm
2137  *  interrupt the block that raised the interrupt is displayed and a H/W
2138  *  reset is issued.
2139  *  Return Value:
2140  *  NONE
2141 */
2142
2143 static void alarm_intr_handler(struct s2io_nic *nic)
2144 {
2145         struct net_device *dev = (struct net_device *) nic->dev;
2146         XENA_dev_config_t __iomem *bar0 = nic->bar0;
2147         register u64 val64 = 0, err_reg = 0;
2148
2149         /* Handling link status change error Intr */
2150         err_reg = readq(&bar0->mac_rmac_err_reg);
2151         writeq(err_reg, &bar0->mac_rmac_err_reg);
2152         if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
2153                 schedule_work(&nic->set_link_task);
2154         }
2155
2156         /* In case of a serious error, the device will be Reset. */
2157         val64 = readq(&bar0->serr_source);
2158         if (val64 & SERR_SOURCE_ANY) {
2159                 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2160                 DBG_PRINT(ERR_DBG, "serious error!!\n");
2161                 netif_stop_queue(dev);
2162                 schedule_work(&nic->rst_timer_task);
2163         }
2164
2165         /*
2166          * Also, as mentioned in the latest errata sheets, if a PCC_FB_ECC
2167          * error occurs, the adapter will be recycled by disabling the
2168          * adapter enable bit and enabling it again after the device
2169          * becomes quiescent.
2170          */
2171         val64 = readq(&bar0->pcc_err_reg);
2172         writeq(val64, &bar0->pcc_err_reg);
2173         if (val64 & PCC_FB_ECC_DB_ERR) {
2174                 u64 ac = readq(&bar0->adapter_control);
2175                 ac &= ~(ADAPTER_CNTL_EN);
2176                 writeq(ac, &bar0->adapter_control);
2177                 ac = readq(&bar0->adapter_control);
2178                 schedule_work(&nic->set_link_task);
2179         }
2180
2181         /* Other types of interrupts are not being handled now, TODO */
2182 }
2183
2184 /**
2185  *  wait_for_cmd_complete - waits for a command to complete.
2186  *  @sp : private member of the device structure, which is a pointer to the
2187  *  s2io_nic structure.
2188  *  Description: Waits for a command issued through the RMAC ADDR/DATA
2189  *  registers to complete and returns either success or failure depending
2190  *  on whether the command completed or not.
2191  *  Return value:
2192  *   SUCCESS on success and FAILURE on failure.
2193  */
2194
2195 int wait_for_cmd_complete(nic_t * sp)
2196 {
2197         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2198         int ret = FAILURE, cnt = 0;
2199         u64 val64;
2200
2201         while (TRUE) {
2202                 val64 = readq(&bar0->rmac_addr_cmd_mem);
2203                 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2204                         ret = SUCCESS;
2205                         break;
2206                 }
2207                 msleep(50);
2208                 if (cnt++ > 10)
2209                         break;
2210         }
2211
2212         return ret;
2213 }
2214
2215 /**
2216  *  s2io_reset - Resets the card.
2217  *  @sp : private member of the device structure.
2218  *  Description: Function to Reset the card. This function then also
2219  *  restores the previously saved PCI configuration space registers as
2220  *  the card reset also resets the configuration space.
2221  *  Return value:
2222  *  void.
2223  */
2224
2225 void s2io_reset(nic_t * sp)
2226 {
2227         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2228         u64 val64;
2229         u16 subid;
2230
2231         val64 = SW_RESET_ALL;
2232         writeq(val64, &bar0->sw_reset);
2233
2234         /*
2235          * At this stage, if the PCI write has indeed completed, the
2236          * card is reset and so is the PCI config space of the device.
2237          * So a read cannot be issued at this stage on any of the
2238          * registers to ensure the write into the sw_reset register
2239          * has gone through.
2240          * Question: Is there any system call that will explicitly force
2241          * all the write commands still pending on the bus to be pushed
2242          * through? (See the illustrative sketch after this function.)
2243          * As of now I'm just giving a 250ms delay and hoping that the
2244          * PCI write to the sw_reset register is done by this time.
2245          */
2246         msleep(250);
2247
2248         /* Restore the PCI state saved during initialization. */
2249         pci_restore_state(sp->pdev);
2250
2251         s2io_init_pci(sp);
2252
2253         msleep(250);
2254
2255         /* Set swapper to enable I/O register access */
2256         s2io_set_swapper(sp);
2257
2258         /* Reset device statistics maintained by OS */
2259         memset(&sp->stats, 0, sizeof (struct net_device_stats));
2260
2261         /* SXE-002: Configure link and activity LED to turn it off */
2262         subid = sp->pdev->subsystem_device;
2263         if ((subid & 0xFF) >= 0x07) {
2264                 val64 = readq(&bar0->gpio_control);
2265                 val64 |= 0x0000800000000000ULL;
2266                 writeq(val64, &bar0->gpio_control);
2267                 val64 = 0x0411040400000000ULL;
2268                 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2269         }
2270
2271         sp->device_enabled_once = FALSE;
2272 }
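
/*
 * Editor's note: illustrative sketch only, not part of the driver. A posted
 * PCI write, such as the one to the sw_reset register above, can in general
 * be flushed by issuing a read to the same device, because PCI reads are
 * non-posted and cannot complete until earlier writes have been pushed
 * through. Whether the adapter can service such a read immediately after a
 * software reset is hardware specific, which is presumably why the code
 * above falls back to a fixed delay. The helper name below is hypothetical.
 */
static inline void s2io_example_flush_posted_writes(struct pci_dev *pdev)
{
        u16 dummy;

        /* The completion of this non-posted config space read implies that
         * all earlier posted writes to the device have been pushed through.
         */
        pci_read_config_word(pdev, PCI_VENDOR_ID, &dummy);
}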
2273
2274 /**
2275  *  s2io_set_swapper - to set the swapper control on the card
2276  *  @sp : private member of the device structure,
2277  *  pointer to the s2io_nic structure.
2278  *  Description: Function to set the swapper control on the card
2279  *  correctly depending on the 'endianness' of the system.
2280  *  Return value:
2281  *  SUCCESS on success and FAILURE on failure.
2282  */
2283
2284 int s2io_set_swapper(nic_t * sp)
2285 {
2286         struct net_device *dev = sp->dev;
2287         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2288         u64 val64, valt, valr;
2289
2290         /*
2291          * Set proper endian settings and verify the same by reading
2292          * the PIF Feed-back register.
2293          */
2294
2295         val64 = readq(&bar0->pif_rd_swapper_fb);
2296         if (val64 != 0x0123456789ABCDEFULL) {
2297                 int i = 0;
2298                 u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
2299                                 0x8100008181000081ULL,  /* FE=1, SE=0 */
2300                                 0x4200004242000042ULL,  /* FE=0, SE=1 */
2301                                 0};                     /* FE=0, SE=0 */
2302
2303                 while(i<4) {
2304                         writeq(value[i], &bar0->swapper_ctrl);
2305                         val64 = readq(&bar0->pif_rd_swapper_fb);
2306                         if (val64 == 0x0123456789ABCDEFULL)
2307                                 break;
2308                         i++;
2309                 }
2310                 if (i == 4) {
2311                         DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2312                                 dev->name);
2313                         DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2314                                 (unsigned long long) val64);
2315                         return FAILURE;
2316                 }
2317                 valr = value[i];
2318         } else {
2319                 valr = readq(&bar0->swapper_ctrl);
2320         }
2321
2322         valt = 0x0123456789ABCDEFULL;
2323         writeq(valt, &bar0->xmsi_address);
2324         val64 = readq(&bar0->xmsi_address);
2325
2326         if(val64 != valt) {
2327                 int i = 0;
2328                 u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
2329                                 0x0081810000818100ULL,  /* FE=1, SE=0 */
2330                                 0x0042420000424200ULL,  /* FE=0, SE=1 */
2331                                 0};                     /* FE=0, SE=0 */
2332
2333                 while(i<4) {
2334                         writeq((value[i] | valr), &bar0->swapper_ctrl);
2335                         writeq(valt, &bar0->xmsi_address);
2336                         val64 = readq(&bar0->xmsi_address);
2337                         if(val64 == valt)
2338                                 break;
2339                         i++;
2340                 }
2341                 if(i == 4) {
2342                         unsigned long long x = val64;
2343                         DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2344                         DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
2345                         return FAILURE;
2346                 }
2347         }
2348         val64 = readq(&bar0->swapper_ctrl);
2349         val64 &= 0xFFFF000000000000ULL;
2350
2351 #ifdef  __BIG_ENDIAN
2352         /*
2353          * The device is by default set to a big endian format, so a
2354          * big endian driver need not set anything.
2355          */
2356         val64 |= (SWAPPER_CTRL_TXP_FE |
2357                  SWAPPER_CTRL_TXP_SE |
2358                  SWAPPER_CTRL_TXD_R_FE |
2359                  SWAPPER_CTRL_TXD_W_FE |
2360                  SWAPPER_CTRL_TXF_R_FE |
2361                  SWAPPER_CTRL_RXD_R_FE |
2362                  SWAPPER_CTRL_RXD_W_FE |
2363                  SWAPPER_CTRL_RXF_W_FE |
2364                  SWAPPER_CTRL_XMSI_FE |
2365                  SWAPPER_CTRL_XMSI_SE |
2366                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2367         writeq(val64, &bar0->swapper_ctrl);
2368 #else
2369         /*
2370          * Initially we enable all bits to make it accessible by the
2371          * driver, then we selectively enable only those bits that
2372          * we want to set.
2373          */
2374         val64 |= (SWAPPER_CTRL_TXP_FE |
2375                  SWAPPER_CTRL_TXP_SE |
2376                  SWAPPER_CTRL_TXD_R_FE |
2377                  SWAPPER_CTRL_TXD_R_SE |
2378                  SWAPPER_CTRL_TXD_W_FE |
2379                  SWAPPER_CTRL_TXD_W_SE |
2380                  SWAPPER_CTRL_TXF_R_FE |
2381                  SWAPPER_CTRL_RXD_R_FE |
2382                  SWAPPER_CTRL_RXD_R_SE |
2383                  SWAPPER_CTRL_RXD_W_FE |
2384                  SWAPPER_CTRL_RXD_W_SE |
2385                  SWAPPER_CTRL_RXF_W_FE |
2386                  SWAPPER_CTRL_XMSI_FE |
2387                  SWAPPER_CTRL_XMSI_SE |
2388                  SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2389         writeq(val64, &bar0->swapper_ctrl);
2390 #endif
2391         val64 = readq(&bar0->swapper_ctrl);
2392
2393         /*
2394          * Verifying if endian settings are accurate by reading a
2395          * feedback register.
2396          */
2397         val64 = readq(&bar0->pif_rd_swapper_fb);
2398         if (val64 != 0x0123456789ABCDEFULL) {
2399                 /* Endian settings are incorrect, calls for another dekko. */
2400                 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2401                           dev->name);
2402                 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2403                           (unsigned long long) val64);
2404                 return FAILURE;
2405         }
2406
2407         return SUCCESS;
2408 }
2409
2410 /* ********************************************************* *
2411  * Functions defined below concern the OS part of the driver *
2412  * ********************************************************* */
2413
2414 /**
2415  *  s2io_open - open entry point of the driver
2416  *  @dev : pointer to the device structure.
2417  *  Description:
2418  *  This function is the open entry point of the driver. It mainly calls a
2419  *  function to allocate Rx buffers and inserts them into the buffer
2420  *  descriptors and then enables the Rx part of the NIC.
2421  *  Return value:
2422  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2423  *   file on failure.
2424  */
2425
2426 int s2io_open(struct net_device *dev)
2427 {
2428         nic_t *sp = dev->priv;
2429         int err = 0;
2430
2431         /*
2432          * Make sure you have link off by default every time
2433          * Nic is initialized
2434          */
2435         netif_carrier_off(dev);
2436         sp->last_link_state = LINK_DOWN;
2437
2438         /* Initialize H/W and enable interrupts */
2439         if (s2io_card_up(sp)) {
2440                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2441                           dev->name);
2442                 err = -ENODEV;
2443                 goto hw_init_failed;
2444         }
2445
2446         /* After proper initialization of H/W, register ISR */
2447         err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2448                           sp->name, dev);
2449         if (err) {
2450                 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2451                           dev->name);
2452                 goto isr_registration_failed;
2453         }
2454
2455         if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2456                 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2457                 err = -ENODEV;
2458                 goto setting_mac_address_failed;
2459         }
2460
2461         netif_start_queue(dev);
2462         return 0;
2463
2464 setting_mac_address_failed:
2465         free_irq(sp->pdev->irq, dev);
2466 isr_registration_failed:
2467         s2io_reset(sp);
2468 hw_init_failed:
2469         return err;
2470 }
2471
2472 /**
2473  *  s2io_close -close entry point of the driver
2474  *  @dev : device pointer.
2475  *  Description:
2476  *  This is the stop entry point of the driver. It needs to undo exactly
2477  *  This is the stop entry point of the driver. It needs to undo exactly
2478  *  whatever was done by the open entry point, thus it's usually referred to
2479  *  as the close function. Among other things this function mainly stops the
2480  *  Return value:
2481  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2482  *  file on failure.
2483  */
2484
2485 int s2io_close(struct net_device *dev)
2486 {
2487         nic_t *sp = dev->priv;
2488         flush_scheduled_work();
2489         netif_stop_queue(dev);
2490         /* Reset card, kill tasklet and free Tx and Rx buffers. */
2491         s2io_card_down(sp);
2492
2493         free_irq(sp->pdev->irq, dev);
2494         sp->device_close_flag = TRUE;   /* Device is shut down. */
2495         return 0;
2496 }
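
/*
 * Editor's note: illustrative sketch only, not part of the driver. In a
 * 2.6-era driver the open/stop/xmit entry points above are hooked straight
 * into struct net_device from the PCI probe path, roughly as shown below
 * (assuming the usual prototypes from s2io.h are in scope). The real
 * assignments live in the driver's initialization code, outside this excerpt.
 */
static void s2io_example_wire_entry_points(struct net_device *dev)
{
        dev->open = &s2io_open;                 /* s2io_open() above */
        dev->stop = &s2io_close;                /* s2io_close() above */
        dev->hard_start_xmit = &s2io_xmit;      /* s2io_xmit() below */
}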
2497
2498 /**
2499  *  s2io_xmit - Tx entry point of the driver
2500  *  @skb : the socket buffer containing the Tx data.
2501  *  @dev : device pointer.
2502  *  Description :
2503  *  This function is the Tx entry point of the driver. S2IO NIC supports
2504  *  certain protocol assist features on Tx side, namely  CSO, S/G, LSO.
2505  *  NOTE: when the device can't queue the packet, just the trans_start
2506  *  variable will not be updated.
2507  *  Return value:
2508  *  0 on success & 1 on failure.
2509  */
2510
2511 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2512 {
2513         nic_t *sp = dev->priv;
2514         u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2515         register u64 val64;
2516         TxD_t *txdp;
2517         TxFIFO_element_t __iomem *tx_fifo;
2518         unsigned long flags;
2519 #ifdef NETIF_F_TSO
2520         int mss;
2521 #endif
2522         mac_info_t *mac_control;
2523         struct config_param *config;
2524         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2525
2526         mac_control = &sp->mac_control;
2527         config = &sp->config;
2528
2529         DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2530         spin_lock_irqsave(&sp->tx_lock, flags);
2531         if (atomic_read(&sp->card_state) == CARD_DOWN) {
2532                 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2533                           dev->name);
2534                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2535                 dev_kfree_skb(skb);
2536                 return 0;
2537         }
2538
2539         queue = 0;
2540
2541         put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2542         get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2543         txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2544                 list_virt_addr;
2545
2546         queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2547         /* Avoid "put" pointer going beyond "get" pointer */
2548         if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2549                 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2550                 netif_stop_queue(dev);
2551                 dev_kfree_skb(skb);
2552                 spin_unlock_irqrestore(&sp->tx_lock, flags);
2553                 return 0;
2554         }
2555 #ifdef NETIF_F_TSO
2556         mss = skb_shinfo(skb)->tso_size;
2557         if (mss) {
2558                 txdp->Control_1 |= TXD_TCP_LSO_EN;
2559                 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2560         }
2561 #endif
2562
2563         frg_cnt = skb_shinfo(skb)->nr_frags;
2564         frg_len = skb->len - skb->data_len;
2565
2566         txdp->Buffer_Pointer = pci_map_single
2567             (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2568         txdp->Host_Control = (unsigned long) skb;
2569         if (skb->ip_summed == CHECKSUM_HW) {
2570                 txdp->Control_2 |=
2571                     (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2572                      TXD_TX_CKO_UDP_EN);
2573         }
2574
2575         txdp->Control_2 |= config->tx_intr_type;
2576
2577         txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2578                             TXD_GATHER_CODE_FIRST);
2579         txdp->Control_1 |= TXD_LIST_OWN_XENA;
2580
2581         /* For fragmented SKB. */
2582         for (i = 0; i < frg_cnt; i++) {
2583                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2584                 txdp++;
2585                 txdp->Buffer_Pointer = (u64) pci_map_page
2586                     (sp->pdev, frag->page, frag->page_offset,
2587                      frag->size, PCI_DMA_TODEVICE);
2588                 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2589         }
2590         txdp->Control_1 |= TXD_GATHER_CODE_LAST;
2591
2592         tx_fifo = mac_control->tx_FIFO_start[queue];
2593         val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2594         writeq(val64, &tx_fifo->TxDL_Pointer);
2595
2596         val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2597                  TX_FIFO_LAST_LIST);
2598
2599 #ifdef NETIF_F_TSO
2600         if (mss)
2601                 val64 |= TX_FIFO_SPECIAL_FUNC;
2602 #endif
2603         writeq(val64, &tx_fifo->List_Control);
2604
2605         /* Perform a PCI read to flush previous writes */
2606         val64 = readq(&bar0->general_int_status);
2607
2608         put_off++;
2609         put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2610         mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2611
2612         /* Avoid "put" pointer going beyond "get" pointer */
2613         if (((put_off + 1) % queue_len) == get_off) {
2614                 DBG_PRINT(TX_DBG,
2615                           "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2616                           put_off, get_off);
2617                 netif_stop_queue(dev);
2618         }
2619
2620         dev->trans_start = jiffies;
2621         spin_unlock_irqrestore(&sp->tx_lock, flags);
2622
2623         return 0;
2624 }
2625
2626 /**
2627  *  s2io_isr - ISR handler of the device.
2628  *  @irq: the irq of the device.
2629  *  @dev_id: a void pointer to the dev structure of the NIC.
2630  *  @pt_regs: pointer to the registers pushed on the stack.
2631  *  Description:  This function is the ISR handler of the device. It
2632  *  identifies the reason for the interrupt and calls the relevant
2633  *  service routines. As a contingency measure, this ISR also replenishes
2634  *  the receive buffers if their number falls below the panic value, which
2635  *  is presently set to 25% of the original number of receive buffers.
2636  *  Return value:
2637  *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
2638  *   IRQ_NONE: will be returned if interrupt is not from our device
2639  */
2640 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2641 {
2642         struct net_device *dev = (struct net_device *) dev_id;
2643         nic_t *sp = dev->priv;
2644         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2645         int i;
2646         u64 reason = 0;
2647         mac_info_t *mac_control;
2648         struct config_param *config;
2649
2650         mac_control = &sp->mac_control;
2651         config = &sp->config;
2652
2653         /*
2654          * Identify the cause for interrupt and call the appropriate
2655          * interrupt handler. Causes for the interrupt could be;
2656          * 1. Rx of packet.
2657          * 2. Tx complete.
2658          * 3. Link down.
2659          * 4. Error in any functional blocks of the NIC.
2660          */
2661         reason = readq(&bar0->general_int_status);
2662
2663         if (!reason) {
2664                 /* The interrupt was not raised by Xena. */
2665                 return IRQ_NONE;
2666         }
2667
2668         if (reason & (GEN_ERROR_INTR))
2669                 alarm_intr_handler(sp);
2670
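             /*
              * Rx traffic interrupt: with NAPI the rings are drained later
              * from the poll routine, so Rx interrupts are masked and polling
              * is only scheduled here; without NAPI the rings are serviced
              * directly in the ISR.
              */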
2671 #ifdef CONFIG_S2IO_NAPI
2672         if (reason & GEN_INTR_RXTRAFFIC) {
2673                 if (netif_rx_schedule_prep(dev)) {
2674                         en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2675                                               DISABLE_INTRS);
2676                         __netif_rx_schedule(dev);
2677                 }
2678         }
2679 #else
2680         /* If Intr is because of Rx Traffic */
2681         if (reason & GEN_INTR_RXTRAFFIC) {
2682                 for (i = 0; i < config->rx_ring_num; i++) {
2683                         rx_intr_handler(&mac_control->rings[i]);
2684                 }
2685         }
2686 #endif
2687
2688         /* If Intr is because of Tx Traffic */
2689         if (reason & GEN_INTR_TXTRAFFIC) {
2690                 for (i = 0; i < config->tx_fifo_num; i++)
2691                         tx_intr_handler(&mac_control->fifos[i]);
2692         }
2693
2694         /*
2695          * If the Rx buffer count is below the panic threshold then
2696          * reallocate the buffers from the interrupt handler itself,
2697          * else schedule a tasklet to reallocate the buffers.
2698          */
2699 #ifndef CONFIG_S2IO_NAPI
2700         for (i = 0; i < config->rx_ring_num; i++) {
2701                 int ret;
2702                 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2703                 int level = rx_buffer_level(sp, rxb_size, i);
2704
2705                 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2706                         DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2707                         DBG_PRINT(INTR_DBG, "PANIC levels\n");
2708                         if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2709                                 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2710                                           dev->name);
2711                                 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2712                                 clear_bit(0, (&sp->tasklet_status));
2713                                 return IRQ_HANDLED;
2714                         }
2715                         clear_bit(0, (&sp->tasklet_status));
2716                 } else if (level == LOW) {
2717                         tasklet_schedule(&sp->task);
2718                 }
2719         }
2720 #endif
2721
2722         return IRQ_HANDLED;
2723 }
2724
2725 /**
2726  *  s2io_get_stats - Updates the device statistics structure.
2727  *  @dev : pointer to the device structure.
2728  *  Description:
2729  *  This function updates the device statistics structure in the s2io_nic
2730  *  structure and returns a pointer to the same.
2731  *  Return value:
2732  *  pointer to the updated net_device_stats structure.
2733  */
2734
2735 struct net_device_stats *s2io_get_stats(struct net_device *dev)
2736 {
2737         nic_t *sp = dev->priv;
2738         mac_info_t *mac_control;
2739         struct config_param *config;
2740
2741
2742         mac_control = &sp->mac_control;
2743         config = &sp->config;
2744
2745         sp->stats.tx_errors =
2746                 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
2747         sp->stats.rx_errors =
2748                 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
2749         sp->stats.multicast =
2750                 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
2751         sp->stats.rx_length_errors =
2752                 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
2753
2754         return (&sp->stats);
2755 }
2756
2757 /**
2758  *  s2io_set_multicast - entry point for multicast address enable/disable.
2759  *  @dev : pointer to the device structure
2760  *  Description:
2761  *  This function is a driver entry point which gets called by the kernel
2762  *  whenever multicast addresses must be enabled/disabled. This also gets
2763  *  called to set/reset promiscuous mode. Depending on the device flags, we
2764  *  determine whether multicast addresses must be enabled or if promiscuous
2765  *  mode is to be enabled or disabled, etc.
2766  *  Return value:
2767  *  void.
2768  */
2769
2770 static void s2io_set_multicast(struct net_device *dev)
2771 {
2772         int i, j, prev_cnt;
2773         struct dev_mc_list *mclist;
2774         nic_t *sp = dev->priv;
2775         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2776         u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
2777             0xfeffffffffffULL;
2778         u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
2779         void __iomem *add;
2780
2781         if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
2782                 /*  Enable all Multicast addresses */
2783                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
2784                        &bar0->rmac_addr_data0_mem);
2785                 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
2786                        &bar0->rmac_addr_data1_mem);
2787                 val64 = RMAC_ADDR_CMD_MEM_WE |
2788                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2789                     RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
2790                 writeq(val64, &bar0->rmac_addr_cmd_mem);
2791                 /* Wait till command completes */
2792                 wait_for_cmd_complete(sp);
2793
2794                 sp->m_cast_flg = 1;
2795                 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
2796         } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
2797                 /*  Disable all Multicast addresses */
2798                 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2799                        &bar0->rmac_addr_data0_mem);
2800                 val64 = RMAC_ADDR_CMD_MEM_WE |
2801                     RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2802                     RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
2803                 writeq(val64, &bar0->rmac_addr_cmd_mem);
2804                 /* Wait till command completes */
2805                 wait_for_cmd_complete(sp);
2806
2807                 sp->m_cast_flg = 0;
2808                 sp->all_multi_pos = 0;
2809         }
2810
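             /*
              * mac_cfg is wider than 32 bits, so each half is written with
              * writel(); the RMAC_CFG_KEY write before each half unlocks the
              * key-protected register for writing.
              */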
2811         if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
2812                 /*  Put the NIC into promiscuous mode */
2813                 add = &bar0->mac_cfg;
2814                 val64 = readq(&bar0->mac_cfg);
2815                 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
2816
2817                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2818                 writel((u32) val64, add);
2819                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2820                 writel((u32) (val64 >> 32), (add + 4));
2821
2822                 val64 = readq(&bar0->mac_cfg);
2823                 sp->promisc_flg = 1;
2824                 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
2825                           dev->name);
2826         } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
2827                 /*  Remove the NIC from promiscuous mode */
2828                 add = &bar0->mac_cfg;
2829                 val64 = readq(&bar0->mac_cfg);
2830                 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
2831
2832                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2833                 writel((u32) val64, add);
2834                 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2835                 writel((u32) (val64 >> 32), (add + 4));
2836
2837                 val64 = readq(&bar0->mac_cfg);
2838                 sp->promisc_flg = 0;
2839                 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
2840                           dev->name);
2841         }
2842
2843         /*  Update individual M_CAST address list */
2844         if ((!sp->m_cast_flg) && dev->mc_count) {
2845                 if (dev->mc_count >
2846                     (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
2847                         DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
2848                                   dev->name);
2849                         DBG_PRINT(ERR_DBG, "can be added, please enable ");
2850                         DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
2851                         return;
2852                 }
2853
2854                 prev_cnt = sp->mc_addr_count;
2855                 sp->mc_addr_count = dev->mc_count;
2856
2857                 /* Clear out the previous list of Mc in the H/W. */
2858                 for (i = 0; i < prev_cnt; i++) {
2859                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2860                                &bar0->rmac_addr_data0_mem);
2861                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
2862                                 &bar0->rmac_addr_data1_mem);
2863                         val64 = RMAC_ADDR_CMD_MEM_WE |
2864                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2865                             RMAC_ADDR_CMD_MEM_OFFSET
2866                             (MAC_MC_ADDR_START_OFFSET + i);
2867                         writeq(val64, &bar0->rmac_addr_cmd_mem);
2868
2869                         /* Wait till command completes */
2870                         if (wait_for_cmd_complete(sp)) {
2871                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
2872                                           dev->name);
2873                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
2874                                 return;
2875                         }
2876                 }
2877
2878                 /* Create the new Rx filter list and update the same in H/W. */
2879                 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2880                      i++, mclist = mclist->next) {
2881                         memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
2882                                ETH_ALEN);
2883                         for (j = 0; j < ETH_ALEN; j++) {
2884                                 mac_addr |= mclist->dmi_addr[j];
2885                                 mac_addr <<= 8;
2886                         }
2887                         mac_addr >>= 8;
2888                         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
2889                                &bar0->rmac_addr_data0_mem);
2890                         writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
2891                                 &bar0->rmac_addr_data1_mem);
2892                         val64 = RMAC_ADDR_CMD_MEM_WE |
2893                             RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2894                             RMAC_ADDR_CMD_MEM_OFFSET
2895                             (i + MAC_MC_ADDR_START_OFFSET);
2896                         writeq(val64, &bar0->rmac_addr_cmd_mem);
2897
2898                         /* Wait till command completes */
2899                         if (wait_for_cmd_complete(sp)) {
2900                                 DBG_PRINT(ERR_DBG, "%s: Adding ",
2901                                           dev->name);
2902                                 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
2903                                 return;
2904                         }
2905                 }
2906         }
2907 }
2908
2909 /**
2910  *  s2io_set_mac_addr - Programs the Xframe mac address
2911  *  @dev : pointer to the device structure.
2912  *  @addr: a uchar pointer to the new mac address which is to be set.
2913  *  Description : This procedure will program the Xframe to receive
2914  *  frames with the new MAC address.
2915  *  Return value: SUCCESS on success and an appropriate (-)ve integer
2916  *  as defined in errno.h file on failure.
2917  */
2918
2919 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
2920 {
2921         nic_t *sp = dev->priv;
2922         XENA_dev_config_t __iomem *bar0 = sp->bar0;
2923         register u64 val64, mac_addr = 0;
2924         int i;
2925
2926         /*
2927          * Set the new MAC address as the new unicast filter and reflect this
2928          * change on the device address registered with the OS. It will be
2929          * at offset 0.
2930          */
2931         for (i = 0; i < ETH_ALEN; i++) {
2932                 mac_addr <<= 8;
2933                 mac_addr |= addr[i];
2934         }
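             /*
              * addr[0] ends up in the most significant byte of the 48-bit
              * value, e.g. 00:80:0f:01:02:03 becomes 0x00800f010203.
              */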
2935
2936         writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
2937                &bar0->rmac_addr_data0_mem);
2938
2939         val64 =
2940             RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2941             RMAC_ADDR_CMD_MEM_OFFSET(0);
2942         writeq(val64, &bar0->rmac_addr_cmd_mem);
2943         /* Wait till command completes */
2944         if (wait_for_cmd_complete(sp)) {
2945                 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
2946                 return FAILURE;
2947         }
2948
2949         return SUCCESS;
2950 }
2951
2952 /**
2953  * s2io_ethtool_sset - Sets different link parameters.
2954  * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
2955  * @info: pointer to the structure with parameters given by ethtool to set
2956  * link information.
2957  * Description:
2958  * The function sets different link parameters provided by the user onto
2959  * the NIC.
2960  * Return value:
2961  * 0 on success.
2962 */
2963
2964 static int s2io_ethtool_sset(struct net_device *dev,
2965                              struct ethtool_cmd *info)
2966 {
2967         nic_t *sp = dev->priv;
2968         if ((info->autoneg == AUTONEG_ENABLE) ||
2969             (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
2970                 return -EINVAL;
2971         else {
2972                 s2io_close(sp->dev);
2973                 s2io_open(sp->dev);
2974         }
2975
2976         return 0;
2977 }
2978
2979 /**
2980  * s2io_ethtool_gset - Return link specific information.
2981  * @sp : private member of the device structure, pointer to the
2982  *      s2io_nic structure.
2983  * @info : pointer to the structure with parameters given by ethtool
2984  * to return link information.
2985  * Description:
2986  * Returns link specific information like speed, duplex etc.. to ethtool.
2987  * Return value :
2988  * return 0 on success.
2989  */
2990
2991 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
2992 {
2993         nic_t *sp = dev->priv;
2994         info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
2995         info->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
2996         info->port = PORT_FIBRE;
2997         /* info->transceiver?? TODO */
2998
2999         if (netif_carrier_ok(sp->dev)) {
3000                 info->speed = 10000;
3001                 info->duplex = DUPLEX_FULL;
3002         } else {
3003                 info->speed = -1;
3004                 info->duplex = -1;
3005         }
3006
3007         info->autoneg = AUTONEG_DISABLE;
3008         return 0;
3009 }
3010
3011 /**
3012  * s2io_ethtool_gdrvinfo - Returns driver specific information.
3013  * @sp : private member of the device structure, which is a pointer to the
3014  * s2io_nic structure.
3015  * @info : pointer to the structure with parameters given by ethtool to
3016  * return driver information.
3017  * Description:
3018  * Returns driver specific information like name, version etc. to ethtool.
3019  * Return value:
3020  *  void
3021  */
3022
3023 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3024                                   struct ethtool_drvinfo *info)
3025 {
3026         nic_t *sp = dev->priv;
3027
3028         strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
3029         strncpy(info->version, s2io_driver_version,
3030                 sizeof(info->version));
3031         strncpy(info->fw_version, "", 32);
3032         strncpy(info->bus_info, pci_name(sp->pdev), 32);
3033         info->regdump_len = XENA_REG_SPACE;
3034         info->eedump_len = XENA_EEPROM_SPACE;
3035         info->testinfo_len = S2IO_TEST_LEN;
3036         info->n_stats = S2IO_STAT_LEN;
3037 }
3038
3039 /**
3040  *  s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3041  *  @sp: private member of the device structure, which is a pointer to the
3042  *  s2io_nic structure.
3043  *  @regs : pointer to the structure with parameters given by ethtool for
3044  *  dumping the registers.
3045  *  @reg_space: The input argument into which all the registers are dumped.
3046  *  Description:
3047  *  Dumps the entire register space of the Xframe NIC into the user-given
3048  *  buffer area.
3049  * Return value :
3050  * void .
3051 */
3052
3053 static void s2io_ethtool_gregs(struct net_device *dev,
3054                                struct ethtool_regs *regs, void *space)
3055 {
3056         int i;
3057         u64 reg;
3058         u8 *reg_space = (u8 *) space;
3059         nic_t *sp = dev->priv;
3060
3061         regs->len = XENA_REG_SPACE;
3062         regs->version = sp->pdev->subsystem_device;
3063
3064         for (i = 0; i < regs->len; i += 8) {
3065                 reg = readq(sp->bar0 + i);
3066                 memcpy((reg_space + i), &reg, 8);
3067         }
3068 }
3069
3070 /**
3071  *  s2io_phy_id  - timer function that alternates adapter LED.
3072  *  @data : address of the private member of the device structure, which
3073  *  is a pointer to the s2io_nic structure, provided as an u32.
3074  * Description: This is the timer function that toggles the adapter LED
3075  * bit in the adapter control register, setting/resetting it on each
3076  * invocation. The timer is set for 1/2 a second, hence the NIC LED
3077  * blinks once every second.
3078 */
3079 static void s2io_phy_id(unsigned long data)
3080 {
3081         nic_t *sp = (nic_t *) data;
3082         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3083         u64 val64 = 0;
3084         u16 subid;
3085
3086         subid = sp->pdev->subsystem_device;
3087         if ((subid & 0xFF) >= 0x07) {
3088                 val64 = readq(&bar0->gpio_control);
3089                 val64 ^= GPIO_CTRL_GPIO_0;
3090                 writeq(val64, &bar0->gpio_control);
3091         } else {
3092                 val64 = readq(&bar0->adapter_control);
3093                 val64 ^= ADAPTER_LED_ON;
3094                 writeq(val64, &bar0->adapter_control);
3095         }
3096
3097         mod_timer(&sp->id_timer, jiffies + HZ / 2);
3098 }
3099
3100 /**
3101  * s2io_ethtool_idnic - To physically identify the nic on the system.
3102  * @sp : private member of the device structure, which is a pointer to the
3103  * s2io_nic structure.
3104  * @id : pointer to the structure with identification parameters given by
3105  * ethtool.
3106  * Description: Used to physically identify the NIC on the system.
3107  * The Link LED will blink for a time specified by the user for
3108  * identification.
3109  * NOTE: The Link has to be Up to be able to blink the LED. Hence
3110  * identification is possible only if its link is up.
3111  * Return value:
3112  * int , returns 0 on success
3113  */
3114
3115 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3116 {
3117         u64 val64 = 0, last_gpio_ctrl_val;
3118         nic_t *sp = dev->priv;
3119         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3120         u16 subid;
3121
3122         subid = sp->pdev->subsystem_device;
3123         last_gpio_ctrl_val = readq(&bar0->gpio_control);
3124         if ((subid & 0xFF) < 0x07) {
3125                 val64 = readq(&bar0->adapter_control);
3126                 if (!(val64 & ADAPTER_CNTL_EN)) {
3127                         printk(KERN_ERR
3128                                "Adapter Link down, cannot blink LED\n");
3129                         return -EFAULT;
3130                 }
3131         }
3132         if (sp->id_timer.function == NULL) {
3133                 init_timer(&sp->id_timer);
3134                 sp->id_timer.function = s2io_phy_id;
3135                 sp->id_timer.data = (unsigned long) sp;
3136         }
3137         mod_timer(&sp->id_timer, jiffies);
3138         if (data)
3139                 msleep_interruptible(data * 1000); /* data is in seconds */
3140         else
3141                 msleep_interruptible(MAX_FLICKER_TIME);
3142         del_timer_sync(&sp->id_timer);
3143
3144         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3145                 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3146                 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3147         }
3148
3149         return 0;
3150 }
3151
3152 /**
3153  * s2io_ethtool_getpause_data - Pause frame generation and reception.
3154  * @sp : private member of the device structure, which is a pointer to the
3155  *      s2io_nic structure.
3156  * @ep : pointer to the structure with pause parameters given by ethtool.
3157  * Description:
3158  * Returns the Pause frame generation and reception capability of the NIC.
3159  * Return value:
3160  *  void
3161  */
3162 static void s2io_ethtool_getpause_data(struct net_device *dev,
3163                                        struct ethtool_pauseparam *ep)
3164 {
3165         u64 val64;
3166         nic_t *sp = dev->priv;
3167         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3168
3169         val64 = readq(&bar0->rmac_pause_cfg);
3170         if (val64 & RMAC_PAUSE_GEN_ENABLE)
3171                 ep->tx_pause = TRUE;
3172         if (val64 & RMAC_PAUSE_RX_ENABLE)
3173                 ep->rx_pause = TRUE;
3174         ep->autoneg = FALSE;
3175 }
3176
3177 /**
3178  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
3179  * @sp : private member of the device structure, which is a pointer to the
3180  *      s2io_nic structure.
3181  * @ep : pointer to the structure with pause parameters given by ethtool.
3182  * Description:
3183  * It can be used to set or reset Pause frame generation or reception
3184  * support of the NIC.
3185  * Return value:
3186  * int, returns 0 on Success
3187  */
3188
3189 static int s2io_ethtool_setpause_data(struct net_device *dev,
3190                                struct ethtool_pauseparam *ep)
3191 {
3192         u64 val64;
3193         nic_t *sp = dev->priv;
3194         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3195
3196         val64 = readq(&bar0->rmac_pause_cfg);
3197         if (ep->tx_pause)
3198                 val64 |= RMAC_PAUSE_GEN_ENABLE;
3199         else
3200                 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3201         if (ep->rx_pause)
3202                 val64 |= RMAC_PAUSE_RX_ENABLE;
3203         else
3204                 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3205         writeq(val64, &bar0->rmac_pause_cfg);
3206         return 0;
3207 }
3208
3209 /**
3210  * read_eeprom - reads 4 bytes of data from user given offset.
3211  * @sp : private member of the device structure, which is a pointer to the
3212  *      s2io_nic structure.
3213  * @off : offset from which the data is to be read
3214  * @data : It's an output parameter where the data read at the given
3215  *      offset is stored.
3216  * Description:
3217  * Will read 4 bytes of data from the user given offset and return the
3218  * read data.
3219  * NOTE: Will allow reading only the part of the EEPROM visible through
3220  *   the I2C bus.
3221  * Return value:
3222  *  -1 on failure and 0 on success.
3223  */
3224
3225 #define S2IO_DEV_ID             5
3226 static int read_eeprom(nic_t * sp, int off, u32 * data)
3227 {
3228         int ret = -1;
3229         u32 exit_cnt = 0;
3230         u64 val64;
3231         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3232
3233         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3234             I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3235             I2C_CONTROL_CNTL_START;
3236         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3237
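             /*
              * Poll for the I2C transaction to finish: up to 5 tries with a
              * 50 ms sleep between them (~250 ms worst case).
              */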
3238         while (exit_cnt < 5) {
3239                 val64 = readq(&bar0->i2c_control);
3240                 if (I2C_CONTROL_CNTL_END(val64)) {
3241                         *data = I2C_CONTROL_GET_DATA(val64);
3242                         ret = 0;
3243                         break;
3244                 }
3245                 msleep(50);
3246                 exit_cnt++;
3247         }
3248
3249         return ret;
3250 }
3251
3252 /**
3253  *  write_eeprom - actually writes the relevant part of the data value.
3254  *  @sp : private member of the device structure, which is a pointer to the
3255  *       s2io_nic structure.
3256  *  @off : offset at which the data must be written
3257  *  @data : The data that is to be written
3258  *  @cnt : Number of bytes of the data that are actually to be written into
3259  *  the Eeprom. (max of 3)
3260  * Description:
3261  *  Actually writes the relevant part of the data value into the Eeprom
3262  *  through the I2C bus.
3263  * Return value:
3264  *  0 on success, -1 on failure.
3265  */
3266
3267 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3268 {
3269         int exit_cnt = 0, ret = -1;
3270         u64 val64;
3271         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3272
3273         val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3274             I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3275             I2C_CONTROL_CNTL_START;
3276         SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3277
3278         while (exit_cnt < 5) {
3279                 val64 = readq(&bar0->i2c_control);
3280                 if (I2C_CONTROL_CNTL_END(val64)) {
3281                         if (!(val64 & I2C_CONTROL_NACK))
3282                                 ret = 0;
3283                         break;
3284                 }
3285                 msleep(50);
3286                 exit_cnt++;
3287         }
3288
3289         return ret;
3290 }
3291
3292 /**
3293  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
3294  *  @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
3295  *  @eeprom : pointer to the user level structure provided by ethtool,
3296  *  containing all relevant information.
3297  *  @data_buf : user defined value to be written into Eeprom.
3298  *  Description: Reads the values stored in the Eeprom at the given offset
3299  *  for a given length. Stores these values in the input argument data
3300  *  buffer 'data_buf' and returns them to the caller (ethtool).
3301  *  Return value:
3302  *  int  0 on success
3303  */
3304
3305 static int s2io_ethtool_geeprom(struct net_device *dev,
3306                          struct ethtool_eeprom *eeprom, u8 * data_buf)
3307 {
3308         u32 data, i, valid;
3309         nic_t *sp = dev->priv;
3310
3311         eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3312
3313         if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3314                 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3315
3316         for (i = 0; i < eeprom->len; i += 4) {
3317                 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3318                         DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3319                         return -EFAULT;
3320                 }
3321                 valid = INV(data);
3322                 memcpy((data_buf + i), &valid, 4);
3323         }
3324         return 0;
3325 }
3326
3327 /**
3328  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3329  *  @sp : private member of the device structure, which is a pointer to the
3330  *  s2io_nic structure.
3331  *  @eeprom : pointer to the user level structure provided by ethtool,
3332  *  containing all relevant information.
3333  *  @data_buf : user defined value to be written into Eeprom.
3334  *  Description:
3335  *  Tries to write the user provided value in the Eeprom, at the offset
3336  *  given by the user.
3337  *  Return value:
3338  *  0 on success, -EFAULT on failure.
3339  */
3340
3341 static int s2io_ethtool_seeprom(struct net_device *dev,
3342                                 struct ethtool_eeprom *eeprom,
3343                                 u8 * data_buf)
3344 {
3345         int len = eeprom->len, cnt = 0;
3346         u32 valid = 0, data;
3347         nic_t *sp = dev->priv;
3348
3349         if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3350                 DBG_PRINT(ERR_DBG,
3351                           "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3352                 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3353                           eeprom->magic);
3354                 return -EFAULT;
3355         }
3356
3357         while (len) {
3358                 data = (u32) data_buf[cnt] & 0x000000FF;
3359                 if (data) {
3360                         valid = (u32) (data << 24);
3361                 } else
3362                         valid = data;
3363
3364                 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3365                         DBG_PRINT(ERR_DBG,
3366                                   "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3367                         DBG_PRINT(ERR_DBG,
3368                                   "write into the specified offset\n");
3369                         return -EFAULT;
3370                 }
3371                 cnt++;
3372                 len--;
3373         }
3374
3375         return 0;
3376 }
3377
3378 /**
3379  * s2io_register_test - reads and writes into all clock domains.
3380  * @sp : private member of the device structure, which is a pointer to the
3381  * s2io_nic structure.
3382  * @data : variable that returns the result of each of the tests conducted
3383  * by the driver.
3384  * Description:
3385  * Read and write into all clock domains. The NIC has 3 clock domains;
3386  * verify that registers in all three regions are accessible.
3387  * Return value:
3388  * 0 on success.
3389  */
3390
3391 static int s2io_register_test(nic_t * sp, uint64_t * data)
3392 {
3393         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3394         u64 val64 = 0;
3395         int fail = 0;
3396
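             /*
              * Read a register from each of the NIC's clock domains and
              * compare it against its expected value, then do write/read-back
              * checks on xmsi_data with alternating 0x5A/0xA5 bit patterns.
              */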
3397         val64 = readq(&bar0->pif_rd_swapper_fb);
3398         if (val64 != 0x123456789abcdefULL) {
3399                 fail = 1;
3400                 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3401         }
3402
3403         val64 = readq(&bar0->rmac_pause_cfg);
3404         if (val64 != 0xc000ffff00000000ULL) {
3405                 fail = 1;
3406                 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3407         }
3408
3409         val64 = readq(&bar0->rx_queue_cfg);
3410         if (val64 != 0x0808080808080808ULL) {
3411                 fail = 1;
3412                 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3413         }
3414
3415         val64 = readq(&bar0->xgxs_efifo_cfg);
3416         if (val64 != 0x000000001923141EULL) {
3417                 fail = 1;
3418                 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
3419         }
3420
3421         val64 = 0x5A5A5A5A5A5A5A5AULL;
3422         writeq(val64, &bar0->xmsi_data);
3423         val64 = readq(&bar0->xmsi_data);
3424         if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3425                 fail = 1;
3426                 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3427         }
3428
3429         val64 = 0xA5A5A5A5A5A5A5A5ULL;
3430         writeq(val64, &bar0->xmsi_data);
3431         val64 = readq(&bar0->xmsi_data);
3432         if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3433                 fail = 1;
3434                 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3435         }
3436
3437         *data = fail;
3438         return 0;
3439 }
3440
3441 /**
3442  * s2io_eeprom_test - to verify that the EEPROM in the Xena can be programmed.
3443  * @sp : private member of the device structure, which is a pointer to the
3444  * s2io_nic structure.
3445  * @data:variable that returns the result of each of the test conducted by
3446  * the driver.
3447  * Description:
3448  * Verify that the EEPROM in the Xena can be programmed using the I2C_CONTROL
3449  * register.
3450  * Return value:
3451  * 0 on success.
3452  */
3453
3454 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3455 {
3456         int fail = 0;
3457         u32 ret_data;
3458
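             /*
              * Writes to offsets such as 0x0 and 0x7C are expected to fail
              * (presumably write-protected regions), while 0x4F0 and 0x7FC
              * act as scratch locations that must accept a write and read the
              * same value back.
              */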
3459         /* Test Write Error at offset 0 */
3460         if (!write_eeprom(sp, 0, 0, 3))
3461                 fail = 1;
3462
3463         /* Test Write at offset 4f0 */
3464         if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3465                 fail = 1;
3466         if (read_eeprom(sp, 0x4F0, &ret_data))
3467                 fail = 1;
3468
3469         if (ret_data != 0x01234567)
3470                 fail = 1;
3471
3472         /* Reset the EEPROM data back to 0xFFFFFFFF */
3473         write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3474
3475         /* Test Write Request Error at offset 0x7c */
3476         if (!write_eeprom(sp, 0x07C, 0, 3))
3477                 fail = 1;
3478
3479         /* Test Write Request at offset 0x7fc */
3480         if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3481                 fail = 1;
3482         if (read_eeprom(sp, 0x7FC, &ret_data))
3483                 fail = 1;
3484
3485         if (ret_data != 0x01234567)
3486                 fail = 1;
3487
3488         /* Reset the EEPROM data back to 0xFFFFFFFF */
3489         write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3490
3491         /* Test Write Error at offset 0x80 */
3492         if (!write_eeprom(sp, 0x080, 0, 3))
3493                 fail = 1;
3494
3495         /* Test Write Error at offset 0xfc */
3496         if (!write_eeprom(sp, 0x0FC, 0, 3))
3497                 fail = 1;
3498
3499         /* Test Write Error at offset 0x100 */
3500         if (!write_eeprom(sp, 0x100, 0, 3))
3501                 fail = 1;
3502
3503         /* Test Write Error at offset 4ec */
3504         if (!write_eeprom(sp, 0x4EC, 0, 3))
3505                 fail = 1;
3506
3507         *data = fail;
3508         return 0;
3509 }
3510
3511 /**
3512  * s2io_bist_test - invokes the MemBist test of the card .
3513  * @sp : private member of the device structure, which is a pointer to the
3514  * s2io_nic structure.
3515  * @data:variable that returns the result of each of the test conducted by
3516  * the driver.
3517  * Description:
3518  * This invokes the MemBist test of the card. We give around
3519  * 2 secs time for the Test to complete. If it's still not complete
3520  * within this period, we consider that the test failed.
3521  * Return value:
3522  * 0 on success and -1 on failure.
3523  */
3524
3525 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3526 {
3527         u8 bist = 0;
3528         int cnt = 0, ret = -1;
3529
3530         pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3531         bist |= PCI_BIST_START;
3532         pci_write_config_byte(sp->pdev, PCI_BIST, bist);
3533
3534         while (cnt < 20) {
3535                 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3536                 if (!(bist & PCI_BIST_START)) {
3537                         *data = (bist & PCI_BIST_CODE_MASK);
3538                         ret = 0;
3539                         break;
3540                 }
3541                 msleep(100);
3542                 cnt++;
3543         }
3544
3545         return ret;
3546 }
3547
3548 /**
3549  * s2io_link_test - verifies the link state of the nic
3550  * @sp : private member of the device structure, which is a pointer to the
3551  * s2io_nic structure.
3552  * @data: variable that returns the result of each of the test conducted by
3553  * the driver.
3554  * Description:
3555  * The function verifies the link state of the NIC and updates the input
3556  * argument 'data' appropriately.
3557  * Return value:
3558  * 0 on success.
3559  */
3560
3561 static int s2io_link_test(nic_t * sp, uint64_t * data)
3562 {
3563         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3564         u64 val64;
3565
3566         val64 = readq(&bar0->adapter_status);
3567         if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3568                 *data = 1;
3569
3570         return 0;
3571 }
3572
3573 /**
3574  * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3575  * @sp : private member of the device structure, which is a pointer to the
3576  * s2io_nic structure.
3577  * @data : variable that returns the result of each of the tests
3578  * conducted by the driver.
3579  * Description:
3580  *  This is one of the offline tests that checks the read and write
3581  *  access to the RldRam chip on the NIC.
3582  * Return value:
3583  *  0 on success.
3584  */
3585
3586 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3587 {
3588         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3589         u64 val64;
3590         int cnt, iteration = 0, test_pass = 0;
3591
3592         val64 = readq(&bar0->adapter_control);
3593         val64 &= ~ADAPTER_ECC_EN;
3594         writeq(val64, &bar0->adapter_control);
3595
3596         val64 = readq(&bar0->mc_rldram_test_ctrl);
3597         val64 |= MC_RLDRAM_TEST_MODE;
3598         writeq(val64, &bar0->mc_rldram_test_ctrl);
3599
3600         val64 = readq(&bar0->mc_rldram_mrs);
3601         val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3602         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3603
3604         val64 |= MC_RLDRAM_MRS_ENABLE;
3605         SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3606
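             /*
              * Two passes over the test patterns; the second pass inverts the
              * upper 48 bits of each pattern (the low 16 bits stay zero).
              */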
3607         while (iteration < 2) {
3608                 val64 = 0x55555555aaaa0000ULL;
3609                 if (iteration == 1) {
3610                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3611                 }
3612                 writeq(val64, &bar0->mc_rldram_test_d0);
3613
3614                 val64 = 0xaaaa5a5555550000ULL;
3615                 if (iteration == 1) {
3616                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3617                 }
3618                 writeq(val64, &bar0->mc_rldram_test_d1);
3619
3620                 val64 = 0x55aaaaaaaa5a0000ULL;
3621                 if (iteration == 1) {
3622                         val64 ^= 0xFFFFFFFFFFFF0000ULL;
3623                 }
3624                 writeq(val64, &bar0->mc_rldram_test_d2);
3625
3626                 val64 = (u64) (0x0000003fffff0000ULL);
3627                 writeq(val64, &bar0->mc_rldram_test_add);
3628
3629
3630                 val64 = MC_RLDRAM_TEST_MODE;
3631                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3632
3633                 val64 |=
3634                     MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3635                     MC_RLDRAM_TEST_GO;
3636                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3637
3638                 for (cnt = 0; cnt < 5; cnt++) {
3639                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3640                         if (val64 & MC_RLDRAM_TEST_DONE)
3641                                 break;
3642                         msleep(200);
3643                 }
3644
3645                 if (cnt == 5)
3646                         break;
3647
3648                 val64 = MC_RLDRAM_TEST_MODE;
3649                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3650
3651                 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3652                 writeq(val64, &bar0->mc_rldram_test_ctrl);
3653
3654                 for (cnt = 0; cnt < 5; cnt++) {
3655                         val64 = readq(&bar0->mc_rldram_test_ctrl);
3656                         if (val64 & MC_RLDRAM_TEST_DONE)
3657                                 break;
3658                         msleep(500);
3659                 }
3660
3661                 if (cnt == 5)
3662                         break;
3663
3664                 val64 = readq(&bar0->mc_rldram_test_ctrl);
3665                 if (val64 & MC_RLDRAM_TEST_PASS)
3666                         test_pass = 1;
3667
3668                 iteration++;
3669         }
3670
3671         if (!test_pass)
3672                 *data = 1;
3673         else
3674                 *data = 0;
3675
3676         return 0;
3677 }
3678
3679 /**
3680  *  s2io_ethtool_test - conducts 6 tests to determine the health of the card.
3681  *  @sp : private member of the device structure, which is a pointer to the
3682  *  s2io_nic structure.
3683  *  @ethtest : pointer to a ethtool command specific structure that will be
3684  *  returned to the user.
3685  *  @data : variable that returns the result of each of the test
3686  * conducted by the driver.
3687  * Description:
3688  *  This function conducts 6 tests (4 offline and 2 online) to determine
3689  *  the health of the card.
3690  * Return value:
3691  *  void
3692  */
3693
3694 static void s2io_ethtool_test(struct net_device *dev,
3695                               struct ethtool_test *ethtest,
3696                               uint64_t * data)
3697 {
3698         nic_t *sp = dev->priv;
3699         int orig_state = netif_running(sp->dev);
3700
3701         if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3702                 /* Offline Tests. */
3703                 if (orig_state)
3704                         s2io_close(sp->dev);
3705
3706                 if (s2io_register_test(sp, &data[0]))
3707                         ethtest->flags |= ETH_TEST_FL_FAILED;
3708
3709                 s2io_reset(sp);
3710
3711                 if (s2io_rldram_test(sp, &data[3]))
3712                         ethtest->flags |= ETH_TEST_FL_FAILED;
3713
3714                 s2io_reset(sp);
3715
3716                 if (s2io_eeprom_test(sp, &data[1]))
3717                         ethtest->flags |= ETH_TEST_FL_FAILED;
3718
3719                 if (s2io_bist_test(sp, &data[4]))
3720                         ethtest->flags |= ETH_TEST_FL_FAILED;
3721
3722                 if (orig_state)
3723                         s2io_open(sp->dev);
3724
3725                 data[2] = 0;
3726         } else {
3727                 /* Online Tests. */
3728                 if (!orig_state) {
3729                         DBG_PRINT(ERR_DBG,
3730                                   "%s: is not up, cannot run test\n",
3731                                   dev->name);
3732                         data[0] = -1;
3733                         data[1] = -1;
3734                         data[2] = -1;
3735                         data[3] = -1;
3736                         data[4] = -1;
3737                 }
3738
3739                 if (s2io_link_test(sp, &data[2]))
3740                         ethtest->flags |= ETH_TEST_FL_FAILED;
3741
3742                 data[0] = 0;
3743                 data[1] = 0;
3744                 data[3] = 0;
3745                 data[4] = 0;
3746         }
3747 }
3748
3749 static void s2io_get_ethtool_stats(struct net_device *dev,
3750                                    struct ethtool_stats *estats,
3751                                    u64 * tmp_stats)
3752 {
3753         int i = 0;
3754         nic_t *sp = dev->priv;
3755         StatInfo_t *stat_info = sp->mac_control.stats_info;
3756
3757         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
3758         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
3759         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
3760         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
3761         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
3762         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
3763         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
3764         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
3765         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
3766         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
3767         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
3768         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
3769         tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
3770         tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
3771         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
3772         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
3773         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
3774         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
3775         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
3776         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
3777         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
3778         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
3779         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
3780         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
3781         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
3782         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
3783         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
3784         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
3785         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
3786         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
3787         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
3788         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
3789         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
3790         tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
3791         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
3792         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
3793         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
3794         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
3795         tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
3796 }
3797
3798 int s2io_ethtool_get_regs_len(struct net_device *dev)
3799 {
3800         return (XENA_REG_SPACE);
3801 }
3802
3803
3804 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3805 {
3806         nic_t *sp = dev->priv;
3807
3808         return (sp->rx_csum);
3809 }
3810 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3811 {
3812         nic_t *sp = dev->priv;
3813
3814         if (data)
3815                 sp->rx_csum = 1;
3816         else
3817                 sp->rx_csum = 0;
3818
3819         return 0;
3820 }
3821 int s2io_get_eeprom_len(struct net_device *dev)
3822 {
3823         return (XENA_EEPROM_SPACE);
3824 }
3825
3826 int s2io_ethtool_self_test_count(struct net_device *dev)
3827 {
3828         return (S2IO_TEST_LEN);
3829 }
3830 void s2io_ethtool_get_strings(struct net_device *dev,
3831                               u32 stringset, u8 * data)
3832 {
3833         switch (stringset) {
3834         case ETH_SS_TEST:
3835                 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
3836                 break;
3837         case ETH_SS_STATS:
3838                 memcpy(data, &ethtool_stats_keys,
3839                        sizeof(ethtool_stats_keys));
3840         }
3841 }
3842 static int s2io_ethtool_get_stats_count(struct net_device *dev)
3843 {
3844         return (S2IO_STAT_LEN);
3845 }
3846
3847 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
3848 {
3849         if (data)
3850                 dev->features |= NETIF_F_IP_CSUM;
3851         else
3852                 dev->features &= ~NETIF_F_IP_CSUM;
3853
3854         return 0;
3855 }
3856
3857
3858 static struct ethtool_ops netdev_ethtool_ops = {
3859         .get_settings = s2io_ethtool_gset,
3860         .set_settings = s2io_ethtool_sset,
3861         .get_drvinfo = s2io_ethtool_gdrvinfo,
3862         .get_regs_len = s2io_ethtool_get_regs_len,
3863         .get_regs = s2io_ethtool_gregs,
3864         .get_link = ethtool_op_get_link,
3865         .get_eeprom_len = s2io_get_eeprom_len,
3866         .get_eeprom = s2io_ethtool_geeprom,
3867         .set_eeprom = s2io_ethtool_seeprom,
3868         .get_pauseparam = s2io_ethtool_getpause_data,
3869         .set_pauseparam = s2io_ethtool_setpause_data,
3870         .get_rx_csum = s2io_ethtool_get_rx_csum,
3871         .set_rx_csum = s2io_ethtool_set_rx_csum,
3872         .get_tx_csum = ethtool_op_get_tx_csum,
3873         .set_tx_csum = s2io_ethtool_op_set_tx_csum,
3874         .get_sg = ethtool_op_get_sg,
3875         .set_sg = ethtool_op_set_sg,
3876 #ifdef NETIF_F_TSO
3877         .get_tso = ethtool_op_get_tso,
3878         .set_tso = ethtool_op_set_tso,
3879 #endif
3880         .self_test_count = s2io_ethtool_self_test_count,
3881         .self_test = s2io_ethtool_test,
3882         .get_strings = s2io_ethtool_get_strings,
3883         .phys_id = s2io_ethtool_idnic,
3884         .get_stats_count = s2io_ethtool_get_stats_count,
3885         .get_ethtool_stats = s2io_get_ethtool_stats
3886 };
3887
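     /*
      * This ethtool_ops table is expected to be attached to the net_device
      * elsewhere in the driver (not shown in this hunk), typically in the PCI
      * probe path, e.g. via SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops).
      */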
3888 /**
3889  *  s2io_ioctl - Entry point for the Ioctl
3890  *  @dev :  Device pointer.
3891  *  @ifr :  An IOCTL specific structure, that can contain a pointer to
3892  *  a proprietary structure used to pass information to the driver.
3893  *  @cmd :  This is used to distinguish between the different commands that
3894  *  can be passed to the IOCTL functions.
3895  *  Description:
3896  *  Currently there is no special functionality supported in IOCTL, hence
3897  *  the function always returns -EOPNOTSUPP.
3898  */
3899
3900 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3901 {
3902         return -EOPNOTSUPP;
3903 }
3904
3905 /**
3906  *  s2io_change_mtu - entry point to change MTU size for the device.
3907  *   @dev : device pointer.
3908  *   @new_mtu : the new MTU size for the device.
3909  *   Description: A driver entry point to change MTU size for the device.
3910  *   Before changing the MTU the device must be stopped.
3911  *  Return value:
3912  *   0 on success and an appropriate (-)ve integer as defined in errno.h
3913  *   file on failure.
3914  */
3915
3916 int s2io_change_mtu(struct net_device *dev, int new_mtu)
3917 {
3918         nic_t *sp = dev->priv;
3919         XENA_dev_config_t __iomem *bar0 = sp->bar0;
3920         register u64 val64;
3921
3922         if (netif_running(dev)) {
3923                 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
3924                 DBG_PRINT(ERR_DBG, "change its MTU\n");
3925                 return -EBUSY;
3926         }
3927
3928         if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
3929                 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
3930                           dev->name);
3931                 return -EPERM;
3932         }
3933
3934         /* Set the new MTU into the PYLD register of the NIC */
3935         val64 = new_mtu;
3936         writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
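             /*
              * vBIT(val64, 2, 14) places the MTU in a 14-bit field of the
              * 64-bit rmac_max_pyld_len register; the exact bit position is
              * determined by the vBIT() macro defined in s2io.h.
              */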
3937
3938         dev->mtu = new_mtu;
3939
3940         return 0;
3941 }
3942
3943 /**
3944  *  s2io_tasklet - Bottom half of the ISR.
3945  *  @dev_addr : address of the net_device structure, passed as an unsigned long.
3946  *  Description:
3947  *  This is the tasklet or the bottom half of the ISR. This is
3948  *  an extension of the ISR which is scheduled by the scheduler to be run
3949  *  when the load on the CPU is low. All low priority tasks of the ISR can
3950  *  be pushed into the tasklet. For now the tasklet is used only to
3951  *  replenish the Rx buffers in the Rx buffer descriptors.
3952  *  Return value:
3953  *  void.
3954  */
3955
3956 static void s2io_tasklet(unsigned long dev_addr)
3957 {
3958         struct net_device *dev = (struct net_device *) dev_addr;
3959         nic_t *sp = dev->priv;
3960         int i, ret;
3961         mac_info_t *mac_control;
3962         struct config_param *config;
3963
3964         mac_control = &sp->mac_control;
3965         config = &sp->config;
3966
3967         if (!TASKLET_IN_USE) {
3968                 for (i = 0; i < config->rx_ring_num; i++) {
3969                         ret = fill_rx_buffers(sp, i);
3970                         if (ret == -ENOMEM) {
3971                                 DBG_PRINT(ERR_DBG, "%s: Out of ",
3972                                           dev->name);
3973                                 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
3974                                 break;
3975                         } else if (ret == -EFILL) {
3976                                 DBG_PRINT(ERR_DBG,
3977                                           "%s: Rx Ring %d is full\n",
3978                                           dev->name, i);
3979                                 break;
3980                         }
3981                 }
3982                 clear_bit(0, (&sp->tasklet_status));
3983         }
3984 }
3985
3986 /**
3987  * s2io_set_link - Set the Link status
3988  * @data: long pointer to device private structure
3989  * Description: Sets the link status for the adapter
3990  */
3991
3992 static void s2io_set_link(unsigned long data)
3993 {
3994         nic_t *nic = (nic_t *) data;
3995         struct net_device *dev = nic->dev;
3996         XENA_dev_config_t __iomem *bar0 = nic->bar0;
3997         register u64 val64;
3998         u16 subid;
3999
4000         if (test_and_set_bit(0, &(nic->link_state))) {
4001                 /* The card is being reset, no point doing anything */
4002                 return;
4003         }
4004
4005         subid = nic->pdev->subsystem_device;
4006         /*
4007          * Allow a small delay for the NIC's self-initiated
4008          * cleanup to complete.
4009          */
4010         msleep(100);
4011
4012         val64 = readq(&bar0->adapter_status);
4013         if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4014                 if (LINK_IS_UP(val64)) {
4015                         val64 = readq(&bar0->adapter_control);
4016                         val64 |= ADAPTER_CNTL_EN;
4017                         writeq(val64, &bar0->adapter_control);
4018                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4019                                 val64 = readq(&bar0->gpio_control);
4020                                 val64 |= GPIO_CTRL_GPIO_0;
4021                                 writeq(val64, &bar0->gpio_control);
4022                                 val64 = readq(&bar0->gpio_control);
4023                         } else {
4024                                 val64 |= ADAPTER_LED_ON;
4025                                 writeq(val64, &bar0->adapter_control);
4026                         }
4027                         val64 = readq(&bar0->adapter_status);
4028                         if (!LINK_IS_UP(val64)) {
4029                                 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4030                                 DBG_PRINT(ERR_DBG, " Link down");
4031                                 DBG_PRINT(ERR_DBG, " after ");
4032                                 DBG_PRINT(ERR_DBG, "enabling ");
4033                                 DBG_PRINT(ERR_DBG, "device\n");
4034                         }
4035                         if (nic->device_enabled_once == FALSE) {
4036                                 nic->device_enabled_once = TRUE;
4037                         }
4038                         s2io_link(nic, LINK_UP);
4039                 } else {
4040                         if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4041                                 val64 = readq(&bar0->gpio_control);
4042                                 val64 &= ~GPIO_CTRL_GPIO_0;
4043                                 writeq(val64, &bar0->gpio_control);
4044                                 val64 = readq(&bar0->gpio_control);
4045                         }
4046                         s2io_link(nic, LINK_DOWN);
4047                 }
4048         } else {                /* NIC is not Quiescent. */
4049                 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4050                 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4051                 netif_stop_queue(dev);
4052         }
4053         clear_bit(0, &(nic->link_state));
4054 }
4055
4056 static void s2io_card_down(nic_t * sp)
4057 {
4058         int cnt = 0;
4059         XENA_dev_config_t __iomem *bar0 = sp->bar0;
4060         unsigned long flags;
4061         register u64 val64 = 0;
4062
4063         /* If s2io_set_link task is executing, wait till it completes. */
4064         while (test_and_set_bit(0, &(sp->link_state))) {
4065                 msleep(50);
4066         }
4067         atomic_set(&sp->card_state, CARD_DOWN);
4068
4069         /* disable Tx and Rx traffic on the NIC */
4070         stop_nic(sp);
4071
4072         /* Kill tasklet. */
4073         tasklet_kill(&sp->task);
4074
4075         /* Check if the device is Quiescent and then Reset the NIC */
4076         do {
4077                 val64 = readq(&bar0->adapter_status);
4078                 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4079                         break;
4080                 }
4081
4082                 msleep(50);
4083                 cnt++;
4084                 if (cnt == 10) {
4085                         DBG_PRINT(ERR_DBG,
4086                                   "s2io_close: Device not Quiescent ");
4087                         DBG_PRINT(ERR_DBG, "adapter status reads 0x%llx\n",
4088                                   (unsigned long long) val64);
4089                         break;
4090                 }
4091         } while (1);
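        /*
         * Reset the NIC and free the Tx/Rx buffers with the Tx lock held so
         * the transmit path cannot run concurrently with the teardown.
         */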
4092         spin_lock_irqsave(&sp->tx_lock, flags);
4093         s2io_reset(sp);
4094
4095         /* Free all unused Tx and Rx buffers */
4096         free_tx_buffers(sp);
4097         free_rx_buffers(sp);
4098
4099         spin_unlock_irqrestore(&sp->tx_lock, flags);
4100         clear_bit(0, &(sp->link_state));
4101 }
4102
4103 static int s2io_card_up(nic_t * sp)
4104 {
4105         int i, ret;
4106         mac_info_t *mac_control;
4107         struct config_param *config;
4108         struct net_device *dev = (struct net_device *) sp->dev;
4109
4110         /* Initialize the H/W I/O registers */
4111         if (init_nic(sp) != 0) {
4112                 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4113                           dev->name);
4114                 return -ENODEV;
4115         }
4116
4117         /*
4118          * Initialize the Rx buffers. Buffers are filled for each of the
4119          * configured Rx rings.
4120          */
4121         mac_control = &sp->mac_control;
4122         config = &sp->config;
4123
4124         for (i = 0; i < config->rx_ring_num; i++) {
4125                 if ((ret = fill_rx_buffers(sp, i))) {
4126                         DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
4127                                   dev->name);
4128                         s2io_reset(sp);
4129                         free_rx_buffers(sp);
4130                         return -ENOMEM;
4131                 }
4132                 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d\n", i,
4133                           atomic_read(&sp->rx_bufs_left[i]));
4134         }
4135
4136         /* Setting its receive mode */
4137         s2io_set_multicast(dev);
4138
4139         /* Enable tasklet for the device */
4140         tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4141
4142         /* Enable Rx Traffic and interrupts on the NIC */
4143         if (start_nic(sp)) {
4144                 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4145                 tasklet_kill(&sp->task);
4146                 s2io_reset(sp);
4147                 free_irq(dev->irq, dev);
4148                 free_rx_buffers(sp);
4149                 return -ENODEV;
4150         }
4151
4152         atomic_set(&sp->card_state, CARD_UP);
4153         return 0;
4154 }
4155
4156 /**
4157  * s2io_restart_nic - Resets the NIC.
4158  * @data : long pointer to the device private structure
4159  * Description:
4160  * This function is scheduled to be run by the s2io_tx_watchdog
4161  * function after 0.5 secs to reset the NIC. The idea is to reduce
4162  * the run time of the watch dog routine which is run holding a
4163  * the run time of the watchdog routine, which is run holding a
4164  */
4165
4166 static void s2io_restart_nic(unsigned long data)
4167 {
4168         struct net_device *dev = (struct net_device *) data;
4169         nic_t *sp = dev->priv;
4170
4171         s2io_card_down(sp);
4172         if (s2io_card_up(sp)) {
4173                 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4174                           dev->name);
4175         }
4176         netif_wake_queue(dev);
4177         DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4178                   dev->name);
4179
4180 }
4181
4182 /**
4183  *  s2io_tx_watchdog - Watchdog for transmit side.
4184  *  @dev : Pointer to net device structure
4185  *  Description:
4186  *  This function is triggered if the Tx Queue is stopped
4187  *  for a pre-defined amount of time when the Interface is still up.
4188  *  If the Interface is jammed in such a situation, the hardware is
4189  *  reset (by s2io_close) and restarted again (by s2io_open) to
4190  *  overcome any problem that might have been caused in the hardware.
4191  *  Return value:
4192  *  void
4193  */
4194
4195 static void s2io_tx_watchdog(struct net_device *dev)
4196 {
4197         nic_t *sp = dev->priv;
4198
4199         if (netif_carrier_ok(dev)) {
4200                 schedule_work(&sp->rst_timer_task);
4201         }
4202 }
4203
4204 /**
4205  *   rx_osm_handler - To perform some OS related operations on SKB.
4206  *   @sp: private member of the device structure,pointer to s2io_nic structure.
4207  *   @skb : the socket buffer pointer.
4208  *   @len : length of the packet
4209  *   @cksum : FCS checksum of the frame.
4210  *   @ring_no : the ring from which this RxD was extracted.
4211  *   Description:
4212  *   This function is called by the Rx interrupt service routine to perform
4213  *   some OS related operations on the SKB before passing it to the upper
4214  *   layers. It mainly checks if the checksum is OK, if so adds it to the
4215  *   SKB's cksum variable, increments the Rx packet count and passes the SKB
4216  *   to the upper layer. If the checksum is wrong, it increments the Rx
4217  *   packet error count, frees the SKB and returns error.
4218  *   Return value:
4219  *   SUCCESS on success and -1 on failure.
4220  */
4221 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4222 {
4223         nic_t *sp = ring_data->nic;
4224         struct net_device *dev = (struct net_device *) sp->dev;
4225         struct sk_buff *skb = (struct sk_buff *)
4226                 ((unsigned long) rxdp->Host_Control);
4227         int ring_no = ring_data->ring_no;
4228         u16 l3_csum, l4_csum;
4229 #ifdef CONFIG_2BUFF_MODE
4230         int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4231         int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4232         int get_block = ring_data->rx_curr_get_info.block_index;
4233         int get_off = ring_data->rx_curr_get_info.offset;
4234         buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4235         unsigned char *buff;
4236 #else
4237         u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
4238 #endif
4239         skb->dev = dev;
4240         if (rxdp->Control_1 & RXD_T_CODE) {
4241                 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4242                 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4243                           dev->name, err);
4244         }
4245
4246         /* Updating statistics */
4247         rxdp->Host_Control = 0;
4248         sp->rx_pkt_count++;
4249         sp->stats.rx_packets++;
4250 #ifndef CONFIG_2BUFF_MODE
4251         sp->stats.rx_bytes += len;
4252 #else
4253         sp->stats.rx_bytes += buf0_len + buf2_len;
4254 #endif
4255
4256 #ifndef CONFIG_2BUFF_MODE
4257         skb_put(skb, len);
4258 #else
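        /*
         * 2 buffer mode: buffer0 holds the frame header and buffer2 the
         * payload, so the header bytes are copied in front of the payload.
         */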
4259         buff = skb_push(skb, buf0_len);
4260         memcpy(buff, ba->ba_0, buf0_len);
4261         skb_put(skb, buf2_len);
4262 #endif
4263
4264         if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4265             (sp->rx_csum)) {
4266                 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4267                 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4268                 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4269                         /*
4270                          * NIC verifies if the Checksum of the received
4271                          * frame is Ok or not and accordingly returns
4272                          * a flag in the RxD.
4273                          */
4274                         skb->ip_summed = CHECKSUM_UNNECESSARY;
4275                 } else {
4276                         /*
4277                          * Packet with erroneous checksum, let the
4278                          * upper layers deal with it.
4279                          */
4280                         skb->ip_summed = CHECKSUM_NONE;
4281                 }
4282         } else {
4283                 skb->ip_summed = CHECKSUM_NONE;
4284         }
4285
4286         skb->protocol = eth_type_trans(skb, dev);
4287 #ifdef CONFIG_S2IO_NAPI
4288         netif_receive_skb(skb);
4289 #else
4290         netif_rx(skb);
4291 #endif
4292         dev->last_rx = jiffies;
4293         atomic_dec(&sp->rx_bufs_left[ring_no]);
4294         return SUCCESS;
4295 }
4296
4297 /**
4298  *  s2io_link - stops/starts the Tx queue.
4299  *  @sp : private member of the device structure, which is a pointer to the
4300  *  s2io_nic structure.
4301  *  @link : indicates whether link is UP/DOWN.
4302  *  Description:
4303  *  This function stops/starts the Tx queue depending on whether the link
4304  *  status of the NIC is down or up. This is called by the Alarm
4305  *  interrupt handler whenever a link change interrupt comes up.
4306  *  Return value:
4307  *  void.
4308  */
4309
4310 void s2io_link(nic_t * sp, int link)
4311 {
4312         struct net_device *dev = (struct net_device *) sp->dev;
4313
4314         if (link != sp->last_link_state) {
4315                 if (link == LINK_DOWN) {
4316                         DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4317                         netif_carrier_off(dev);
4318                 } else {
4319                         DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4320                         netif_carrier_on(dev);
4321                 }
4322         }
4323         sp->last_link_state = link;
4324 }
4325
4326 /**
4327  *  get_xena_rev_id - to identify revision ID of xena.
4328  *  @pdev : PCI Dev structure
4329  *  Description:
4330  *  Function to identify the Revision ID of xena.
4331  *  Return value:
4332  *  returns the revision ID of the device.
4333  */
4334
4335 int get_xena_rev_id(struct pci_dev *pdev)
4336 {
4337         u8 id = 0;
4338
4339         pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
4340         return id;
4341 }
4342
4343 /**
4344  *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4345  *  @sp : private member of the device structure, which is a pointer to the
4346  *  s2io_nic structure.
4347  *  Description:
4348  *  This function initializes a few of the PCI and PCI-X configuration registers
4349  *  with recommended values.
4350  *  Return value:
4351  *  void
4352  */
4353
4354 static void s2io_init_pci(nic_t * sp)
4355 {
4356         u16 pci_cmd = 0, pcix_cmd = 0;
4357
4358         /* Enable Data Parity Error Recovery in PCI-X command register. */
4359         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4360                              &(pcix_cmd));
4361         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4362                               (pcix_cmd | 1));
4363         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4364                              &(pcix_cmd));
4365
4366         /* Set the PErr Response bit in PCI command register. */
4367         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4368         pci_write_config_word(sp->pdev, PCI_COMMAND,
4369                               (pci_cmd | PCI_COMMAND_PARITY));
4370         pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4371
4372         /* Set MMRB count to 1024 in PCI-X Command register. */
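        /* Bits [3:2] of the PCI-X command register select MMRBC; 0x1 => 1KB reads. */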
4373         pcix_cmd &= 0xFFF3;
4374         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4375                               (pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4376         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4377                              &(pcix_cmd));
4378
4379         /*  Setting Maximum outstanding splits based on system type. */
4380         pcix_cmd &= 0xFF8F;
4381         pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1);   /* 2 splits. */
4382         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4383                               pcix_cmd);
4384         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4385                              &(pcix_cmd));
4386
4387         /* Forcibly disabling relaxed ordering capability of the card. */
4388         pcix_cmd &= 0xfffd;
4389         pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4390                               pcix_cmd);
4391         pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4392                              &(pcix_cmd));
4393 }
4394
4395 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4396 MODULE_LICENSE("GPL");
4397 module_param(tx_fifo_num, int, 0);
4398 module_param(rx_ring_num, int, 0);
4399 module_param_array(tx_fifo_len, uint, NULL, 0);
4400 module_param_array(rx_ring_sz, uint, NULL, 0);
4401 module_param(Stats_refresh_time, int, 0);
4402 module_param_array(rts_frm_len, uint, NULL, 0);
4403 module_param(rmac_pause_time, int, 0);
4404 module_param(mc_pause_threshold_q0q3, int, 0);
4405 module_param(mc_pause_threshold_q4q7, int, 0);
4406 module_param(shared_splits, int, 0);
4407 module_param(tmac_util_period, int, 0);
4408 module_param(rmac_util_period, int, 0);
4409 #ifndef CONFIG_S2IO_NAPI
4410 module_param(indicate_max_pkts, int, 0);
4411 #endif
4412
4413 /**
4414  *  s2io_init_nic - Initialization of the adapter .
4415  *  @pdev : structure containing the PCI related information of the device.
4416  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4417  *  Description:
4418  *  The function initializes an adapter identified by the pci_dev structure.
4419  *  All OS related initialization including memory and device structure and
4420  *  initialization of the device private variable is done. Also the swapper
4421  *  control register is initialized to enable read and write into the I/O
4422  *  registers of the device.
4423  *  Return value:
4424  *  returns 0 on success and negative on failure.
4425  */
4426
4427 static int __devinit
4428 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4429 {
4430         nic_t *sp;
4431         struct net_device *dev;
4432         int i, j, ret;
4433         int dma_flag = FALSE;
4434         u32 mac_up, mac_down;
4435         u64 val64 = 0, tmp64 = 0;
4436         XENA_dev_config_t __iomem *bar0 = NULL;
4437         u16 subid;
4438         mac_info_t *mac_control;
4439         struct config_param *config;
4440
4441 #ifdef CONFIG_S2IO_NAPI
4442         DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
4443 #endif
4444
4445         if ((ret = pci_enable_device(pdev))) {
4446                 DBG_PRINT(ERR_DBG,
4447                           "s2io_init_nic: pci_enable_device failed\n");
4448                 return ret;
4449         }
4450
4451         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4452                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4453                 dma_flag = TRUE;
4454                 if (pci_set_consistent_dma_mask
4455                     (pdev, DMA_64BIT_MASK)) {
4456                         DBG_PRINT(ERR_DBG,
4457                                   "Unable to obtain 64bit DMA for "
4458                                   "consistent allocations\n");
4459                         pci_disable_device(pdev);
4460                         return -ENOMEM;
4461                 }
4462         } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4463                 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4464         } else {
4465                 pci_disable_device(pdev);
4466                 return -ENOMEM;
4467         }
4468
4469         if (pci_request_regions(pdev, s2io_driver_name)) {
4470                 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
4471                 pci_disable_device(pdev);
4472                 return -ENODEV;
4473         }
4474
4475         dev = alloc_etherdev(sizeof(nic_t));
4476         if (dev == NULL) {
4477                 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4478                 pci_disable_device(pdev);
4479                 pci_release_regions(pdev);
4480                 return -ENODEV;
4481         }
4482
4483         pci_set_master(pdev);
4484         pci_set_drvdata(pdev, dev);
4485         SET_MODULE_OWNER(dev);
4486         SET_NETDEV_DEV(dev, &pdev->dev);
4487
4488         /*  Private member variable initialized to s2io NIC structure */
4489         sp = dev->priv;
4490         memset(sp, 0, sizeof(nic_t));
4491         sp->dev = dev;
4492         sp->pdev = pdev;
4493         sp->high_dma_flag = dma_flag;
4494         sp->device_enabled_once = FALSE;
4495
4496         /* Initialize some PCI/PCI-X fields of the NIC. */
4497         s2io_init_pci(sp);
4498
4499         /*
4500          * Setting the device configuration parameters.
4501          * Most of these parameters can be specified by the user during
4502          * module insertion as they are module loadable parameters. If
4503          * these parameters are not specified during load time, they
4504          * are initialized with default values.
4505          */
4506         mac_control = &sp->mac_control;
4507         config = &sp->config;
4508
4509         /* Tx side parameters. */
4510         tx_fifo_len[0] = DEFAULT_FIFO_LEN;      /* Default value. */
4511         config->tx_fifo_num = tx_fifo_num;
4512         for (i = 0; i < MAX_TX_FIFOS; i++) {
4513                 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4514                 config->tx_cfg[i].fifo_priority = i;
4515         }
4516
4517         /* mapping the QoS priority to the configured fifos */
4518         for (i = 0; i < MAX_TX_FIFOS; i++)
4519                 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
4520
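        /*
         * Default to utilization based Tx interrupts; fall back to per-list
         * interrupts if any FIFO is configured with fewer than 65 entries.
         */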
4521         config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4522         for (i = 0; i < config->tx_fifo_num; i++) {
4523                 config->tx_cfg[i].f_no_snoop =
4524                     (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4525                 if (config->tx_cfg[i].fifo_len < 65) {
4526                         config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4527                         break;
4528                 }
4529         }
4530         config->max_txds = MAX_SKB_FRAGS;
4531
4532         /* Rx side parameters. */
4533         rx_ring_sz[0] = SMALL_BLK_CNT;  /* Default value. */
4534         config->rx_ring_num = rx_ring_num;
4535         for (i = 0; i < MAX_RX_RINGS; i++) {
4536                 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4537                     (MAX_RXDS_PER_BLOCK + 1);
4538                 config->rx_cfg[i].ring_priority = i;
4539         }
4540
4541         for (i = 0; i < rx_ring_num; i++) {
4542                 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4543                 config->rx_cfg[i].f_no_snoop =
4544                     (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4545         }
4546
4547         /*  Setting Mac Control parameters */
4548         mac_control->rmac_pause_time = rmac_pause_time;
4549         mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4550         mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4551
4552
4553         /* Initialize Ring buffer parameters. */
4554         for (i = 0; i < config->rx_ring_num; i++)
4555                 atomic_set(&sp->rx_bufs_left[i], 0);
4556
4557         /*  initialize the shared memory used by the NIC and the host */
4558         if (init_shared_mem(sp)) {
4559                 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4560                           dev->name);
4561                 ret = -ENOMEM;
4562                 goto mem_alloc_failed;
4563         }
4564
4565         sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4566                                      pci_resource_len(pdev, 0));
4567         if (!sp->bar0) {
4568                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4569                           dev->name);
4570                 ret = -ENOMEM;
4571                 goto bar0_remap_failed;
4572         }
4573
4574         sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4575                                      pci_resource_len(pdev, 2));
4576         if (!sp->bar1) {
4577                 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4578                           dev->name);
4579                 ret = -ENOMEM;
4580                 goto bar1_remap_failed;
4581         }
4582
4583         dev->irq = pdev->irq;
4584         dev->base_addr = (unsigned long) sp->bar0;
4585
4586         /* Initializing the BAR1 address as the start of the FIFO pointer. */
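        /* Each Tx FIFO is given its own 128KB (0x20000 byte) window in BAR1. */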
4587         for (j = 0; j < MAX_TX_FIFOS; j++) {
4588                 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4589                     (sp->bar1 + (j * 0x00020000));
4590         }
4591
4592         /*  Driver entry points */
4593         dev->open = &s2io_open;
4594         dev->stop = &s2io_close;
4595         dev->hard_start_xmit = &s2io_xmit;
4596         dev->get_stats = &s2io_get_stats;
4597         dev->set_multicast_list = &s2io_set_multicast;
4598         dev->do_ioctl = &s2io_ioctl;
4599         dev->change_mtu = &s2io_change_mtu;
4600         SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4601
4602         /*
4603          * will use eth_mac_addr() for  dev->set_mac_address
4604          * mac address will be set every time dev->open() is called
4605          */
4606 #if defined(CONFIG_S2IO_NAPI)
4607         dev->poll = s2io_poll;
4608         dev->weight = 32;
4609 #endif
4610
4611         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
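        /*
         * Advertise high-memory DMA only if the 64-bit DMA mask was accepted
         * earlier during PCI setup.
         */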
4612         if (sp->high_dma_flag == TRUE)
4613                 dev->features |= NETIF_F_HIGHDMA;
4614 #ifdef NETIF_F_TSO
4615         dev->features |= NETIF_F_TSO;
4616 #endif
4617
4618         dev->tx_timeout = &s2io_tx_watchdog;
4619         dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4620         INIT_WORK(&sp->rst_timer_task,
4621                   (void (*)(void *)) s2io_restart_nic, dev);
4622         INIT_WORK(&sp->set_link_task,
4623                   (void (*)(void *)) s2io_set_link, sp);
4624
4625         pci_save_state(sp->pdev);
4626
4627         /* Setting swapper control on the NIC, for proper reset operation */
4628         if (s2io_set_swapper(sp)) {
4629                 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4630                           dev->name);
4631                 ret = -EAGAIN;
4632                 goto set_swap_failed;
4633         }
4634
4635         /*
4636          * Fix for all "FFs" MAC address problems observed on
4637          * Alpha platforms
4638          */
4639         fix_mac_address(sp);
4640         s2io_reset(sp);
4641
4642         /*
4643          * MAC address initialization.
4644          * For now only one mac address will be read and used.
4645          */
4646         bar0 = sp->bar0;
4647         val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4648             RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4649         writeq(val64, &bar0->rmac_addr_cmd_mem);
4650         wait_for_cmd_complete(sp);
4651
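        /*
         * The 48-bit station MAC address is returned in the upper six bytes
         * of the 64-bit rmac_addr_data0_mem register; split it into two
         * 32-bit halves and extract the individual address bytes below.
         */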
4652         tmp64 = readq(&bar0->rmac_addr_data0_mem);
4653         mac_down = (u32) tmp64;
4654         mac_up = (u32) (tmp64 >> 32);
4655
4656         memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
4657
4658         sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4659         sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4660         sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4661         sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4662         sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4663         sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4664
4665         DBG_PRINT(INIT_DBG,
4666                   "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4667                   sp->def_mac_addr[0].mac_addr[0],
4668                   sp->def_mac_addr[0].mac_addr[1],
4669                   sp->def_mac_addr[0].mac_addr[2],
4670                   sp->def_mac_addr[0].mac_addr[3],
4671                   sp->def_mac_addr[0].mac_addr[4],
4672                   sp->def_mac_addr[0].mac_addr[5]);
4673
4674         /*  Set the factory defined MAC address initially   */
4675         dev->addr_len = ETH_ALEN;
4676         memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4677
4678         /*
4679          * Initialize the tasklet status and link state flags
4680          * and the card state parameter
4681          */
4682         atomic_set(&(sp->card_state), 0);
4683         sp->tasklet_status = 0;
4684         sp->link_state = 0;
4685
4686         /* Initialize spinlocks */
4687         spin_lock_init(&sp->tx_lock);
4688 #ifndef CONFIG_S2IO_NAPI
4689         spin_lock_init(&sp->put_lock);
4690 #endif
4691
4692         /*
4693          * SXE-002: Configure link and activity LED to init state
4694          * on driver load.
4695          */
4696         subid = sp->pdev->subsystem_device;
4697         if ((subid & 0xFF) >= 0x07) {
4698                 val64 = readq(&bar0->gpio_control);
4699                 val64 |= 0x0000800000000000ULL;
4700                 writeq(val64, &bar0->gpio_control);
4701                 val64 = 0x0411040400000000ULL;
4702                 writeq(val64, (void __iomem *) bar0 + 0x2700);
4703                 val64 = readq(&bar0->gpio_control);
4704         }
4705
4706         sp->rx_csum = 1;        /* Rx chksum verify enabled by default */
4707
4708         if (register_netdev(dev)) {
4709                 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4710                 ret = -ENODEV;
4711                 goto register_failed;
4712         }
4713
4714         /*
4715          * Make Link state as off at this point, when the Link change
4716          * interrupt comes the state will be automatically changed to
4717          * the right state.
4718          */
4719         netif_carrier_off(dev);
4720         sp->last_link_state = LINK_DOWN;
4721
4722         return 0;
4723
4724       register_failed:
4725       set_swap_failed:
4726         iounmap(sp->bar1);
4727       bar1_remap_failed:
4728         iounmap(sp->bar0);
4729       bar0_remap_failed:
4730       mem_alloc_failed:
4731         free_shared_mem(sp);
4732         pci_disable_device(pdev);
4733         pci_release_regions(pdev);
4734         pci_set_drvdata(pdev, NULL);
4735         free_netdev(dev);
4736
4737         return ret;
4738 }
4739
4740 /**
4741  * s2io_rem_nic - Free the PCI device
4742  * @pdev: structure containing the PCI related information of the device.
4743  * Description: This function is called by the PCI subsystem to release a
4744  * PCI device and free up all resources held by the device. This could
4745  * be in response to a Hot plug event or when the driver is to be removed
4746  * from memory.
4747  */
4748
4749 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
4750 {
4751         struct net_device *dev =
4752             (struct net_device *) pci_get_drvdata(pdev);
4753         nic_t *sp;
4754
4755         if (dev == NULL) {
4756                 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
4757                 return;
4758         }
4759
4760         sp = dev->priv;
4761         unregister_netdev(dev);
4762
4763         free_shared_mem(sp);
4764         iounmap(sp->bar0);
4765         iounmap(sp->bar1);
4766         pci_disable_device(pdev);
4767         pci_release_regions(pdev);
4768         pci_set_drvdata(pdev, NULL);
4769         free_netdev(dev);
4770 }
4771
4772 /**
4773  * s2io_starter - Entry point for the driver
4774  * Description: This function is the entry point for the driver. It registers
4775  * the driver with the PCI subsystem.
4776  */
4777
4778 int __init s2io_starter(void)
4779 {
4780         return pci_module_init(&s2io_driver);
4781 }
4782
4783 /**
4784  * s2io_closer - Cleanup routine for the driver
4785  * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4786  */
4787
4788 void s2io_closer(void)
4789 {
4790         pci_unregister_driver(&s2io_driver);
4791         DBG_PRINT(INIT_DBG, "cleanup done\n");
4792 }
4793
4794 module_init(s2io_starter);
4795 module_exit(s2io_closer);