cxgb4: Replaced the backdoor mechanism to access the HW memory with PCIe Window method
[sfrench/cifs-2.6.git] drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <asm/uaccess.h>
65
66 #include "cxgb4.h"
67 #include "t4_regs.h"
68 #include "t4_msg.h"
69 #include "t4fw_api.h"
70 #include "cxgb4_dcb.h"
71 #include "l2t.h"
72
73 #include <../drivers/net/bonding/bonding.h>
74
75 #ifdef DRV_VERSION
76 #undef DRV_VERSION
77 #endif
78 #define DRV_VERSION "2.0.0-ko"
79 #define DRV_DESC "Chelsio T4/T5 Network Driver"
80
81 /*
82  * Max interrupt hold-off timer value in us.  Queues fall back to this value
83  * under extreme memory pressure so it's largish to give the system time to
84  * recover.
85  */
86 #define MAX_SGE_TIMERVAL 200U
87
88 enum {
89         /*
90          * Physical Function provisioning constants.
91          */
92         PFRES_NVI = 4,                  /* # of Virtual Interfaces */
93         PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
94         PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr
95                                          */
96         PFRES_NEQ = 256,                /* # of egress queues */
97         PFRES_NIQ = 0,                  /* # of ingress queues */
98         PFRES_TC = 0,                   /* PCI-E traffic class */
99         PFRES_NEXACTF = 128,            /* # of exact MPS filters */
100
101         PFRES_R_CAPS = FW_CMD_CAP_PF,
102         PFRES_WX_CAPS = FW_CMD_CAP_PF,
103
104 #ifdef CONFIG_PCI_IOV
105         /*
106          * Virtual Function provisioning constants.  We need two extra Ingress
107          * Queues with Interrupt capability to serve as the VF's Firmware
108          * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
109          * neither will have Free Lists associated with them.  For each
110          * Ethernet/Control Egress Queue and for each Free List, we need an
111          * Egress Context.
112          */
113         VFRES_NPORTS = 1,               /* # of "ports" per VF */
114         VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */
115
116         VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
117         VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
118         VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
119         VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
120         VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
121         VFRES_TC = 0,                   /* PCI-E traffic class */
122         VFRES_NEXACTF = 16,             /* # of exact MPS filters */
123
124         VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125         VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
126 #endif
127 };
128
129 /*
130  * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
131  * static and likely not to be useful in the long run.  We really need to
132  * implement some form of persistent configuration which the firmware
133  * controls.
134  */
135 static unsigned int pfvfres_pmask(struct adapter *adapter,
136                                   unsigned int pf, unsigned int vf)
137 {
138         unsigned int portn, portvec;
139
140         /*
141          * Give PF's access to all of the ports.
142          */
143         if (vf == 0)
144                 return FW_PFVF_CMD_PMASK_MASK;
145
146         /*
147          * For VFs, we'll assign them access to the ports based purely on the
148          * PF.  We assign active ports in order, wrapping around if there are
149          * fewer active ports than PFs: e.g. active port[pf % nports].
150          * Unfortunately the adapter's port_info structs haven't been
151          * initialized yet so we have to compute this.
152          */
153         if (adapter->params.nports == 0)
154                 return 0;
155
156         portn = pf % adapter->params.nports;
157         portvec = adapter->params.portvec;
158         for (;;) {
159                 /*
160                  * Isolate the lowest set bit in the port vector.  If we're at
161                  * the port number that we want, return that as the pmask;
162                  * otherwise mask that bit out of the port vector and
163                  * decrement our port number ...
164                  */
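                /* For example, if portvec = 0xa (ports 1 and 3 active), the
                 * first pass isolates bit 1 (pmask = 0x2); once that bit is
                 * cleared, the next pass isolates bit 3 (pmask = 0x8).
                 */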
165                 unsigned int pmask = portvec ^ (portvec & (portvec-1));
166                 if (portn == 0)
167                         return pmask;
168                 portn--;
169                 portvec &= ~pmask;
170         }
171         /*NOTREACHED*/
172 }
173
174 enum {
175         MAX_TXQ_ENTRIES      = 16384,
176         MAX_CTRL_TXQ_ENTRIES = 1024,
177         MAX_RSPQ_ENTRIES     = 16384,
178         MAX_RX_BUFFERS       = 16384,
179         MIN_TXQ_ENTRIES      = 32,
180         MIN_CTRL_TXQ_ENTRIES = 32,
181         MIN_RSPQ_ENTRIES     = 128,
182         MIN_FL_ENTRIES       = 16
183 };
184
185 /* Host shadow copy of ingress filter entry.  This is in host native format
186  * and doesn't match the ordering or bit order, etc. of the hardware or the
187  * firmware command.  The use of bit-field structure elements is purely to
188  * remind ourselves of the field size limitations and save memory in the case
189  * where the filter table is large.
190  */
191 struct filter_entry {
192         /* Administrative fields for filter.
193          */
194         u32 valid:1;            /* filter allocated and valid */
195         u32 locked:1;           /* filter is administratively locked */
196
197         u32 pending:1;          /* filter action is pending firmware reply */
198         u32 smtidx:8;           /* Source MAC Table index for smac */
199         struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
200
201         /* The filter itself.  Most of this is a straight copy of information
202          * provided by the extended ioctl().  Some fields are translated to
203          * internal forms -- for instance the Ingress Queue ID passed in from
204          * the ioctl() is translated into the Absolute Ingress Queue ID.
205          */
206         struct ch_filter_specification fs;
207 };
208
209 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
210                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
211                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
212
213 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
214
215 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
216         CH_DEVICE(0xa000, 0),  /* PE10K */
217         CH_DEVICE(0x4001, -1),
218         CH_DEVICE(0x4002, -1),
219         CH_DEVICE(0x4003, -1),
220         CH_DEVICE(0x4004, -1),
221         CH_DEVICE(0x4005, -1),
222         CH_DEVICE(0x4006, -1),
223         CH_DEVICE(0x4007, -1),
224         CH_DEVICE(0x4008, -1),
225         CH_DEVICE(0x4009, -1),
226         CH_DEVICE(0x400a, -1),
227         CH_DEVICE(0x4401, 4),
228         CH_DEVICE(0x4402, 4),
229         CH_DEVICE(0x4403, 4),
230         CH_DEVICE(0x4404, 4),
231         CH_DEVICE(0x4405, 4),
232         CH_DEVICE(0x4406, 4),
233         CH_DEVICE(0x4407, 4),
234         CH_DEVICE(0x4408, 4),
235         CH_DEVICE(0x4409, 4),
236         CH_DEVICE(0x440a, 4),
237         CH_DEVICE(0x440d, 4),
238         CH_DEVICE(0x440e, 4),
239         CH_DEVICE(0x5001, 4),
240         CH_DEVICE(0x5002, 4),
241         CH_DEVICE(0x5003, 4),
242         CH_DEVICE(0x5004, 4),
243         CH_DEVICE(0x5005, 4),
244         CH_DEVICE(0x5006, 4),
245         CH_DEVICE(0x5007, 4),
246         CH_DEVICE(0x5008, 4),
247         CH_DEVICE(0x5009, 4),
248         CH_DEVICE(0x500A, 4),
249         CH_DEVICE(0x500B, 4),
250         CH_DEVICE(0x500C, 4),
251         CH_DEVICE(0x500D, 4),
252         CH_DEVICE(0x500E, 4),
253         CH_DEVICE(0x500F, 4),
254         CH_DEVICE(0x5010, 4),
255         CH_DEVICE(0x5011, 4),
256         CH_DEVICE(0x5012, 4),
257         CH_DEVICE(0x5013, 4),
258         CH_DEVICE(0x5014, 4),
259         CH_DEVICE(0x5015, 4),
260         CH_DEVICE(0x5080, 4),
261         CH_DEVICE(0x5081, 4),
262         CH_DEVICE(0x5082, 4),
263         CH_DEVICE(0x5083, 4),
264         CH_DEVICE(0x5084, 4),
265         CH_DEVICE(0x5085, 4),
266         CH_DEVICE(0x5401, 4),
267         CH_DEVICE(0x5402, 4),
268         CH_DEVICE(0x5403, 4),
269         CH_DEVICE(0x5404, 4),
270         CH_DEVICE(0x5405, 4),
271         CH_DEVICE(0x5406, 4),
272         CH_DEVICE(0x5407, 4),
273         CH_DEVICE(0x5408, 4),
274         CH_DEVICE(0x5409, 4),
275         CH_DEVICE(0x540A, 4),
276         CH_DEVICE(0x540B, 4),
277         CH_DEVICE(0x540C, 4),
278         CH_DEVICE(0x540D, 4),
279         CH_DEVICE(0x540E, 4),
280         CH_DEVICE(0x540F, 4),
281         CH_DEVICE(0x5410, 4),
282         CH_DEVICE(0x5411, 4),
283         CH_DEVICE(0x5412, 4),
284         CH_DEVICE(0x5413, 4),
285         CH_DEVICE(0x5414, 4),
286         CH_DEVICE(0x5415, 4),
287         CH_DEVICE(0x5480, 4),
288         CH_DEVICE(0x5481, 4),
289         CH_DEVICE(0x5482, 4),
290         CH_DEVICE(0x5483, 4),
291         CH_DEVICE(0x5484, 4),
292         CH_DEVICE(0x5485, 4),
293         { 0, }
294 };
295
296 #define FW4_FNAME "cxgb4/t4fw.bin"
297 #define FW5_FNAME "cxgb4/t5fw.bin"
298 #define FW4_CFNAME "cxgb4/t4-config.txt"
299 #define FW5_CFNAME "cxgb4/t5-config.txt"
300
301 MODULE_DESCRIPTION(DRV_DESC);
302 MODULE_AUTHOR("Chelsio Communications");
303 MODULE_LICENSE("Dual BSD/GPL");
304 MODULE_VERSION(DRV_VERSION);
305 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
306 MODULE_FIRMWARE(FW4_FNAME);
307 MODULE_FIRMWARE(FW5_FNAME);
308
309 /*
310  * Normally we're willing to become the firmware's Master PF but will be happy
311  * if another PF has already become the Master and initialized the adapter.
312  * Setting "force_init" will cause this driver to forcibly establish itself as
313  * the Master PF and initialize the adapter.
314  */
315 static uint force_init;
316
317 module_param(force_init, uint, 0644);
318 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
319
320 /*
321  * Normally if the firmware we connect to has Configuration File support, we
322  * use that and only fall back to the old Driver-based initialization if the
323  * Configuration File fails for some reason.  If force_old_init is set, then
324  * we'll always use the old Driver-based initialization sequence.
325  */
326 static uint force_old_init;
327
328 module_param(force_old_init, uint, 0644);
329 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
330
331 static int dflt_msg_enable = DFLT_MSG_ENABLE;
332
333 module_param(dflt_msg_enable, int, 0644);
334 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
335
336 /*
337  * The driver uses the best interrupt scheme available on a platform in the
338  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
339  * of these schemes the driver may consider as follows:
340  *
341  * msi = 2: choose from among all three options
342  * msi = 1: only consider MSI and INTx interrupts
343  * msi = 0: force INTx interrupts
344  */
345 static int msi = 2;
346
347 module_param(msi, int, 0644);
348 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
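/* For example, loading the driver with "modprobe cxgb4 msi=1" would limit it
 * to MSI/INTx.  Because the parameter is registered with mode 0644, it is
 * also visible under /sys/module/cxgb4/parameters/msi once the module is
 * loaded.
 */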
349
350 /*
351  * Queue interrupt hold-off timer values.  Queues default to the first of these
352  * upon creation.
353  */
354 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
355
356 module_param_array(intr_holdoff, uint, NULL, 0644);
357 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
358                  "0..4 in microseconds");
359
360 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
361
362 module_param_array(intr_cnt, uint, NULL, 0644);
363 MODULE_PARM_DESC(intr_cnt,
364                  "thresholds 1..3 for queue interrupt packet counters");
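/* module_param_array() parameters take comma-separated lists, so the defaults
 * above correspond to e.g. "intr_holdoff=5,10,20,50,100" and "intr_cnt=4,8,16"
 * on the module command line.
 */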
365
366 /*
367  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
368  * offset by 2 bytes in order to have the IP headers line up on 4-byte
369  * boundaries.  This is a requirement for many architectures which will throw
370  * a machine check fault if an attempt is made to access one of the 4-byte IP
371  * header fields on a non-4-byte boundary.  And it's a major performance issue
372  * even on some architectures which allow it like some implementations of the
373  * x86 ISA.  However, some architectures don't mind this and for some very
374  * edge-case performance sensitive applications (like forwarding large volumes
375  * of small packets), setting this DMA offset to 0 will decrease the number of
376  * PCI-E Bus transfers enough to measurably affect performance.
377  */
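/* With the standard 14-byte Ethernet header, the 2-byte pad below places the
 * IP header at byte offset 16, i.e. on a 4-byte boundary.
 */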
378 static int rx_dma_offset = 2;
379
380 static bool vf_acls;
381
382 #ifdef CONFIG_PCI_IOV
383 module_param(vf_acls, bool, 0644);
384 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
385
386 /* Configure the number of PCI-E Virtual Functions which are to be instantiated
387  * on SR-IOV Capable Physical Functions.
388  */
389 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
390
391 module_param_array(num_vf, uint, NULL, 0644);
392 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
393 #endif
394
395 /* TX Queue select used to determine which algorithm to use for selecting the
396  * TX queue. Select between the kernel-provided function (select_queue=0) and
397  * the driver's cxgb_select_queue function (select_queue=1).
398  *
399  * Default: select_queue=0
400  */
401 static int select_queue;
402 module_param(select_queue, int, 0644);
403 MODULE_PARM_DESC(select_queue,
404                  "Select between the kernel-provided method or the driver's method of selecting the TX queue. Default is the kernel method.");
405
406 /*
407  * The filter TCAM has a fixed portion and a variable portion.  The fixed
408  * portion can match on source/destination IPv4/IPv6 addresses and TCP/UDP
409  * ports.  The variable portion is 36 bits which can include things like Exact
410  * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
411  * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
412  * far exceed the 36-bit budget for this "compressed" header portion of the
413  * filter.  Thus, we have a scarce resource which must be carefully managed.
414  *
415  * By default we set this up to mostly match the set of filter matching
416  * capabilities of T3 but with accommodations for some of T4's more
417  * interesting features:
418  *
419  *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
420  *     [Inner] VLAN (17), Port (3), FCoE (1) }
421  */
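/* That default selection uses 1 + 3 + 8 + 17 + 3 + 1 = 33 of the available
 * 36 "compressed" filter bits.
 */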
422 enum {
423         TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
424         TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
425         TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
426 };
427
428 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
429
430 module_param(tp_vlan_pri_map, uint, 0644);
431 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
432
433 static struct dentry *cxgb4_debugfs_root;
434
435 static LIST_HEAD(adapter_list);
436 static DEFINE_MUTEX(uld_mutex);
437 /* Adapter list to be accessed from atomic context */
438 static LIST_HEAD(adap_rcu_list);
439 static DEFINE_SPINLOCK(adap_rcu_lock);
440 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
441 static const char *uld_str[] = { "RDMA", "iSCSI" };
442
443 static void link_report(struct net_device *dev)
444 {
445         if (!netif_carrier_ok(dev))
446                 netdev_info(dev, "link down\n");
447         else {
448                 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
449
450                 const char *s = "10Mbps";
451                 const struct port_info *p = netdev_priv(dev);
452
453                 switch (p->link_cfg.speed) {
454                 case 10000:
455                         s = "10Gbps";
456                         break;
457                 case 1000:
458                         s = "1000Mbps";
459                         break;
460                 case 100:
461                         s = "100Mbps";
462                         break;
463                 case 40000:
464                         s = "40Gbps";
465                         break;
466                 }
467
468                 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
469                             fc[p->link_cfg.fc]);
470         }
471 }
472
473 #ifdef CONFIG_CHELSIO_T4_DCB
474 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
475 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
476 {
477         struct port_info *pi = netdev_priv(dev);
478         struct adapter *adap = pi->adapter;
479         struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
480         int i;
481
482         /* We use a simple mapping of Port TX Queue Index to DCB
483          * Priority when we're enabling DCB.
484          */
485         for (i = 0; i < pi->nqsets; i++, txq++) {
486                 u32 name, value;
487                 int err;
488
489                 name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
490                         FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
491                         FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
492                 value = enable ? i : 0xffffffff;
493
494                 /* Since we can be called while atomic (from "interrupt
495          * level") we need to issue the Set Parameters Command
496                  * without sleeping (timeout < 0).
497                  */
498                 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
499                                             &name, &value);
500
501                 if (err)
502                         dev_err(adap->pdev_dev,
503                                 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
504                                 enable ? "set" : "unset", pi->port_id, i, -err);
505         }
506 }
507 #endif /* CONFIG_CHELSIO_T4_DCB */
508
509 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
510 {
511         struct net_device *dev = adapter->port[port_id];
512
513         /* Skip changes from disabled ports. */
514         if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
515                 if (link_stat)
516                         netif_carrier_on(dev);
517                 else {
518 #ifdef CONFIG_CHELSIO_T4_DCB
519                         cxgb4_dcb_state_init(dev);
520                         dcb_tx_queue_prio_enable(dev, false);
521 #endif /* CONFIG_CHELSIO_T4_DCB */
522                         netif_carrier_off(dev);
523                 }
524
525                 link_report(dev);
526         }
527 }
528
529 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
530 {
531         static const char *mod_str[] = {
532                 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
533         };
534
535         const struct net_device *dev = adap->port[port_id];
536         const struct port_info *pi = netdev_priv(dev);
537
538         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
539                 netdev_info(dev, "port module unplugged\n");
540         else if (pi->mod_type < ARRAY_SIZE(mod_str))
541                 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
542 }
543
544 /*
545  * Configure the exact and hash address filters to handle a port's multicast
546  * and secondary unicast MAC addresses.
547  */
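/* Addresses are batched in groups of up to ARRAY_SIZE(addr) (7) per
 * t4_alloc_mac_filt() call; the uhash/mhash bits those calls return are then
 * programmed with the final t4_set_addr_hash() call.
 */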
548 static int set_addr_filters(const struct net_device *dev, bool sleep)
549 {
550         u64 mhash = 0;
551         u64 uhash = 0;
552         bool free = true;
553         u16 filt_idx[7];
554         const u8 *addr[7];
555         int ret, naddr = 0;
556         const struct netdev_hw_addr *ha;
557         int uc_cnt = netdev_uc_count(dev);
558         int mc_cnt = netdev_mc_count(dev);
559         const struct port_info *pi = netdev_priv(dev);
560         unsigned int mb = pi->adapter->fn;
561
562         /* first do the secondary unicast addresses */
563         netdev_for_each_uc_addr(ha, dev) {
564                 addr[naddr++] = ha->addr;
565                 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
566                         ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
567                                         naddr, addr, filt_idx, &uhash, sleep);
568                         if (ret < 0)
569                                 return ret;
570
571                         free = false;
572                         naddr = 0;
573                 }
574         }
575
576         /* next set up the multicast addresses */
577         netdev_for_each_mc_addr(ha, dev) {
578                 addr[naddr++] = ha->addr;
579                 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
580                         ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
581                                         naddr, addr, filt_idx, &mhash, sleep);
582                         if (ret < 0)
583                                 return ret;
584
585                         free = false;
586                         naddr = 0;
587                 }
588         }
589
590         return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
591                                 uhash | mhash, sleep);
592 }
593
594 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
595 module_param(dbfifo_int_thresh, int, 0644);
596 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
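/* The inline note above implies the hardware threshold is in units of 64
 * doorbell FIFO entries (10 * 64 == 640).
 */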
597
598 /*
599  * usecs to sleep while draining the dbfifo
600  */
601 static int dbfifo_drain_delay = 1000;
602 module_param(dbfifo_drain_delay, int, 0644);
603 MODULE_PARM_DESC(dbfifo_drain_delay,
604                  "usecs to sleep while draining the dbfifo");
605
606 /*
607  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
608  * If @mtu is -1 it is left unchanged.
609  */
610 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
611 {
612         int ret;
613         struct port_info *pi = netdev_priv(dev);
614
615         ret = set_addr_filters(dev, sleep_ok);
616         if (ret == 0)
617                 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
618                                     (dev->flags & IFF_PROMISC) ? 1 : 0,
619                                     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
620                                     sleep_ok);
621         return ret;
622 }
623
624 static struct workqueue_struct *workq;
625
626 /**
627  *      link_start - enable a port
628  *      @dev: the port to enable
629  *
630  *      Performs the MAC and PHY actions needed to enable a port.
631  */
632 static int link_start(struct net_device *dev)
633 {
634         int ret;
635         struct port_info *pi = netdev_priv(dev);
636         unsigned int mb = pi->adapter->fn;
637
638         /*
639          * We do not set address filters and promiscuity here, the stack does
640          * that step explicitly.
641          */
642         ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
643                             !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
644         if (ret == 0) {
645                 ret = t4_change_mac(pi->adapter, mb, pi->viid,
646                                     pi->xact_addr_filt, dev->dev_addr, true,
647                                     true);
648                 if (ret >= 0) {
649                         pi->xact_addr_filt = ret;
650                         ret = 0;
651                 }
652         }
653         if (ret == 0)
654                 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
655                                     &pi->link_cfg);
656         if (ret == 0)
657                 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
658                                           true, CXGB4_DCB_ENABLED);
659
660         return ret;
661 }
662
663 int cxgb4_dcb_enabled(const struct net_device *dev)
664 {
665 #ifdef CONFIG_CHELSIO_T4_DCB
666         struct port_info *pi = netdev_priv(dev);
667
668         return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
669 #else
670         return 0;
671 #endif
672 }
673 EXPORT_SYMBOL(cxgb4_dcb_enabled);
674
675 #ifdef CONFIG_CHELSIO_T4_DCB
676 /* Handle a Data Center Bridging update message from the firmware. */
677 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
678 {
679         int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
680         struct net_device *dev = adap->port[port];
681         int old_dcb_enabled = cxgb4_dcb_enabled(dev);
682         int new_dcb_enabled;
683
684         cxgb4_dcb_handle_fw_update(adap, pcmd);
685         new_dcb_enabled = cxgb4_dcb_enabled(dev);
686
687         /* If the DCB has become enabled or disabled on the port then we're
688          * going to need to set up/tear down DCB Priority parameters for the
689          * TX Queues associated with the port.
690          */
691         if (new_dcb_enabled != old_dcb_enabled)
692                 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
693 }
694 #endif /* CONFIG_CHELSIO_T4_DCB */
695
696 /* Clear a filter and release any of its resources that we own.  This also
697  * clears the filter's "pending" status.
698  */
699 static void clear_filter(struct adapter *adap, struct filter_entry *f)
700 {
701         /* If the new or old filter has loopback rewriting rules then we'll
702          * need to free any existing Layer Two Table (L2T) entries of the old
703          * filter rule.  The firmware will handle freeing up any Source MAC
704          * Table (SMT) entries used for rewriting Source MAC Addresses in
705          * loopback rules.
706          */
707         if (f->l2t)
708                 cxgb4_l2t_release(f->l2t);
709
710         /* The zeroing of the filter rule below clears the filter valid,
711          * pending, locked flags, l2t pointer, etc. so it's all we need for
712          * this operation.
713          */
714         memset(f, 0, sizeof(*f));
715 }
716
717 /* Handle a filter write/deletion reply.
718  */
719 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
720 {
721         unsigned int idx = GET_TID(rpl);
722         unsigned int nidx = idx - adap->tids.ftid_base;
723         unsigned int ret;
724         struct filter_entry *f;
725
726         if (idx >= adap->tids.ftid_base && nidx <
727            (adap->tids.nftids + adap->tids.nsftids)) {
728                 idx = nidx;
729                 ret = GET_TCB_COOKIE(rpl->cookie);
730                 f = &adap->tids.ftid_tab[idx];
731
732                 if (ret == FW_FILTER_WR_FLT_DELETED) {
733                         /* Clear the filter when we get confirmation from the
734                          * hardware that the filter has been deleted.
735                          */
736                         clear_filter(adap, f);
737                 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
738                         dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
739                                 idx);
740                         clear_filter(adap, f);
741                 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
742                         f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
743                         f->pending = 0;  /* asynchronous setup completed */
744                         f->valid = 1;
745                 } else {
746                         /* Something went wrong.  Issue a warning about the
747                          * problem and clear everything out.
748                          */
749                         dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
750                                 idx, ret);
751                         clear_filter(adap, f);
752                 }
753         }
754 }
755
756 /* Response queue handler for the FW event queue.
757  */
758 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
759                           const struct pkt_gl *gl)
760 {
761         u8 opcode = ((const struct rss_header *)rsp)->opcode;
762
763         rsp++;                                          /* skip RSS header */
764
765         /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
766          */
767         if (unlikely(opcode == CPL_FW4_MSG &&
768            ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
769                 rsp++;
770                 opcode = ((const struct rss_header *)rsp)->opcode;
771                 rsp++;
772                 if (opcode != CPL_SGE_EGR_UPDATE) {
773                         dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
774                                 , opcode);
775                         goto out;
776                 }
777         }
778
779         if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
780                 const struct cpl_sge_egr_update *p = (void *)rsp;
781                 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
782                 struct sge_txq *txq;
783
784                 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
785                 txq->restarts++;
786                 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
787                         struct sge_eth_txq *eq;
788
789                         eq = container_of(txq, struct sge_eth_txq, q);
790                         netif_tx_wake_queue(eq->txq);
791                 } else {
792                         struct sge_ofld_txq *oq;
793
794                         oq = container_of(txq, struct sge_ofld_txq, q);
795                         tasklet_schedule(&oq->qresume_tsk);
796                 }
797         } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
798                 const struct cpl_fw6_msg *p = (void *)rsp;
799
800 #ifdef CONFIG_CHELSIO_T4_DCB
801                 const struct fw_port_cmd *pcmd = (const void *)p->data;
802                 unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
803                 unsigned int action =
804                         FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
805
806                 if (cmd == FW_PORT_CMD &&
807                     action == FW_PORT_ACTION_GET_PORT_INFO) {
808                         int port = FW_PORT_CMD_PORTID_GET(
809                                         be32_to_cpu(pcmd->op_to_portid));
810                         struct net_device *dev = q->adap->port[port];
811                         int state_input = ((pcmd->u.info.dcbxdis_pkd &
812                                             FW_PORT_CMD_DCBXDIS)
813                                            ? CXGB4_DCB_INPUT_FW_DISABLED
814                                            : CXGB4_DCB_INPUT_FW_ENABLED);
815
816                         cxgb4_dcb_state_fsm(dev, state_input);
817                 }
818
819                 if (cmd == FW_PORT_CMD &&
820                     action == FW_PORT_ACTION_L2_DCB_CFG)
821                         dcb_rpl(q->adap, pcmd);
822                 else
823 #endif
824                         if (p->type == 0)
825                                 t4_handle_fw_rpl(q->adap, p->data);
826         } else if (opcode == CPL_L2T_WRITE_RPL) {
827                 const struct cpl_l2t_write_rpl *p = (void *)rsp;
828
829                 do_l2t_write_rpl(q->adap, p);
830         } else if (opcode == CPL_SET_TCB_RPL) {
831                 const struct cpl_set_tcb_rpl *p = (void *)rsp;
832
833                 filter_rpl(q->adap, p);
834         } else
835                 dev_err(q->adap->pdev_dev,
836                         "unexpected CPL %#x on FW event queue\n", opcode);
837 out:
838         return 0;
839 }
840
841 /**
842  *      uldrx_handler - response queue handler for ULD queues
843  *      @q: the response queue that received the packet
844  *      @rsp: the response queue descriptor holding the offload message
845  *      @gl: the gather list of packet fragments
846  *
847  *      Deliver an ingress offload packet to a ULD.  All processing is done by
848  *      the ULD, we just maintain statistics.
849  */
850 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
851                          const struct pkt_gl *gl)
852 {
853         struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
854
855         /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
856          */
857         if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
858             ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
859                 rsp += 2;
860
861         if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
862                 rxq->stats.nomem++;
863                 return -1;
864         }
865         if (gl == NULL)
866                 rxq->stats.imm++;
867         else if (gl == CXGB4_MSG_AN)
868                 rxq->stats.an++;
869         else
870                 rxq->stats.pkts++;
871         return 0;
872 }
873
874 static void disable_msi(struct adapter *adapter)
875 {
876         if (adapter->flags & USING_MSIX) {
877                 pci_disable_msix(adapter->pdev);
878                 adapter->flags &= ~USING_MSIX;
879         } else if (adapter->flags & USING_MSI) {
880                 pci_disable_msi(adapter->pdev);
881                 adapter->flags &= ~USING_MSI;
882         }
883 }
884
885 /*
886  * Interrupt handler for non-data events used with MSI-X.
887  */
888 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
889 {
890         struct adapter *adap = cookie;
891
892         u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
893         if (v & PFSW) {
894                 adap->swintr = 1;
895                 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
896         }
897         t4_slow_intr_handler(adap);
898         return IRQ_HANDLED;
899 }
900
901 /*
902  * Name the MSI-X interrupts.
903  */
904 static void name_msix_vecs(struct adapter *adap)
905 {
906         int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
907
908         /* non-data interrupts */
909         snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
910
911         /* FW events */
912         snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
913                  adap->port[0]->name);
914
915         /* Ethernet queues */
916         for_each_port(adap, j) {
917                 struct net_device *d = adap->port[j];
918                 const struct port_info *pi = netdev_priv(d);
919
920                 for (i = 0; i < pi->nqsets; i++, msi_idx++)
921                         snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
922                                  d->name, i);
923         }
924
925         /* offload queues */
926         for_each_ofldrxq(&adap->sge, i)
927                 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
928                          adap->port[0]->name, i);
929
930         for_each_rdmarxq(&adap->sge, i)
931                 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
932                          adap->port[0]->name, i);
933
934         for_each_rdmaciq(&adap->sge, i)
935                 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
936                          adap->port[0]->name, i);
937 }
938
939 static int request_msix_queue_irqs(struct adapter *adap)
940 {
941         struct sge *s = &adap->sge;
942         int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
943         int msi_index = 2;
944
945         err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
946                           adap->msix_info[1].desc, &s->fw_evtq);
947         if (err)
948                 return err;
949
950         for_each_ethrxq(s, ethqidx) {
951                 err = request_irq(adap->msix_info[msi_index].vec,
952                                   t4_sge_intr_msix, 0,
953                                   adap->msix_info[msi_index].desc,
954                                   &s->ethrxq[ethqidx].rspq);
955                 if (err)
956                         goto unwind;
957                 msi_index++;
958         }
959         for_each_ofldrxq(s, ofldqidx) {
960                 err = request_irq(adap->msix_info[msi_index].vec,
961                                   t4_sge_intr_msix, 0,
962                                   adap->msix_info[msi_index].desc,
963                                   &s->ofldrxq[ofldqidx].rspq);
964                 if (err)
965                         goto unwind;
966                 msi_index++;
967         }
968         for_each_rdmarxq(s, rdmaqidx) {
969                 err = request_irq(adap->msix_info[msi_index].vec,
970                                   t4_sge_intr_msix, 0,
971                                   adap->msix_info[msi_index].desc,
972                                   &s->rdmarxq[rdmaqidx].rspq);
973                 if (err)
974                         goto unwind;
975                 msi_index++;
976         }
977         for_each_rdmaciq(s, rdmaciqqidx) {
978                 err = request_irq(adap->msix_info[msi_index].vec,
979                                   t4_sge_intr_msix, 0,
980                                   adap->msix_info[msi_index].desc,
981                                   &s->rdmaciq[rdmaciqqidx].rspq);
982                 if (err)
983                         goto unwind;
984                 msi_index++;
985         }
986         return 0;
987
988 unwind:
989         while (--rdmaciqqidx >= 0)
990                 free_irq(adap->msix_info[--msi_index].vec,
991                          &s->rdmaciq[rdmaciqqidx].rspq);
992         while (--rdmaqidx >= 0)
993                 free_irq(adap->msix_info[--msi_index].vec,
994                          &s->rdmarxq[rdmaqidx].rspq);
995         while (--ofldqidx >= 0)
996                 free_irq(adap->msix_info[--msi_index].vec,
997                          &s->ofldrxq[ofldqidx].rspq);
998         while (--ethqidx >= 0)
999                 free_irq(adap->msix_info[--msi_index].vec,
1000                          &s->ethrxq[ethqidx].rspq);
1001         free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1002         return err;
1003 }
1004
1005 static void free_msix_queue_irqs(struct adapter *adap)
1006 {
1007         int i, msi_index = 2;
1008         struct sge *s = &adap->sge;
1009
1010         free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1011         for_each_ethrxq(s, i)
1012                 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
1013         for_each_ofldrxq(s, i)
1014                 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
1015         for_each_rdmarxq(s, i)
1016                 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
1017         for_each_rdmaciq(s, i)
1018                 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
1019 }
1020
1021 /**
1022  *      write_rss - write the RSS table for a given port
1023  *      @pi: the port
1024  *      @queues: array of queue indices for RSS
1025  *
1026  *      Sets up the portion of the HW RSS table for the port's VI to distribute
1027  *      packets to the Rx queues in @queues.
1028  */
1029 static int write_rss(const struct port_info *pi, const u16 *queues)
1030 {
1031         u16 *rss;
1032         int i, err;
1033         const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
1034
1035         rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
1036         if (!rss)
1037                 return -ENOMEM;
1038
1039         /* map the queue indices to queue ids */
1040         for (i = 0; i < pi->rss_size; i++, queues++)
1041                 rss[i] = q[*queues].rspq.abs_id;
1042
1043         err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
1044                                   pi->rss_size, rss, pi->rss_size);
1045         kfree(rss);
1046         return err;
1047 }
1048
1049 /**
1050  *      setup_rss - configure RSS
1051  *      @adap: the adapter
1052  *
1053  *      Sets up RSS for each port.
1054  */
1055 static int setup_rss(struct adapter *adap)
1056 {
1057         int i, err;
1058
1059         for_each_port(adap, i) {
1060                 const struct port_info *pi = adap2pinfo(adap, i);
1061
1062                 err = write_rss(pi, pi->rss);
1063                 if (err)
1064                         return err;
1065         }
1066         return 0;
1067 }
1068
1069 /*
1070  * Return the channel of the ingress queue with the given qid.
1071  */
1072 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
1073 {
1074         qid -= p->ingr_start;
1075         return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
1076 }
1077
1078 /*
1079  * Wait until all NAPI handlers are descheduled.
1080  */
1081 static void quiesce_rx(struct adapter *adap)
1082 {
1083         int i;
1084
1085         for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1086                 struct sge_rspq *q = adap->sge.ingr_map[i];
1087
1088                 if (q && q->handler)
1089                         napi_disable(&q->napi);
1090         }
1091 }
1092
1093 /*
1094  * Enable NAPI scheduling and interrupt generation for all Rx queues.
1095  */
1096 static void enable_rx(struct adapter *adap)
1097 {
1098         int i;
1099
1100         for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1101                 struct sge_rspq *q = adap->sge.ingr_map[i];
1102
1103                 if (!q)
1104                         continue;
1105                 if (q->handler)
1106                         napi_enable(&q->napi);
1107                 /* 0-increment GTS to start the timer and enable interrupts */
1108                 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
1109                              SEINTARM(q->intr_params) |
1110                              INGRESSQID(q->cntxt_id));
1111         }
1112 }
1113
1114 /**
1115  *      setup_sge_queues - configure SGE Tx/Rx/response queues
1116  *      @adap: the adapter
1117  *
1118  *      Determines how many sets of SGE queues to use and initializes them.
1119  *      We support multiple queue sets per port if we have MSI-X, otherwise
1120  *      just one queue set per port.
1121  */
1122 static int setup_sge_queues(struct adapter *adap)
1123 {
1124         int err, msi_idx, i, j;
1125         struct sge *s = &adap->sge;
1126
1127         bitmap_zero(s->starving_fl, MAX_EGRQ);
1128         bitmap_zero(s->txq_maperr, MAX_EGRQ);
1129
1130         if (adap->flags & USING_MSIX)
1131                 msi_idx = 1;         /* vector 0 is for non-queue interrupts */
1132         else {
1133                 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1134                                        NULL, NULL);
1135                 if (err)
1136                         return err;
1137                 msi_idx = -((int)s->intrq.abs_id + 1);
1138         }
1139
1140         err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1141                                msi_idx, NULL, fwevtq_handler);
1142         if (err) {
1143 freeout:        t4_free_sge_resources(adap);
1144                 return err;
1145         }
1146
1147         for_each_port(adap, i) {
1148                 struct net_device *dev = adap->port[i];
1149                 struct port_info *pi = netdev_priv(dev);
1150                 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1151                 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1152
1153                 for (j = 0; j < pi->nqsets; j++, q++) {
1154                         if (msi_idx > 0)
1155                                 msi_idx++;
1156                         err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1157                                                msi_idx, &q->fl,
1158                                                t4_ethrx_handler);
1159                         if (err)
1160                                 goto freeout;
1161                         q->rspq.idx = j;
1162                         memset(&q->stats, 0, sizeof(q->stats));
1163                 }
1164                 for (j = 0; j < pi->nqsets; j++, t++) {
1165                         err = t4_sge_alloc_eth_txq(adap, t, dev,
1166                                         netdev_get_tx_queue(dev, j),
1167                                         s->fw_evtq.cntxt_id);
1168                         if (err)
1169                                 goto freeout;
1170                 }
1171         }
1172
1173         j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1174         for_each_ofldrxq(s, i) {
1175                 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1176                 struct net_device *dev = adap->port[i / j];
1177
1178                 if (msi_idx > 0)
1179                         msi_idx++;
1180                 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1181                                        q->fl.size ? &q->fl : NULL,
1182                                        uldrx_handler);
1183                 if (err)
1184                         goto freeout;
1185                 memset(&q->stats, 0, sizeof(q->stats));
1186                 s->ofld_rxq[i] = q->rspq.abs_id;
1187                 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1188                                             s->fw_evtq.cntxt_id);
1189                 if (err)
1190                         goto freeout;
1191         }
1192
1193         for_each_rdmarxq(s, i) {
1194                 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1195
1196                 if (msi_idx > 0)
1197                         msi_idx++;
1198                 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1199                                        msi_idx, q->fl.size ? &q->fl : NULL,
1200                                        uldrx_handler);
1201                 if (err)
1202                         goto freeout;
1203                 memset(&q->stats, 0, sizeof(q->stats));
1204                 s->rdma_rxq[i] = q->rspq.abs_id;
1205         }
1206
1207         for_each_rdmaciq(s, i) {
1208                 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1209
1210                 if (msi_idx > 0)
1211                         msi_idx++;
1212                 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1213                                        msi_idx, q->fl.size ? &q->fl : NULL,
1214                                        uldrx_handler);
1215                 if (err)
1216                         goto freeout;
1217                 memset(&q->stats, 0, sizeof(q->stats));
1218                 s->rdma_ciq[i] = q->rspq.abs_id;
1219         }
1220
1221         for_each_port(adap, i) {
1222                 /*
1223                  * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1224                  * have RDMA queues, and that's the right value.
1225                  */
1226                 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1227                                             s->fw_evtq.cntxt_id,
1228                                             s->rdmarxq[i].rspq.cntxt_id);
1229                 if (err)
1230                         goto freeout;
1231         }
1232
1233         t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
1234                      RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1235                      QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1236         return 0;
1237 }
1238
1239 /*
1240  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1241  * The allocated memory is cleared.
1242  */
1243 void *t4_alloc_mem(size_t size)
1244 {
1245         void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1246
1247         if (!p)
1248                 p = vzalloc(size);
1249         return p;
1250 }
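/* Because the buffer may have come from vmalloc() rather than kmalloc(),
 * callers must release it with t4_free_mem() below instead of kfree().
 */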
1251
1252 /*
1253  * Free memory allocated through t4_alloc_mem().
1254  */
1255 static void t4_free_mem(void *addr)
1256 {
1257         if (is_vmalloc_addr(addr))
1258                 vfree(addr);
1259         else
1260                 kfree(addr);
1261 }
1262
1263 /* Send a Work Request to write the filter at a specified index.  We construct
1264  * a Firmware Filter Work Request to have the work done and put the indicated
1265  * filter into "pending" mode which will prevent any further actions against
1266  * it till we get a reply from the firmware on the completion status of the
1267  * request.
1268  */
1269 static int set_filter_wr(struct adapter *adapter, int fidx)
1270 {
1271         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1272         struct sk_buff *skb;
1273         struct fw_filter_wr *fwr;
1274         unsigned int ftid;
1275
1276         /* If the new filter requires loopback Destination MAC and/or VLAN
1277          * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1278          * the filter.
1279          */
1280         if (f->fs.newdmac || f->fs.newvlan) {
1281                 /* allocate L2T entry for new filter */
1282                 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1283                 if (f->l2t == NULL)
1284                         return -EAGAIN;
1285                 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1286                                         f->fs.eport, f->fs.dmac)) {
1287                         cxgb4_l2t_release(f->l2t);
1288                         f->l2t = NULL;
1289                         return -ENOMEM;
1290                 }
1291         }
1292
1293         ftid = adapter->tids.ftid_base + fidx;
1294
1295         skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1296         fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1297         memset(fwr, 0, sizeof(*fwr));
1298
1299         /* It would be nice to put most of the following in t4_hw.c but most
1300          * of the work is translating the cxgbtool ch_filter_specification
1301          * into the Work Request, and the definition of that structure is
1302          * currently in cxgbtool.h, which isn't appropriate to pull into the
1303          * common code.  We may eventually try to come up with a more neutral
1304          * filter specification structure but for now it's easiest to simply
1305          * put this fairly direct code in line ...
1306          */
1307         fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1308         fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1309         fwr->tid_to_iq =
1310                 htonl(V_FW_FILTER_WR_TID(ftid) |
1311                       V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1312                       V_FW_FILTER_WR_NOREPLY(0) |
1313                       V_FW_FILTER_WR_IQ(f->fs.iq));
1314         fwr->del_filter_to_l2tix =
1315                 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1316                       V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1317                       V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1318                       V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1319                       V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1320                       V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1321                       V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1322                       V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1323                       V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1324                                              f->fs.newvlan == VLAN_REWRITE) |
1325                       V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1326                                             f->fs.newvlan == VLAN_REWRITE) |
1327                       V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1328                       V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1329                       V_FW_FILTER_WR_PRIO(f->fs.prio) |
1330                       V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1331         fwr->ethtype = htons(f->fs.val.ethtype);
1332         fwr->ethtypem = htons(f->fs.mask.ethtype);
1333         fwr->frag_to_ovlan_vldm =
1334                 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1335                  V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1336                  V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1337                  V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1338                  V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1339                  V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1340         fwr->smac_sel = 0;
1341         fwr->rx_chan_rx_rpl_iq =
1342                 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1343                       V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1344         fwr->maci_to_matchtypem =
1345                 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1346                       V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1347                       V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1348                       V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1349                       V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1350                       V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1351                       V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1352                       V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1353         fwr->ptcl = f->fs.val.proto;
1354         fwr->ptclm = f->fs.mask.proto;
1355         fwr->ttyp = f->fs.val.tos;
1356         fwr->ttypm = f->fs.mask.tos;
1357         fwr->ivlan = htons(f->fs.val.ivlan);
1358         fwr->ivlanm = htons(f->fs.mask.ivlan);
1359         fwr->ovlan = htons(f->fs.val.ovlan);
1360         fwr->ovlanm = htons(f->fs.mask.ovlan);
1361         memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1362         memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1363         memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1364         memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1365         fwr->lp = htons(f->fs.val.lport);
1366         fwr->lpm = htons(f->fs.mask.lport);
1367         fwr->fp = htons(f->fs.val.fport);
1368         fwr->fpm = htons(f->fs.mask.fport);
1369         if (f->fs.newsmac)
1370                 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1371
1372         /* Mark the filter as "pending" and ship off the Filter Work Request.
1373          * When we get the Work Request Reply we'll clear the pending status.
1374          */
1375         f->pending = 1;
1376         set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1377         t4_ofld_send(adapter, skb);
1378         return 0;
1379 }
1380
1381 /* Delete the filter at a specified index.
1382  */
1383 static int del_filter_wr(struct adapter *adapter, int fidx)
1384 {
1385         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1386         struct sk_buff *skb;
1387         struct fw_filter_wr *fwr;
1388         unsigned int len, ftid;
1389
1390         len = sizeof(*fwr);
1391         ftid = adapter->tids.ftid_base + fidx;
1392
1393         skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1394         fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1395         t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1396
1397         /* Mark the filter as "pending" and ship off the Filter Work Request.
1398          * When we get the Work Request Reply we'll clear the pending status.
1399          */
1400         f->pending = 1;
1401         t4_mgmt_tx(adapter, skb);
1402         return 0;
1403 }
1404
1405 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1406                              void *accel_priv, select_queue_fallback_t fallback)
1407 {
1408         int txq;
1409
1410 #ifdef CONFIG_CHELSIO_T4_DCB
1411         /* If Data Center Bridging has been successfully negotiated on this
1412          * link then we'll use the skb's priority to map it to a TX Queue.
1413          * The skb's priority is determined via the VLAN Tag Priority Code
1414          * Point field.
1415          */
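        /* Example (illustrative): a VLAN TCI of 0x6064 carries Priority Code
         * Point 3 in bits 15:13, so such a packet would be steered to TX
         * queue 3 by the mapping below.
         */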
1416         if (cxgb4_dcb_enabled(dev)) {
1417                 u16 vlan_tci;
1418                 int err;
1419
1420                 err = vlan_get_tag(skb, &vlan_tci);
1421                 if (unlikely(err)) {
1422                         if (net_ratelimit())
1423                                 netdev_warn(dev,
1424                                             "TX Packet without VLAN Tag on DCB Link\n");
1425                         txq = 0;
1426                 } else {
1427                         txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1428                 }
1429                 return txq;
1430         }
1431 #endif /* CONFIG_CHELSIO_T4_DCB */
1432
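        /* When select_queue is non-zero (a driver knob defined earlier in this
         * file), reuse the skb's recorded RX queue, or the current CPU if no
         * RX queue was recorded, and fold the result into the device's TX
         * queue range.
         */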
1433         if (select_queue) {
1434                 txq = (skb_rx_queue_recorded(skb)
1435                         ? skb_get_rx_queue(skb)
1436                         : smp_processor_id());
1437
1438                 while (unlikely(txq >= dev->real_num_tx_queues))
1439                         txq -= dev->real_num_tx_queues;
1440
1441                 return txq;
1442         }
1443
1444         return fallback(dev, skb) % dev->real_num_tx_queues;
1445 }
1446
1447 static inline int is_offload(const struct adapter *adap)
1448 {
1449         return adap->params.offload;
1450 }
1451
1452 /*
1453  * Implementation of ethtool operations.
1454  */
1455
1456 static u32 get_msglevel(struct net_device *dev)
1457 {
1458         return netdev2adap(dev)->msg_enable;
1459 }
1460
1461 static void set_msglevel(struct net_device *dev, u32 val)
1462 {
1463         netdev2adap(dev)->msg_enable = val;
1464 }
1465
1466 static char stats_strings[][ETH_GSTRING_LEN] = {
1467         "TxOctetsOK         ",
1468         "TxFramesOK         ",
1469         "TxBroadcastFrames  ",
1470         "TxMulticastFrames  ",
1471         "TxUnicastFrames    ",
1472         "TxErrorFrames      ",
1473
1474         "TxFrames64         ",
1475         "TxFrames65To127    ",
1476         "TxFrames128To255   ",
1477         "TxFrames256To511   ",
1478         "TxFrames512To1023  ",
1479         "TxFrames1024To1518 ",
1480         "TxFrames1519ToMax  ",
1481
1482         "TxFramesDropped    ",
1483         "TxPauseFrames      ",
1484         "TxPPP0Frames       ",
1485         "TxPPP1Frames       ",
1486         "TxPPP2Frames       ",
1487         "TxPPP3Frames       ",
1488         "TxPPP4Frames       ",
1489         "TxPPP5Frames       ",
1490         "TxPPP6Frames       ",
1491         "TxPPP7Frames       ",
1492
1493         "RxOctetsOK         ",
1494         "RxFramesOK         ",
1495         "RxBroadcastFrames  ",
1496         "RxMulticastFrames  ",
1497         "RxUnicastFrames    ",
1498
1499         "RxFramesTooLong    ",
1500         "RxJabberErrors     ",
1501         "RxFCSErrors        ",
1502         "RxLengthErrors     ",
1503         "RxSymbolErrors     ",
1504         "RxRuntFrames       ",
1505
1506         "RxFrames64         ",
1507         "RxFrames65To127    ",
1508         "RxFrames128To255   ",
1509         "RxFrames256To511   ",
1510         "RxFrames512To1023  ",
1511         "RxFrames1024To1518 ",
1512         "RxFrames1519ToMax  ",
1513
1514         "RxPauseFrames      ",
1515         "RxPPP0Frames       ",
1516         "RxPPP1Frames       ",
1517         "RxPPP2Frames       ",
1518         "RxPPP3Frames       ",
1519         "RxPPP4Frames       ",
1520         "RxPPP5Frames       ",
1521         "RxPPP6Frames       ",
1522         "RxPPP7Frames       ",
1523
1524         "RxBG0FramesDropped ",
1525         "RxBG1FramesDropped ",
1526         "RxBG2FramesDropped ",
1527         "RxBG3FramesDropped ",
1528         "RxBG0FramesTrunc   ",
1529         "RxBG1FramesTrunc   ",
1530         "RxBG2FramesTrunc   ",
1531         "RxBG3FramesTrunc   ",
1532
1533         "TSO                ",
1534         "TxCsumOffload      ",
1535         "RxCsumGood         ",
1536         "VLANextractions    ",
1537         "VLANinsertions     ",
1538         "GROpackets         ",
1539         "GROmerged          ",
1540         "WriteCoalSuccess   ",
1541         "WriteCoalFail      ",
1542 };
1543
1544 static int get_sset_count(struct net_device *dev, int sset)
1545 {
1546         switch (sset) {
1547         case ETH_SS_STATS:
1548                 return ARRAY_SIZE(stats_strings);
1549         default:
1550                 return -EOPNOTSUPP;
1551         }
1552 }
1553
1554 #define T4_REGMAP_SIZE (160 * 1024)
1555 #define T5_REGMAP_SIZE (332 * 1024)
1556
1557 static int get_regs_len(struct net_device *dev)
1558 {
1559         struct adapter *adap = netdev2adap(dev);
1560         if (is_t4(adap->params.chip))
1561                 return T4_REGMAP_SIZE;
1562         else
1563                 return T5_REGMAP_SIZE;
1564 }
1565
1566 static int get_eeprom_len(struct net_device *dev)
1567 {
1568         return EEPROMSIZE;
1569 }
1570
1571 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1572 {
1573         struct adapter *adapter = netdev2adap(dev);
1574
1575         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1576         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1577         strlcpy(info->bus_info, pci_name(adapter->pdev),
1578                 sizeof(info->bus_info));
1579
1580         if (adapter->params.fw_vers)
1581                 snprintf(info->fw_version, sizeof(info->fw_version),
1582                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
1583                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1584                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1585                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1586                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1587                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1588                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1589                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1590                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1591 }
1592
1593 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1594 {
1595         if (stringset == ETH_SS_STATS)
1596                 memcpy(data, stats_strings, sizeof(stats_strings));
1597 }
1598
1599 /*
1600  * Port statistics maintained per queue of the port.  They must appear in
1601  * the same order as the corresponding entries in stats_strings above.
1602  */
1603 struct queue_port_stats {
1604         u64 tso;
1605         u64 tx_csum;
1606         u64 rx_csum;
1607         u64 vlan_ex;
1608         u64 vlan_ins;
1609         u64 gro_pkts;
1610         u64 gro_merged;
1611 };
1612
1613 static void collect_sge_port_stats(const struct adapter *adap,
1614                 const struct port_info *p, struct queue_port_stats *s)
1615 {
1616         int i;
1617         const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1618         const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1619
1620         memset(s, 0, sizeof(*s));
1621         for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1622                 s->tso += tx->tso;
1623                 s->tx_csum += tx->tx_cso;
1624                 s->rx_csum += rx->stats.rx_cso;
1625                 s->vlan_ex += rx->stats.vlan_ex;
1626                 s->vlan_ins += tx->vlan_ins;
1627                 s->gro_pkts += rx->stats.lro_pkts;
1628                 s->gro_merged += rx->stats.lro_merged;
1629         }
1630 }
1631
1632 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1633                       u64 *data)
1634 {
1635         struct port_info *pi = netdev_priv(dev);
1636         struct adapter *adapter = pi->adapter;
1637         u32 val1, val2;
1638
1639         t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1640
1641         data += sizeof(struct port_stats) / sizeof(u64);
1642         collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1643         data += sizeof(struct queue_port_stats) / sizeof(u64);
1644         if (!is_t4(adapter->params.chip)) {
1645                 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1646                 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1647                 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1648                 *data = val1 - val2;
1649                 data++;
1650                 *data = val2;
1651                 data++;
1652         } else {
1653                 memset(data, 0, 2 * sizeof(u64));
1654                 data += 2;      /* skip past the two zeroed slots */
1655         }
1656 }
1657
1658 /*
1659  * Return a version number to identify the type of adapter.  The scheme is:
1660  * - bits 0..9: chip version
1661  * - bits 10..15: chip revision
1662  * - bits 16..23: register dump version
1663  */
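/* For example (illustrative, assuming the T5 chip-version code is 0x5): a
 * revision-1 T5 adapter would report (1 << 16) | (1 << 10) | 0x5 = 0x10405.
 */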
1664 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1665 {
1666         return CHELSIO_CHIP_VERSION(ap->params.chip) |
1667                 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1668 }
1669
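/* Copy the inclusive register range [start, end] into the dump buffer at the
 * same offsets, one 32-bit register per step.  For example (illustrative),
 * reg_block_dump(ap, buf, 0x1008, 0x1010) stores the registers at 0x1008,
 * 0x100c and 0x1010 starting at buf + 0x1008.
 */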
1670 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1671                            unsigned int end)
1672 {
1673         u32 *p = buf + start;
1674
1675         for ( ; start <= end; start += sizeof(u32))
1676                 *p++ = t4_read_reg(ap, start);
1677 }
1678
1679 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1680                      void *buf)
1681 {
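        /* Register dump ranges are listed as inclusive (start, end) address
         * pairs; the loop at the end of this function walks them two entries
         * at a time and copies each range out with reg_block_dump().
         */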
1682         static const unsigned int t4_reg_ranges[] = {
1683                 0x1008, 0x1108,
1684                 0x1180, 0x11b4,
1685                 0x11fc, 0x123c,
1686                 0x1300, 0x173c,
1687                 0x1800, 0x18fc,
1688                 0x3000, 0x30d8,
1689                 0x30e0, 0x5924,
1690                 0x5960, 0x59d4,
1691                 0x5a00, 0x5af8,
1692                 0x6000, 0x6098,
1693                 0x6100, 0x6150,
1694                 0x6200, 0x6208,
1695                 0x6240, 0x6248,
1696                 0x6280, 0x6338,
1697                 0x6370, 0x638c,
1698                 0x6400, 0x643c,
1699                 0x6500, 0x6524,
1700                 0x6a00, 0x6a38,
1701                 0x6a60, 0x6a78,
1702                 0x6b00, 0x6b84,
1703                 0x6bf0, 0x6c84,
1704                 0x6cf0, 0x6d84,
1705                 0x6df0, 0x6e84,
1706                 0x6ef0, 0x6f84,
1707                 0x6ff0, 0x7084,
1708                 0x70f0, 0x7184,
1709                 0x71f0, 0x7284,
1710                 0x72f0, 0x7384,
1711                 0x73f0, 0x7450,
1712                 0x7500, 0x7530,
1713                 0x7600, 0x761c,
1714                 0x7680, 0x76cc,
1715                 0x7700, 0x7798,
1716                 0x77c0, 0x77fc,
1717                 0x7900, 0x79fc,
1718                 0x7b00, 0x7c38,
1719                 0x7d00, 0x7efc,
1720                 0x8dc0, 0x8e1c,
1721                 0x8e30, 0x8e78,
1722                 0x8ea0, 0x8f6c,
1723                 0x8fc0, 0x9074,
1724                 0x90fc, 0x90fc,
1725                 0x9400, 0x9458,
1726                 0x9600, 0x96bc,
1727                 0x9800, 0x9808,
1728                 0x9820, 0x983c,
1729                 0x9850, 0x9864,
1730                 0x9c00, 0x9c6c,
1731                 0x9c80, 0x9cec,
1732                 0x9d00, 0x9d6c,
1733                 0x9d80, 0x9dec,
1734                 0x9e00, 0x9e6c,
1735                 0x9e80, 0x9eec,
1736                 0x9f00, 0x9f6c,
1737                 0x9f80, 0x9fec,
1738                 0xd004, 0xd03c,
1739                 0xdfc0, 0xdfe0,
1740                 0xe000, 0xea7c,
1741                 0xf000, 0x11190,
1742                 0x19040, 0x1906c,
1743                 0x19078, 0x19080,
1744                 0x1908c, 0x19124,
1745                 0x19150, 0x191b0,
1746                 0x191d0, 0x191e8,
1747                 0x19238, 0x1924c,
1748                 0x193f8, 0x19474,
1749                 0x19490, 0x194f8,
1750                 0x19800, 0x19f30,
1751                 0x1a000, 0x1a06c,
1752                 0x1a0b0, 0x1a120,
1753                 0x1a128, 0x1a138,
1754                 0x1a190, 0x1a1c4,
1755                 0x1a1fc, 0x1a1fc,
1756                 0x1e040, 0x1e04c,
1757                 0x1e284, 0x1e28c,
1758                 0x1e2c0, 0x1e2c0,
1759                 0x1e2e0, 0x1e2e0,
1760                 0x1e300, 0x1e384,
1761                 0x1e3c0, 0x1e3c8,
1762                 0x1e440, 0x1e44c,
1763                 0x1e684, 0x1e68c,
1764                 0x1e6c0, 0x1e6c0,
1765                 0x1e6e0, 0x1e6e0,
1766                 0x1e700, 0x1e784,
1767                 0x1e7c0, 0x1e7c8,
1768                 0x1e840, 0x1e84c,
1769                 0x1ea84, 0x1ea8c,
1770                 0x1eac0, 0x1eac0,
1771                 0x1eae0, 0x1eae0,
1772                 0x1eb00, 0x1eb84,
1773                 0x1ebc0, 0x1ebc8,
1774                 0x1ec40, 0x1ec4c,
1775                 0x1ee84, 0x1ee8c,
1776                 0x1eec0, 0x1eec0,
1777                 0x1eee0, 0x1eee0,
1778                 0x1ef00, 0x1ef84,
1779                 0x1efc0, 0x1efc8,
1780                 0x1f040, 0x1f04c,
1781                 0x1f284, 0x1f28c,
1782                 0x1f2c0, 0x1f2c0,
1783                 0x1f2e0, 0x1f2e0,
1784                 0x1f300, 0x1f384,
1785                 0x1f3c0, 0x1f3c8,
1786                 0x1f440, 0x1f44c,
1787                 0x1f684, 0x1f68c,
1788                 0x1f6c0, 0x1f6c0,
1789                 0x1f6e0, 0x1f6e0,
1790                 0x1f700, 0x1f784,
1791                 0x1f7c0, 0x1f7c8,
1792                 0x1f840, 0x1f84c,
1793                 0x1fa84, 0x1fa8c,
1794                 0x1fac0, 0x1fac0,
1795                 0x1fae0, 0x1fae0,
1796                 0x1fb00, 0x1fb84,
1797                 0x1fbc0, 0x1fbc8,
1798                 0x1fc40, 0x1fc4c,
1799                 0x1fe84, 0x1fe8c,
1800                 0x1fec0, 0x1fec0,
1801                 0x1fee0, 0x1fee0,
1802                 0x1ff00, 0x1ff84,
1803                 0x1ffc0, 0x1ffc8,
1804                 0x20000, 0x2002c,
1805                 0x20100, 0x2013c,
1806                 0x20190, 0x201c8,
1807                 0x20200, 0x20318,
1808                 0x20400, 0x20528,
1809                 0x20540, 0x20614,
1810                 0x21000, 0x21040,
1811                 0x2104c, 0x21060,
1812                 0x210c0, 0x210ec,
1813                 0x21200, 0x21268,
1814                 0x21270, 0x21284,
1815                 0x212fc, 0x21388,
1816                 0x21400, 0x21404,
1817                 0x21500, 0x21518,
1818                 0x2152c, 0x2153c,
1819                 0x21550, 0x21554,
1820                 0x21600, 0x21600,
1821                 0x21608, 0x21628,
1822                 0x21630, 0x2163c,
1823                 0x21700, 0x2171c,
1824                 0x21780, 0x2178c,
1825                 0x21800, 0x21c38,
1826                 0x21c80, 0x21d7c,
1827                 0x21e00, 0x21e04,
1828                 0x22000, 0x2202c,
1829                 0x22100, 0x2213c,
1830                 0x22190, 0x221c8,
1831                 0x22200, 0x22318,
1832                 0x22400, 0x22528,
1833                 0x22540, 0x22614,
1834                 0x23000, 0x23040,
1835                 0x2304c, 0x23060,
1836                 0x230c0, 0x230ec,
1837                 0x23200, 0x23268,
1838                 0x23270, 0x23284,
1839                 0x232fc, 0x23388,
1840                 0x23400, 0x23404,
1841                 0x23500, 0x23518,
1842                 0x2352c, 0x2353c,
1843                 0x23550, 0x23554,
1844                 0x23600, 0x23600,
1845                 0x23608, 0x23628,
1846                 0x23630, 0x2363c,
1847                 0x23700, 0x2371c,
1848                 0x23780, 0x2378c,
1849                 0x23800, 0x23c38,
1850                 0x23c80, 0x23d7c,
1851                 0x23e00, 0x23e04,
1852                 0x24000, 0x2402c,
1853                 0x24100, 0x2413c,
1854                 0x24190, 0x241c8,
1855                 0x24200, 0x24318,
1856                 0x24400, 0x24528,
1857                 0x24540, 0x24614,
1858                 0x25000, 0x25040,
1859                 0x2504c, 0x25060,
1860                 0x250c0, 0x250ec,
1861                 0x25200, 0x25268,
1862                 0x25270, 0x25284,
1863                 0x252fc, 0x25388,
1864                 0x25400, 0x25404,
1865                 0x25500, 0x25518,
1866                 0x2552c, 0x2553c,
1867                 0x25550, 0x25554,
1868                 0x25600, 0x25600,
1869                 0x25608, 0x25628,
1870                 0x25630, 0x2563c,
1871                 0x25700, 0x2571c,
1872                 0x25780, 0x2578c,
1873                 0x25800, 0x25c38,
1874                 0x25c80, 0x25d7c,
1875                 0x25e00, 0x25e04,
1876                 0x26000, 0x2602c,
1877                 0x26100, 0x2613c,
1878                 0x26190, 0x261c8,
1879                 0x26200, 0x26318,
1880                 0x26400, 0x26528,
1881                 0x26540, 0x26614,
1882                 0x27000, 0x27040,
1883                 0x2704c, 0x27060,
1884                 0x270c0, 0x270ec,
1885                 0x27200, 0x27268,
1886                 0x27270, 0x27284,
1887                 0x272fc, 0x27388,
1888                 0x27400, 0x27404,
1889                 0x27500, 0x27518,
1890                 0x2752c, 0x2753c,
1891                 0x27550, 0x27554,
1892                 0x27600, 0x27600,
1893                 0x27608, 0x27628,
1894                 0x27630, 0x2763c,
1895                 0x27700, 0x2771c,
1896                 0x27780, 0x2778c,
1897                 0x27800, 0x27c38,
1898                 0x27c80, 0x27d7c,
1899                 0x27e00, 0x27e04
1900         };
1901
1902         static const unsigned int t5_reg_ranges[] = {
1903                 0x1008, 0x1148,
1904                 0x1180, 0x11b4,
1905                 0x11fc, 0x123c,
1906                 0x1280, 0x173c,
1907                 0x1800, 0x18fc,
1908                 0x3000, 0x3028,
1909                 0x3060, 0x30d8,
1910                 0x30e0, 0x30fc,
1911                 0x3140, 0x357c,
1912                 0x35a8, 0x35cc,
1913                 0x35ec, 0x35ec,
1914                 0x3600, 0x5624,
1915                 0x56cc, 0x575c,
1916                 0x580c, 0x5814,
1917                 0x5890, 0x58bc,
1918                 0x5940, 0x59dc,
1919                 0x59fc, 0x5a18,
1920                 0x5a60, 0x5a9c,
1921                 0x5b9c, 0x5bfc,
1922                 0x6000, 0x6040,
1923                 0x6058, 0x614c,
1924                 0x7700, 0x7798,
1925                 0x77c0, 0x78fc,
1926                 0x7b00, 0x7c54,
1927                 0x7d00, 0x7efc,
1928                 0x8dc0, 0x8de0,
1929                 0x8df8, 0x8e84,
1930                 0x8ea0, 0x8f84,
1931                 0x8fc0, 0x90f8,
1932                 0x9400, 0x9470,
1933                 0x9600, 0x96f4,
1934                 0x9800, 0x9808,
1935                 0x9820, 0x983c,
1936                 0x9850, 0x9864,
1937                 0x9c00, 0x9c6c,
1938                 0x9c80, 0x9cec,
1939                 0x9d00, 0x9d6c,
1940                 0x9d80, 0x9dec,
1941                 0x9e00, 0x9e6c,
1942                 0x9e80, 0x9eec,
1943                 0x9f00, 0x9f6c,
1944                 0x9f80, 0xa020,
1945                 0xd004, 0xd03c,
1946                 0xdfc0, 0xdfe0,
1947                 0xe000, 0x11088,
1948                 0x1109c, 0x1117c,
1949                 0x11190, 0x11204,
1950                 0x19040, 0x1906c,
1951                 0x19078, 0x19080,
1952                 0x1908c, 0x19124,
1953                 0x19150, 0x191b0,
1954                 0x191d0, 0x191e8,
1955                 0x19238, 0x19290,
1956                 0x193f8, 0x19474,
1957                 0x19490, 0x194cc,
1958                 0x194f0, 0x194f8,
1959                 0x19c00, 0x19c60,
1960                 0x19c94, 0x19e10,
1961                 0x19e50, 0x19f34,
1962                 0x19f40, 0x19f50,
1963                 0x19f90, 0x19fe4,
1964                 0x1a000, 0x1a06c,
1965                 0x1a0b0, 0x1a120,
1966                 0x1a128, 0x1a138,
1967                 0x1a190, 0x1a1c4,
1968                 0x1a1fc, 0x1a1fc,
1969                 0x1e008, 0x1e00c,
1970                 0x1e040, 0x1e04c,
1971                 0x1e284, 0x1e290,
1972                 0x1e2c0, 0x1e2c0,
1973                 0x1e2e0, 0x1e2e0,
1974                 0x1e300, 0x1e384,
1975                 0x1e3c0, 0x1e3c8,
1976                 0x1e408, 0x1e40c,
1977                 0x1e440, 0x1e44c,
1978                 0x1e684, 0x1e690,
1979                 0x1e6c0, 0x1e6c0,
1980                 0x1e6e0, 0x1e6e0,
1981                 0x1e700, 0x1e784,
1982                 0x1e7c0, 0x1e7c8,
1983                 0x1e808, 0x1e80c,
1984                 0x1e840, 0x1e84c,
1985                 0x1ea84, 0x1ea90,
1986                 0x1eac0, 0x1eac0,
1987                 0x1eae0, 0x1eae0,
1988                 0x1eb00, 0x1eb84,
1989                 0x1ebc0, 0x1ebc8,
1990                 0x1ec08, 0x1ec0c,
1991                 0x1ec40, 0x1ec4c,
1992                 0x1ee84, 0x1ee90,
1993                 0x1eec0, 0x1eec0,
1994                 0x1eee0, 0x1eee0,
1995                 0x1ef00, 0x1ef84,
1996                 0x1efc0, 0x1efc8,
1997                 0x1f008, 0x1f00c,
1998                 0x1f040, 0x1f04c,
1999                 0x1f284, 0x1f290,
2000                 0x1f2c0, 0x1f2c0,
2001                 0x1f2e0, 0x1f2e0,
2002                 0x1f300, 0x1f384,
2003                 0x1f3c0, 0x1f3c8,
2004                 0x1f408, 0x1f40c,
2005                 0x1f440, 0x1f44c,
2006                 0x1f684, 0x1f690,
2007                 0x1f6c0, 0x1f6c0,
2008                 0x1f6e0, 0x1f6e0,
2009                 0x1f700, 0x1f784,
2010                 0x1f7c0, 0x1f7c8,
2011                 0x1f808, 0x1f80c,
2012                 0x1f840, 0x1f84c,
2013                 0x1fa84, 0x1fa90,
2014                 0x1fac0, 0x1fac0,
2015                 0x1fae0, 0x1fae0,
2016                 0x1fb00, 0x1fb84,
2017                 0x1fbc0, 0x1fbc8,
2018                 0x1fc08, 0x1fc0c,
2019                 0x1fc40, 0x1fc4c,
2020                 0x1fe84, 0x1fe90,
2021                 0x1fec0, 0x1fec0,
2022                 0x1fee0, 0x1fee0,
2023                 0x1ff00, 0x1ff84,
2024                 0x1ffc0, 0x1ffc8,
2025                 0x30000, 0x30030,
2026                 0x30100, 0x30144,
2027                 0x30190, 0x301d0,
2028                 0x30200, 0x30318,
2029                 0x30400, 0x3052c,
2030                 0x30540, 0x3061c,
2031                 0x30800, 0x30834,
2032                 0x308c0, 0x30908,
2033                 0x30910, 0x309ac,
2034                 0x30a00, 0x30a04,
2035                 0x30a0c, 0x30a2c,
2036                 0x30a44, 0x30a50,
2037                 0x30a74, 0x30c24,
2038                 0x30d08, 0x30d14,
2039                 0x30d1c, 0x30d20,
2040                 0x30d3c, 0x30d50,
2041                 0x31200, 0x3120c,
2042                 0x31220, 0x31220,
2043                 0x31240, 0x31240,
2044                 0x31600, 0x31600,
2045                 0x31608, 0x3160c,
2046                 0x31a00, 0x31a1c,
2047                 0x31e04, 0x31e20,
2048                 0x31e38, 0x31e3c,
2049                 0x31e80, 0x31e80,
2050                 0x31e88, 0x31ea8,
2051                 0x31eb0, 0x31eb4,
2052                 0x31ec8, 0x31ed4,
2053                 0x31fb8, 0x32004,
2054                 0x32208, 0x3223c,
2055                 0x32600, 0x32630,
2056                 0x32a00, 0x32abc,
2057                 0x32b00, 0x32b70,
2058                 0x33000, 0x33048,
2059                 0x33060, 0x3309c,
2060                 0x330f0, 0x33148,
2061                 0x33160, 0x3319c,
2062                 0x331f0, 0x332e4,
2063                 0x332f8, 0x333e4,
2064                 0x333f8, 0x33448,
2065                 0x33460, 0x3349c,
2066                 0x334f0, 0x33548,
2067                 0x33560, 0x3359c,
2068                 0x335f0, 0x336e4,
2069                 0x336f8, 0x337e4,
2070                 0x337f8, 0x337fc,
2071                 0x33814, 0x33814,
2072                 0x3382c, 0x3382c,
2073                 0x33880, 0x3388c,
2074                 0x338e8, 0x338ec,
2075                 0x33900, 0x33948,
2076                 0x33960, 0x3399c,
2077                 0x339f0, 0x33ae4,
2078                 0x33af8, 0x33b10,
2079                 0x33b28, 0x33b28,
2080                 0x33b3c, 0x33b50,
2081                 0x33bf0, 0x33c10,
2082                 0x33c28, 0x33c28,
2083                 0x33c3c, 0x33c50,
2084                 0x33cf0, 0x33cfc,
2085                 0x34000, 0x34030,
2086                 0x34100, 0x34144,
2087                 0x34190, 0x341d0,
2088                 0x34200, 0x34318,
2089                 0x34400, 0x3452c,
2090                 0x34540, 0x3461c,
2091                 0x34800, 0x34834,
2092                 0x348c0, 0x34908,
2093                 0x34910, 0x349ac,
2094                 0x34a00, 0x34a04,
2095                 0x34a0c, 0x34a2c,
2096                 0x34a44, 0x34a50,
2097                 0x34a74, 0x34c24,
2098                 0x34d08, 0x34d14,
2099                 0x34d1c, 0x34d20,
2100                 0x34d3c, 0x34d50,
2101                 0x35200, 0x3520c,
2102                 0x35220, 0x35220,
2103                 0x35240, 0x35240,
2104                 0x35600, 0x35600,
2105                 0x35608, 0x3560c,
2106                 0x35a00, 0x35a1c,
2107                 0x35e04, 0x35e20,
2108                 0x35e38, 0x35e3c,
2109                 0x35e80, 0x35e80,
2110                 0x35e88, 0x35ea8,
2111                 0x35eb0, 0x35eb4,
2112                 0x35ec8, 0x35ed4,
2113                 0x35fb8, 0x36004,
2114                 0x36208, 0x3623c,
2115                 0x36600, 0x36630,
2116                 0x36a00, 0x36abc,
2117                 0x36b00, 0x36b70,
2118                 0x37000, 0x37048,
2119                 0x37060, 0x3709c,
2120                 0x370f0, 0x37148,
2121                 0x37160, 0x3719c,
2122                 0x371f0, 0x372e4,
2123                 0x372f8, 0x373e4,
2124                 0x373f8, 0x37448,
2125                 0x37460, 0x3749c,
2126                 0x374f0, 0x37548,
2127                 0x37560, 0x3759c,
2128                 0x375f0, 0x376e4,
2129                 0x376f8, 0x377e4,
2130                 0x377f8, 0x377fc,
2131                 0x37814, 0x37814,
2132                 0x3782c, 0x3782c,
2133                 0x37880, 0x3788c,
2134                 0x378e8, 0x378ec,
2135                 0x37900, 0x37948,
2136                 0x37960, 0x3799c,
2137                 0x379f0, 0x37ae4,
2138                 0x37af8, 0x37b10,
2139                 0x37b28, 0x37b28,
2140                 0x37b3c, 0x37b50,
2141                 0x37bf0, 0x37c10,
2142                 0x37c28, 0x37c28,
2143                 0x37c3c, 0x37c50,
2144                 0x37cf0, 0x37cfc,
2145                 0x38000, 0x38030,
2146                 0x38100, 0x38144,
2147                 0x38190, 0x381d0,
2148                 0x38200, 0x38318,
2149                 0x38400, 0x3852c,
2150                 0x38540, 0x3861c,
2151                 0x38800, 0x38834,
2152                 0x388c0, 0x38908,
2153                 0x38910, 0x389ac,
2154                 0x38a00, 0x38a04,
2155                 0x38a0c, 0x38a2c,
2156                 0x38a44, 0x38a50,
2157                 0x38a74, 0x38c24,
2158                 0x38d08, 0x38d14,
2159                 0x38d1c, 0x38d20,
2160                 0x38d3c, 0x38d50,
2161                 0x39200, 0x3920c,
2162                 0x39220, 0x39220,
2163                 0x39240, 0x39240,
2164                 0x39600, 0x39600,
2165                 0x39608, 0x3960c,
2166                 0x39a00, 0x39a1c,
2167                 0x39e04, 0x39e20,
2168                 0x39e38, 0x39e3c,
2169                 0x39e80, 0x39e80,
2170                 0x39e88, 0x39ea8,
2171                 0x39eb0, 0x39eb4,
2172                 0x39ec8, 0x39ed4,
2173                 0x39fb8, 0x3a004,
2174                 0x3a208, 0x3a23c,
2175                 0x3a600, 0x3a630,
2176                 0x3aa00, 0x3aabc,
2177                 0x3ab00, 0x3ab70,
2178                 0x3b000, 0x3b048,
2179                 0x3b060, 0x3b09c,
2180                 0x3b0f0, 0x3b148,
2181                 0x3b160, 0x3b19c,
2182                 0x3b1f0, 0x3b2e4,
2183                 0x3b2f8, 0x3b3e4,
2184                 0x3b3f8, 0x3b448,
2185                 0x3b460, 0x3b49c,
2186                 0x3b4f0, 0x3b548,
2187                 0x3b560, 0x3b59c,
2188                 0x3b5f0, 0x3b6e4,
2189                 0x3b6f8, 0x3b7e4,
2190                 0x3b7f8, 0x3b7fc,
2191                 0x3b814, 0x3b814,
2192                 0x3b82c, 0x3b82c,
2193                 0x3b880, 0x3b88c,
2194                 0x3b8e8, 0x3b8ec,
2195                 0x3b900, 0x3b948,
2196                 0x3b960, 0x3b99c,
2197                 0x3b9f0, 0x3bae4,
2198                 0x3baf8, 0x3bb10,
2199                 0x3bb28, 0x3bb28,
2200                 0x3bb3c, 0x3bb50,
2201                 0x3bbf0, 0x3bc10,
2202                 0x3bc28, 0x3bc28,
2203                 0x3bc3c, 0x3bc50,
2204                 0x3bcf0, 0x3bcfc,
2205                 0x3c000, 0x3c030,
2206                 0x3c100, 0x3c144,
2207                 0x3c190, 0x3c1d0,
2208                 0x3c200, 0x3c318,
2209                 0x3c400, 0x3c52c,
2210                 0x3c540, 0x3c61c,
2211                 0x3c800, 0x3c834,
2212                 0x3c8c0, 0x3c908,
2213                 0x3c910, 0x3c9ac,
2214                 0x3ca00, 0x3ca04,
2215                 0x3ca0c, 0x3ca2c,
2216                 0x3ca44, 0x3ca50,
2217                 0x3ca74, 0x3cc24,
2218                 0x3cd08, 0x3cd14,
2219                 0x3cd1c, 0x3cd20,
2220                 0x3cd3c, 0x3cd50,
2221                 0x3d200, 0x3d20c,
2222                 0x3d220, 0x3d220,
2223                 0x3d240, 0x3d240,
2224                 0x3d600, 0x3d600,
2225                 0x3d608, 0x3d60c,
2226                 0x3da00, 0x3da1c,
2227                 0x3de04, 0x3de20,
2228                 0x3de38, 0x3de3c,
2229                 0x3de80, 0x3de80,
2230                 0x3de88, 0x3dea8,
2231                 0x3deb0, 0x3deb4,
2232                 0x3dec8, 0x3ded4,
2233                 0x3dfb8, 0x3e004,
2234                 0x3e208, 0x3e23c,
2235                 0x3e600, 0x3e630,
2236                 0x3ea00, 0x3eabc,
2237                 0x3eb00, 0x3eb70,
2238                 0x3f000, 0x3f048,
2239                 0x3f060, 0x3f09c,
2240                 0x3f0f0, 0x3f148,
2241                 0x3f160, 0x3f19c,
2242                 0x3f1f0, 0x3f2e4,
2243                 0x3f2f8, 0x3f3e4,
2244                 0x3f3f8, 0x3f448,
2245                 0x3f460, 0x3f49c,
2246                 0x3f4f0, 0x3f548,
2247                 0x3f560, 0x3f59c,
2248                 0x3f5f0, 0x3f6e4,
2249                 0x3f6f8, 0x3f7e4,
2250                 0x3f7f8, 0x3f7fc,
2251                 0x3f814, 0x3f814,
2252                 0x3f82c, 0x3f82c,
2253                 0x3f880, 0x3f88c,
2254                 0x3f8e8, 0x3f8ec,
2255                 0x3f900, 0x3f948,
2256                 0x3f960, 0x3f99c,
2257                 0x3f9f0, 0x3fae4,
2258                 0x3faf8, 0x3fb10,
2259                 0x3fb28, 0x3fb28,
2260                 0x3fb3c, 0x3fb50,
2261                 0x3fbf0, 0x3fc10,
2262                 0x3fc28, 0x3fc28,
2263                 0x3fc3c, 0x3fc50,
2264                 0x3fcf0, 0x3fcfc,
2265                 0x40000, 0x4000c,
2266                 0x40040, 0x40068,
2267                 0x40080, 0x40144,
2268                 0x40180, 0x4018c,
2269                 0x40200, 0x40298,
2270                 0x402ac, 0x4033c,
2271                 0x403f8, 0x403fc,
2272                 0x41304, 0x413c4,
2273                 0x41400, 0x4141c,
2274                 0x41480, 0x414d0,
2275                 0x44000, 0x44078,
2276                 0x440c0, 0x44278,
2277                 0x442c0, 0x44478,
2278                 0x444c0, 0x44678,
2279                 0x446c0, 0x44878,
2280                 0x448c0, 0x449fc,
2281                 0x45000, 0x45068,
2282                 0x45080, 0x45084,
2283                 0x450a0, 0x450b0,
2284                 0x45200, 0x45268,
2285                 0x45280, 0x45284,
2286                 0x452a0, 0x452b0,
2287                 0x460c0, 0x460e4,
2288                 0x47000, 0x4708c,
2289                 0x47200, 0x47250,
2290                 0x47400, 0x47420,
2291                 0x47600, 0x47618,
2292                 0x47800, 0x47814,
2293                 0x48000, 0x4800c,
2294                 0x48040, 0x48068,
2295                 0x48080, 0x48144,
2296                 0x48180, 0x4818c,
2297                 0x48200, 0x48298,
2298                 0x482ac, 0x4833c,
2299                 0x483f8, 0x483fc,
2300                 0x49304, 0x493c4,
2301                 0x49400, 0x4941c,
2302                 0x49480, 0x494d0,
2303                 0x4c000, 0x4c078,
2304                 0x4c0c0, 0x4c278,
2305                 0x4c2c0, 0x4c478,
2306                 0x4c4c0, 0x4c678,
2307                 0x4c6c0, 0x4c878,
2308                 0x4c8c0, 0x4c9fc,
2309                 0x4d000, 0x4d068,
2310                 0x4d080, 0x4d084,
2311                 0x4d0a0, 0x4d0b0,
2312                 0x4d200, 0x4d268,
2313                 0x4d280, 0x4d284,
2314                 0x4d2a0, 0x4d2b0,
2315                 0x4e0c0, 0x4e0e4,
2316                 0x4f000, 0x4f08c,
2317                 0x4f200, 0x4f250,
2318                 0x4f400, 0x4f420,
2319                 0x4f600, 0x4f618,
2320                 0x4f800, 0x4f814,
2321                 0x50000, 0x500cc,
2322                 0x50400, 0x50400,
2323                 0x50800, 0x508cc,
2324                 0x50c00, 0x50c00,
2325                 0x51000, 0x5101c,
2326                 0x51300, 0x51308,
2327         };
2328
2329         int i;
2330         struct adapter *ap = netdev2adap(dev);
2331         const unsigned int *reg_ranges;
2332         int arr_size = 0, buf_size = 0;
2333
2334         if (is_t4(ap->params.chip)) {
2335                 reg_ranges = &t4_reg_ranges[0];
2336                 arr_size = ARRAY_SIZE(t4_reg_ranges);
2337                 buf_size = T4_REGMAP_SIZE;
2338         } else {
2339                 reg_ranges = &t5_reg_ranges[0];
2340                 arr_size = ARRAY_SIZE(t5_reg_ranges);
2341                 buf_size = T5_REGMAP_SIZE;
2342         }
2343
2344         regs->version = mk_adap_vers(ap);
2345
2346         memset(buf, 0, buf_size);
2347         for (i = 0; i < arr_size; i += 2)
2348                 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2349 }
2350
2351 static int restart_autoneg(struct net_device *dev)
2352 {
2353         struct port_info *p = netdev_priv(dev);
2354
2355         if (!netif_running(dev))
2356                 return -EAGAIN;
2357         if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2358                 return -EINVAL;
2359         t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2360         return 0;
2361 }
2362
2363 static int identify_port(struct net_device *dev,
2364                          enum ethtool_phys_id_state state)
2365 {
2366         unsigned int val;
2367         struct adapter *adap = netdev2adap(dev);
2368
2369         if (state == ETHTOOL_ID_ACTIVE)
2370                 val = 0xffff;
2371         else if (state == ETHTOOL_ID_INACTIVE)
2372                 val = 0;
2373         else
2374                 return -EINVAL;
2375
2376         return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2377 }
2378
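/* Translate a firmware port type and link-capability word into the ethtool
 * SUPPORTED_* mask that get_settings() below reports to user space.
 */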
2379 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2380 {
2381         unsigned int v = 0;
2382
2383         if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2384             type == FW_PORT_TYPE_BT_XAUI) {
2385                 v |= SUPPORTED_TP;
2386                 if (caps & FW_PORT_CAP_SPEED_100M)
2387                         v |= SUPPORTED_100baseT_Full;
2388                 if (caps & FW_PORT_CAP_SPEED_1G)
2389                         v |= SUPPORTED_1000baseT_Full;
2390                 if (caps & FW_PORT_CAP_SPEED_10G)
2391                         v |= SUPPORTED_10000baseT_Full;
2392         } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2393                 v |= SUPPORTED_Backplane;
2394                 if (caps & FW_PORT_CAP_SPEED_1G)
2395                         v |= SUPPORTED_1000baseKX_Full;
2396                 if (caps & FW_PORT_CAP_SPEED_10G)
2397                         v |= SUPPORTED_10000baseKX4_Full;
2398         } else if (type == FW_PORT_TYPE_KR)
2399                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2400         else if (type == FW_PORT_TYPE_BP_AP)
2401                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2402                      SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2403         else if (type == FW_PORT_TYPE_BP4_AP)
2404                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2405                      SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2406                      SUPPORTED_10000baseKX4_Full;
2407         else if (type == FW_PORT_TYPE_FIBER_XFI ||
2408                  type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2409                 v |= SUPPORTED_FIBRE;
2410         else if (type == FW_PORT_TYPE_BP40_BA)
2411                 v |= SUPPORTED_40000baseSR4_Full;
2412
2413         if (caps & FW_PORT_CAP_ANEG)
2414                 v |= SUPPORTED_Autoneg;
2415         return v;
2416 }
2417
2418 static unsigned int to_fw_linkcaps(unsigned int caps)
2419 {
2420         unsigned int v = 0;
2421
2422         if (caps & ADVERTISED_100baseT_Full)
2423                 v |= FW_PORT_CAP_SPEED_100M;
2424         if (caps & ADVERTISED_1000baseT_Full)
2425                 v |= FW_PORT_CAP_SPEED_1G;
2426         if (caps & ADVERTISED_10000baseT_Full)
2427                 v |= FW_PORT_CAP_SPEED_10G;
2428         if (caps & ADVERTISED_40000baseSR4_Full)
2429                 v |= FW_PORT_CAP_SPEED_40G;
2430         return v;
2431 }
2432
2433 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2434 {
2435         const struct port_info *p = netdev_priv(dev);
2436
2437         if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2438             p->port_type == FW_PORT_TYPE_BT_XFI ||
2439             p->port_type == FW_PORT_TYPE_BT_XAUI)
2440                 cmd->port = PORT_TP;
2441         else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2442                  p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2443                 cmd->port = PORT_FIBRE;
2444         else if (p->port_type == FW_PORT_TYPE_SFP ||
2445                  p->port_type == FW_PORT_TYPE_QSFP_10G ||
2446                  p->port_type == FW_PORT_TYPE_QSFP) {
2447                 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2448                     p->mod_type == FW_PORT_MOD_TYPE_SR ||
2449                     p->mod_type == FW_PORT_MOD_TYPE_ER ||
2450                     p->mod_type == FW_PORT_MOD_TYPE_LRM)
2451                         cmd->port = PORT_FIBRE;
2452                 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2453                          p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2454                         cmd->port = PORT_DA;
2455                 else
2456                         cmd->port = PORT_OTHER;
2457         } else
2458                 cmd->port = PORT_OTHER;
2459
2460         if (p->mdio_addr >= 0) {
2461                 cmd->phy_address = p->mdio_addr;
2462                 cmd->transceiver = XCVR_EXTERNAL;
2463                 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2464                         MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2465         } else {
2466                 cmd->phy_address = 0;  /* not really, but no better option */
2467                 cmd->transceiver = XCVR_INTERNAL;
2468                 cmd->mdio_support = 0;
2469         }
2470
2471         cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2472         cmd->advertising = from_fw_linkcaps(p->port_type,
2473                                             p->link_cfg.advertising);
2474         ethtool_cmd_speed_set(cmd,
2475                               netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2476         cmd->duplex = DUPLEX_FULL;
2477         cmd->autoneg = p->link_cfg.autoneg;
2478         cmd->maxtxpkt = 0;
2479         cmd->maxrxpkt = 0;
2480         return 0;
2481 }
2482
2483 static unsigned int speed_to_caps(int speed)
2484 {
2485         if (speed == 100)
2486                 return FW_PORT_CAP_SPEED_100M;
2487         if (speed == 1000)
2488                 return FW_PORT_CAP_SPEED_1G;
2489         if (speed == 10000)
2490                 return FW_PORT_CAP_SPEED_10G;
2491         if (speed == 40000)
2492                 return FW_PORT_CAP_SPEED_40G;
2493         return 0;
2494 }
2495
2496 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2497 {
2498         unsigned int cap;
2499         struct port_info *p = netdev_priv(dev);
2500         struct link_config *lc = &p->link_cfg;
2501         u32 speed = ethtool_cmd_speed(cmd);
2502
2503         if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
2504                 return -EINVAL;
2505
2506         if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2507                 /*
2508                  * PHY offers a single speed.  See if that's what's
2509                  * being requested.
2510                  */
2511                 if (cmd->autoneg == AUTONEG_DISABLE &&
2512                     (lc->supported & speed_to_caps(speed)))
2513                         return 0;
2514                 return -EINVAL;
2515         }
2516
2517         if (cmd->autoneg == AUTONEG_DISABLE) {
2518                 cap = speed_to_caps(speed);
2519
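                /* A forced speed is only honoured for 100M here; 1G, 10G and
                 * 40G settings are rejected below, presumably because those
                 * link speeds require autonegotiation on these adapters.
                 */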
2520                 if (!(lc->supported & cap) ||
2521                     (speed == 1000) ||
2522                     (speed == 10000) ||
2523                     (speed == 40000))
2524                         return -EINVAL;
2525                 lc->requested_speed = cap;
2526                 lc->advertising = 0;
2527         } else {
2528                 cap = to_fw_linkcaps(cmd->advertising);
2529                 if (!(lc->supported & cap))
2530                         return -EINVAL;
2531                 lc->requested_speed = 0;
2532                 lc->advertising = cap | FW_PORT_CAP_ANEG;
2533         }
2534         lc->autoneg = cmd->autoneg;
2535
2536         if (netif_running(dev))
2537                 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2538                                      lc);
2539         return 0;
2540 }
2541
2542 static void get_pauseparam(struct net_device *dev,
2543                            struct ethtool_pauseparam *epause)
2544 {
2545         struct port_info *p = netdev_priv(dev);
2546
2547         epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2548         epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2549         epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2550 }
2551
2552 static int set_pauseparam(struct net_device *dev,
2553                           struct ethtool_pauseparam *epause)
2554 {
2555         struct port_info *p = netdev_priv(dev);
2556         struct link_config *lc = &p->link_cfg;
2557
2558         if (epause->autoneg == AUTONEG_DISABLE)
2559                 lc->requested_fc = 0;
2560         else if (lc->supported & FW_PORT_CAP_ANEG)
2561                 lc->requested_fc = PAUSE_AUTONEG;
2562         else
2563                 return -EINVAL;
2564
2565         if (epause->rx_pause)
2566                 lc->requested_fc |= PAUSE_RX;
2567         if (epause->tx_pause)
2568                 lc->requested_fc |= PAUSE_TX;
2569         if (netif_running(dev))
2570                 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2571                                      lc);
2572         return 0;
2573 }
2574
2575 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2576 {
2577         const struct port_info *pi = netdev_priv(dev);
2578         const struct sge *s = &pi->adapter->sge;
2579
2580         e->rx_max_pending = MAX_RX_BUFFERS;
2581         e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2582         e->rx_jumbo_max_pending = 0;
2583         e->tx_max_pending = MAX_TXQ_ENTRIES;
2584
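        /* The advertised free-list size excludes 8 entries that the driver
         * accounts for internally; note the matching +8 in set_sge_param()
         * below.
         */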
2585         e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2586         e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2587         e->rx_jumbo_pending = 0;
2588         e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2589 }
2590
2591 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2592 {
2593         int i;
2594         const struct port_info *pi = netdev_priv(dev);
2595         struct adapter *adapter = pi->adapter;
2596         struct sge *s = &adapter->sge;
2597
2598         if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2599             e->tx_pending > MAX_TXQ_ENTRIES ||
2600             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2601             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2602             e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2603                 return -EINVAL;
2604
2605         if (adapter->flags & FULL_INIT_DONE)
2606                 return -EBUSY;
2607
2608         for (i = 0; i < pi->nqsets; ++i) {
2609                 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2610                 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2611                 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2612         }
2613         return 0;
2614 }
2615
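/* Linear nearest-value search over the adapter's SGE hold-off timer table;
 * returns the index whose timer value is closest to the requested time.
 * closest_thres() below does the same over the interrupt packet-count
 * thresholds.
 */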
2616 static int closest_timer(const struct sge *s, int time)
2617 {
2618         int i, delta, match = 0, min_delta = INT_MAX;
2619
2620         for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2621                 delta = time - s->timer_val[i];
2622                 if (delta < 0)
2623                         delta = -delta;
2624                 if (delta < min_delta) {
2625                         min_delta = delta;
2626                         match = i;
2627                 }
2628         }
2629         return match;
2630 }
2631
2632 static int closest_thres(const struct sge *s, int thres)
2633 {
2634         int i, delta, match = 0, min_delta = INT_MAX;
2635
2636         for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2637                 delta = thres - s->counter_val[i];
2638                 if (delta < 0)
2639                         delta = -delta;
2640                 if (delta < min_delta) {
2641                         min_delta = delta;
2642                         match = i;
2643                 }
2644         }
2645         return match;
2646 }
2647
2648 /*
2649  * Return a queue's interrupt hold-off time in us.  0 means no timer.
2650  */
2651 static unsigned int qtimer_val(const struct adapter *adap,
2652                                const struct sge_rspq *q)
2653 {
2654         unsigned int idx = q->intr_params >> 1;
2655
2656         return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2657 }
2658
2659 /**
2660  *      set_rspq_intr_params - set a queue's interrupt holdoff parameters
2661  *      @q: the Rx queue
2662  *      @us: the hold-off time in us, or 0 to disable timer
2663  *      @cnt: the hold-off packet count, or 0 to disable counter
2664  *
2665  *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
2666  *      one of the two needs to be enabled for the queue to generate interrupts.
2667  */
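/* Usage sketch (illustrative): set_rspq_intr_params(&q->rspq, 5, 8) picks the
 * SGE timer value closest to 5 us and the packet-count threshold closest to 8.
 * Passing 0 for both falls back to a packet count of 1 with the timer
 * disabled, so the queue still generates interrupts.
 */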
2668 static int set_rspq_intr_params(struct sge_rspq *q,
2669                                 unsigned int us, unsigned int cnt)
2670 {
2671         struct adapter *adap = q->adap;
2672
2673         if ((us | cnt) == 0)
2674                 cnt = 1;
2675
2676         if (cnt) {
2677                 int err;
2678                 u32 v, new_idx;
2679
2680                 new_idx = closest_thres(&adap->sge, cnt);
2681                 if (q->desc && q->pktcnt_idx != new_idx) {
2682                         /* the queue has already been created, update it */
2683                         v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2684                             FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2685                             FW_PARAMS_PARAM_YZ(q->cntxt_id);
2686                         err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2687                                             &new_idx);
2688                         if (err)
2689                                 return err;
2690                 }
2691                 q->pktcnt_idx = new_idx;
2692         }
2693
2694         us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2695         q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2696         return 0;
2697 }
2698
2699 /**
2700  * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
2701  * @dev: the network device
2702  * @us: the hold-off time in us, or 0 to disable timer
2703  * @cnt: the hold-off packet count, or 0 to disable counter
2704  *
2705  * Set the RX interrupt hold-off parameters for a network device.
2706  */
2707 static int set_rx_intr_params(struct net_device *dev,
2708                               unsigned int us, unsigned int cnt)
2709 {
2710         int i, err;
2711         struct port_info *pi = netdev_priv(dev);
2712         struct adapter *adap = pi->adapter;
2713         struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2714
2715         for (i = 0; i < pi->nqsets; i++, q++) {
2716                 err = set_rspq_intr_params(&q->rspq, us, cnt);
2717                 if (err)
2718                         return err;
2719         }
2720         return 0;
2721 }
2722
2723 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2724 {
2725         return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2726                                   c->rx_max_coalesced_frames);
2727 }
2728
2729 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2730 {
2731         const struct port_info *pi = netdev_priv(dev);
2732         const struct adapter *adap = pi->adapter;
2733         const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2734
2735         c->rx_coalesce_usecs = qtimer_val(adap, rq);
2736         c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2737                 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2738         return 0;
2739 }
2740
2741 /**
2742  *      eeprom_ptov - translate a physical EEPROM address to virtual
2743  *      @phys_addr: the physical EEPROM address
2744  *      @fn: the PCI function number
2745  *      @sz: size of function-specific area
2746  *
2747  *      Translate a physical EEPROM address to virtual.  The first 1K is
2748  *      accessed through virtual addresses starting at 31K, the rest is
2749  *      accessed through virtual addresses starting at 0.
2750  *
2751  *      The mapping is as follows:
2752  *      [0..1K) -> [31K..32K)
2753  *      [1K..1K+A) -> [31K-A..31K)
2754  *      [1K+A..ES) -> [0..ES-A-1K)
2755  *
2756  *      where A = @fn * @sz, and ES = EEPROM size.
2757  */
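/* Worked example (illustrative): with @sz = 1024 and @fn = 4, A = 4096, so
 * physical address 512 maps to 512 + 31744 = 32256, physical address 1124
 * maps to 31744 - 4096 + 100 = 27748, and physical address 6144 maps to
 * 6144 - 1024 - 4096 = 1024.
 */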
2758 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2759 {
2760         fn *= sz;
2761         if (phys_addr < 1024)
2762                 return phys_addr + (31 << 10);
2763         if (phys_addr < 1024 + fn)
2764                 return 31744 - fn + phys_addr - 1024;
2765         if (phys_addr < EEPROMSIZE)
2766                 return phys_addr - 1024 - fn;
2767         return -EINVAL;
2768 }
2769
2770 /*
2771  * The next two routines implement eeprom read/write from physical addresses.
2772  */
2773 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2774 {
2775         int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2776
2777         if (vaddr >= 0)
2778                 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2779         return vaddr < 0 ? vaddr : 0;
2780 }
2781
2782 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2783 {
2784         int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2785
2786         if (vaddr >= 0)
2787                 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2788         return vaddr < 0 ? vaddr : 0;
2789 }
2790
2791 #define EEPROM_MAGIC 0x38E2F10C
2792
2793 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2794                       u8 *data)
2795 {
2796         int i, err = 0;
2797         struct adapter *adapter = netdev2adap(dev);
2798
2799         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2800         if (!buf)
2801                 return -ENOMEM;
2802
2803         e->magic = EEPROM_MAGIC;
2804         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2805                 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2806
2807         if (!err)
2808                 memcpy(data, buf + e->offset, e->len);
2809         kfree(buf);
2810         return err;
2811 }
2812
2813 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2814                       u8 *data)
2815 {
2816         u8 *buf;
2817         int err = 0;
2818         u32 aligned_offset, aligned_len, *p;
2819         struct adapter *adapter = netdev2adap(dev);
2820
2821         if (eeprom->magic != EEPROM_MAGIC)
2822                 return -EINVAL;
2823
2824         aligned_offset = eeprom->offset & ~3;
2825         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2826
2827         if (adapter->fn > 0) {
2828                 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2829
2830                 if (aligned_offset < start ||
2831                     aligned_offset + aligned_len > start + EEPROMPFSIZE)
2832                         return -EPERM;
2833         }
2834
2835         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2836                 /*
2837                  * RMW possibly needed for first or last words.
2838                  */
2839                 buf = kmalloc(aligned_len, GFP_KERNEL);
2840                 if (!buf)
2841                         return -ENOMEM;
2842                 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2843                 if (!err && aligned_len > 4)
2844                         err = eeprom_rd_phys(adapter,
2845                                              aligned_offset + aligned_len - 4,
2846                                              (u32 *)&buf[aligned_len - 4]);
2847                 if (err)
2848                         goto out;
2849                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2850         } else
2851                 buf = data;
2852
2853         err = t4_seeprom_wp(adapter, false);
2854         if (err)
2855                 goto out;
2856
2857         for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2858                 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2859                 aligned_offset += 4;
2860         }
2861
2862         if (!err)
2863                 err = t4_seeprom_wp(adapter, true);
2864 out:
2865         if (buf != data)
2866                 kfree(buf);
2867         return err;
2868 }
2869
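     /*
      * ethtool flash_device handler: fetch a firmware image by name from
      * userspace with request_firmware() and write it to the adapter's flash
      * with t4_load_fw().
      */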
2870 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2871 {
2872         int ret;
2873         const struct firmware *fw;
2874         struct adapter *adap = netdev2adap(netdev);
2875
2876         ef->data[sizeof(ef->data) - 1] = '\0';
2877         ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2878         if (ret < 0)
2879                 return ret;
2880
2881         ret = t4_load_fw(adap, fw->data, fw->size);
2882         release_firmware(fw);
2883         if (!ret)
2884                 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2885         return ret;
2886 }
2887
2888 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2889 #define BCAST_CRC 0xa0ccc1a6
2890
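     /*
      * Wake-on-LAN support.  The driver advertises magic-packet and broadcast
      * wake-ups; the broadcast case is programmed through the adapter's
      * pattern-match facility using the pre-computed broadcast CRC above.
      */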
2891 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2892 {
2893         wol->supported = WOL_SUPPORTED;
2894         wol->wolopts = netdev2adap(dev)->wol;
2895         memset(&wol->sopass, 0, sizeof(wol->sopass));
2896 }
2897
2898 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2899 {
2900         int err = 0;
2901         struct port_info *pi = netdev_priv(dev);
2902
2903         if (wol->wolopts & ~WOL_SUPPORTED)
2904                 return -EINVAL;
2905         t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2906                             (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2907         if (wol->wolopts & WAKE_BCAST) {
2908                 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2909                                         ~0ULL, 0, false);
2910                 if (!err)
2911                         err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2912                                                 ~6ULL, ~0ULL, BCAST_CRC, true);
2913         } else
2914                 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2915         return err;
2916 }
2917
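     /*
      * Handle netdev feature changes.  Only hardware VLAN RX tag stripping
      * needs to reach the firmware; it is applied through the VI's RX mode,
      * and the feature bit is rolled back in dev->features if that fails.
      */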
2918 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2919 {
2920         const struct port_info *pi = netdev_priv(dev);
2921         netdev_features_t changed = dev->features ^ features;
2922         int err;
2923
2924         if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2925                 return 0;
2926
2927         err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2928                             -1, -1, -1,
2929                             !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2930         if (unlikely(err))
2931                 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2932         return err;
2933 }
2934
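     /*
      * ethtool RSS indirection table accessors.  The table is cached in the
      * port_info structure; updates are written to the hardware only once the
      * adapter has completed full initialization.
      */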
2935 static u32 get_rss_table_size(struct net_device *dev)
2936 {
2937         const struct port_info *pi = netdev_priv(dev);
2938
2939         return pi->rss_size;
2940 }
2941
2942 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
2943 {
2944         const struct port_info *pi = netdev_priv(dev);
2945         unsigned int n = pi->rss_size;
2946
2947         while (n--)
2948                 p[n] = pi->rss[n];
2949         return 0;
2950 }
2951
2952 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
2953 {
2954         unsigned int i;
2955         struct port_info *pi = netdev_priv(dev);
2956
2957         for (i = 0; i < pi->rss_size; i++)
2958                 pi->rss[i] = p[i];
2959         if (pi->adapter->flags & FULL_INIT_DONE)
2960                 return write_rss(pi, pi->rss);
2961         return 0;
2962 }
2963
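     /*
      * Report RX network flow classification info: which header fields feed
      * the RSS hash for each flow type (derived from the VI's RSS mode) and
      * the number of RX queues (ETHTOOL_GRXRINGS).
      */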
2964 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2965                      u32 *rules)
2966 {
2967         const struct port_info *pi = netdev_priv(dev);
2968
2969         switch (info->cmd) {
2970         case ETHTOOL_GRXFH: {
2971                 unsigned int v = pi->rss_mode;
2972
2973                 info->data = 0;
2974                 switch (info->flow_type) {
2975                 case TCP_V4_FLOW:
2976                         if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2977                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2978                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2979                         else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2980                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2981                         break;
2982                 case UDP_V4_FLOW:
2983                         if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2984                             (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2985                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2986                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2987                         else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2988                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2989                         break;
2990                 case SCTP_V4_FLOW:
2991                 case AH_ESP_V4_FLOW:
2992                 case IPV4_FLOW:
2993                         if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2994                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2995                         break;
2996                 case TCP_V6_FLOW:
2997                         if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2998                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2999                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
3000                         else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3001                                 info->data = RXH_IP_SRC | RXH_IP_DST;
3002                         break;
3003                 case UDP_V6_FLOW:
3004                         if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3005                             (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3006                                 info->data = RXH_IP_SRC | RXH_IP_DST |
3007                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
3008                         else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3009                                 info->data = RXH_IP_SRC | RXH_IP_DST;
3010                         break;
3011                 case SCTP_V6_FLOW:
3012                 case AH_ESP_V6_FLOW:
3013                 case IPV6_FLOW:
3014                         if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3015                                 info->data = RXH_IP_SRC | RXH_IP_DST;
3016                         break;
3017                 }
3018                 return 0;
3019         }
3020         case ETHTOOL_GRXRINGS:
3021                 info->data = pi->nqsets;
3022                 return 0;
3023         }
3024         return -EOPNOTSUPP;
3025 }
3026
3027 static const struct ethtool_ops cxgb_ethtool_ops = {
3028         .get_settings      = get_settings,
3029         .set_settings      = set_settings,
3030         .get_drvinfo       = get_drvinfo,
3031         .get_msglevel      = get_msglevel,
3032         .set_msglevel      = set_msglevel,
3033         .get_ringparam     = get_sge_param,
3034         .set_ringparam     = set_sge_param,
3035         .get_coalesce      = get_coalesce,
3036         .set_coalesce      = set_coalesce,
3037         .get_eeprom_len    = get_eeprom_len,
3038         .get_eeprom        = get_eeprom,
3039         .set_eeprom        = set_eeprom,
3040         .get_pauseparam    = get_pauseparam,
3041         .set_pauseparam    = set_pauseparam,
3042         .get_link          = ethtool_op_get_link,
3043         .get_strings       = get_strings,
3044         .set_phys_id       = identify_port,
3045         .nway_reset        = restart_autoneg,
3046         .get_sset_count    = get_sset_count,
3047         .get_ethtool_stats = get_stats,
3048         .get_regs_len      = get_regs_len,
3049         .get_regs          = get_regs,
3050         .get_wol           = get_wol,
3051         .set_wol           = set_wol,
3052         .get_rxnfc         = get_rxnfc,
3053         .get_rxfh_indir_size = get_rss_table_size,
3054         .get_rxfh          = get_rss_table,
3055         .set_rxfh          = set_rss_table,
3056         .flash_device      = set_flash,
3057 };
3058
3059 /*
3060  * debugfs support
3061  */
3062 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3063                         loff_t *ppos)
3064 {
3065         loff_t pos = *ppos;
3066         loff_t avail = file_inode(file)->i_size;
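             /*
              * add_debugfs_mem() below hands us the adapter pointer with the
              * memory region index (MEM_EDC0, MEM_EDC1, MEM_MC, ...) encoded
              * in its two low-order bits; unpack both here.  This relies on
              * the adapter structure being at least 4-byte aligned.
              */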
3067         unsigned int mem = (uintptr_t)file->private_data & 3;
3068         struct adapter *adap = file->private_data - mem;
3069         __be32 *data;
3070         int ret;
3071
3072         if (pos < 0)
3073                 return -EINVAL;
3074         if (pos >= avail)
3075                 return 0;
3076         if (count > avail - pos)
3077                 count = avail - pos;
3078
3079         data = t4_alloc_mem(count);
3080         if (!data)
3081                 return -ENOMEM;
3082
3083         spin_lock(&adap->win0_lock);
3084         ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3085         spin_unlock(&adap->win0_lock);
3086         if (ret) {
3087                 t4_free_mem(data);
3088                 return ret;
3089         }
3090         ret = copy_to_user(buf, data, count);
3091
3092         t4_free_mem(data);
3093         if (ret)
3094                 return -EFAULT;
3095
3096         *ppos = pos + count;
3097         return count;
3098 }
3099
3100 static const struct file_operations mem_debugfs_fops = {
3101         .owner   = THIS_MODULE,
3102         .open    = simple_open,
3103         .read    = mem_read,
3104         .llseek  = default_llseek,
3105 };
3106
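     /*
      * Create a read-only debugfs file exposing one adapter memory region.
      * The region index is folded into the low bits of the private_data
      * pointer and the inode size is set to the region size so that reads in
      * mem_read() are bounded correctly.
      */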
3107 static void add_debugfs_mem(struct adapter *adap, const char *name,
3108                             unsigned int idx, unsigned int size_mb)
3109 {
3110         struct dentry *de;
3111
3112         de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3113                                  (void *)adap + idx, &mem_debugfs_fops);
3114         if (de && de->d_inode)
3115                 de->d_inode->i_size = size_mb << 20;
3116 }
3117
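     /*
      * Populate the debugfs directory: one node for each memory region that
      * MA_TARGET_MEM_ENABLE reports as present (EDC0/EDC1 and either a single
      * MC on T4 or MC0/MC1 on later chips), plus the L2T table if allocated.
      */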
3118 static int setup_debugfs(struct adapter *adap)
3119 {
3120         int i;
3121         u32 size;
3122
3123         if (IS_ERR_OR_NULL(adap->debugfs_root))
3124                 return -1;
3125
3126         i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
3127         if (i & EDRAM0_ENABLE) {
3128                 size = t4_read_reg(adap, MA_EDRAM0_BAR);
3129                 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
3130         }
3131         if (i & EDRAM1_ENABLE) {
3132                 size = t4_read_reg(adap, MA_EDRAM1_BAR);
3133                 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3134         }
3135         if (is_t4(adap->params.chip)) {
3136                 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3137                 if (i & EXT_MEM_ENABLE)
3138                         add_debugfs_mem(adap, "mc", MEM_MC,
3139                                         EXT_MEM_SIZE_GET(size));
3140         } else {
3141                 if (i & EXT_MEM_ENABLE) {
3142                         size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3143                         add_debugfs_mem(adap, "mc0", MEM_MC0,
3144                                         EXT_MEM_SIZE_GET(size));
3145                 }
3146                 if (i & EXT_MEM1_ENABLE) {
3147                         size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3148                         add_debugfs_mem(adap, "mc1", MEM_MC1,
3149                                         EXT_MEM_SIZE_GET(size));
3150                 }
3151         }
3152         if (adap->l2t)
3153                 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3154                                     &t4_l2t_fops);
3155         return 0;
3156 }
3157
3158 /*
3159  * upper-layer driver support
3160  */
3161
3162 /*
3163  * Allocate an active-open TID and associate the supplied caller data with it.
3164  */
3165 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3166 {
3167         int atid = -1;
3168
3169         spin_lock_bh(&t->atid_lock);
3170         if (t->afree) {
3171                 union aopen_entry *p = t->afree;
3172
3173                 atid = (p - t->atid_tab) + t->atid_base;
3174                 t->afree = p->next;
3175                 p->data = data;
3176                 t->atids_in_use++;
3177         }
3178         spin_unlock_bh(&t->atid_lock);
3179         return atid;
3180 }
3181 EXPORT_SYMBOL(cxgb4_alloc_atid);
3182
3183 /*
3184  * Release an active-open TID.
3185  */
3186 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3187 {
3188         union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3189
3190         spin_lock_bh(&t->atid_lock);
3191         p->next = t->afree;
3192         t->afree = p;
3193         t->atids_in_use--;
3194         spin_unlock_bh(&t->atid_lock);
3195 }
3196 EXPORT_SYMBOL(cxgb4_free_atid);
3197
3198 /*
3199  * Allocate a server TID and associate the supplied caller data with it.
3200  */
3201 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3202 {
3203         int stid;
3204
3205         spin_lock_bh(&t->stid_lock);