drivers/net/ethernet/ti/netcp_ethss.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Keystone GBE and XGBE subsystem code
4  *
5  * Copyright (C) 2014 Texas Instruments Incorporated
6  * Authors:     Sandeep Nair <sandeep_n@ti.com>
7  *              Sandeep Paulraj <s-paulraj@ti.com>
8  *              Cyril Chemparathy <cyril@ti.com>
9  *              Santosh Shilimkar <santosh.shilimkar@ti.com>
10  *              Wingman Kwok <w-kwok2@ti.com>
11  */
12
13 #include <linux/io.h>
14 #include <linux/module.h>
15 #include <linux/of_mdio.h>
16 #include <linux/of_net.h>
17 #include <linux/of_address.h>
18 #include <linux/if_vlan.h>
19 #include <linux/ptp_classify.h>
20 #include <linux/net_tstamp.h>
21 #include <linux/ethtool.h>
22
23 #include "cpsw.h"
24 #include "cpsw_ale.h"
25 #include "netcp.h"
26 #include "cpts.h"
27
28 #define NETCP_DRIVER_NAME               "TI KeyStone Ethernet Driver"
29 #define NETCP_DRIVER_VERSION            "v1.0"
30
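/* Layout of the subsystem id_ver register decoded by the macros below:
 * ident in bits 31:16, RTL version in bits 15:11,
 * major version in bits 10:8, minor version in bits 7:0.
 */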
31 #define GBE_IDENT(reg)                  ((reg >> 16) & 0xffff)
32 #define GBE_MAJOR_VERSION(reg)          (reg >> 8 & 0x7)
33 #define GBE_MINOR_VERSION(reg)          (reg & 0xff)
34 #define GBE_RTL_VERSION(reg)            ((reg >> 11) & 0x1f)
35
36 /* 1G Ethernet SS defines */
37 #define GBE_MODULE_NAME                 "netcp-gbe"
38 #define GBE_SS_VERSION_14               0x4ed2
39
40 #define GBE_SS_REG_INDEX                0
41 #define GBE_SGMII34_REG_INDEX           1
42 #define GBE_SM_REG_INDEX                2
43 /* offset relative to base of GBE_SS_REG_INDEX */
44 #define GBE13_SGMII_MODULE_OFFSET       0x100
45 /* offset relative to base of GBE_SM_REG_INDEX */
46 #define GBE13_HOST_PORT_OFFSET          0x34
47 #define GBE13_SLAVE_PORT_OFFSET         0x60
48 #define GBE13_EMAC_OFFSET               0x100
49 #define GBE13_SLAVE_PORT2_OFFSET        0x200
50 #define GBE13_HW_STATS_OFFSET           0x300
51 #define GBE13_CPTS_OFFSET               0x500
52 #define GBE13_ALE_OFFSET                0x600
53 #define GBE13_HOST_PORT_NUM             0
54 #define GBE13_NUM_ALE_ENTRIES           1024
55
56 /* 1G Ethernet NU SS defines */
57 #define GBENU_MODULE_NAME               "netcp-gbenu"
58 #define GBE_SS_ID_NU                    0x4ee6
59 #define GBE_SS_ID_2U                    0x4ee8
60
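/* Tell the GBE subsystem flavours apart by the ident field of ss_version */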
61 #define IS_SS_ID_MU(d) \
62         ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
63          (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
64
65 #define IS_SS_ID_NU(d) \
66         (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
67
68 #define IS_SS_ID_VER_14(d) \
69         (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
70 #define IS_SS_ID_2U(d) \
71         (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
72
73 #define GBENU_SS_REG_INDEX              0
74 #define GBENU_SM_REG_INDEX              1
75 #define GBENU_SGMII_MODULE_OFFSET       0x100
76 #define GBENU_HOST_PORT_OFFSET          0x1000
77 #define GBENU_SLAVE_PORT_OFFSET         0x2000
78 #define GBENU_EMAC_OFFSET               0x2330
79 #define GBENU_HW_STATS_OFFSET           0x1a000
80 #define GBENU_CPTS_OFFSET               0x1d000
81 #define GBENU_ALE_OFFSET                0x1e000
82 #define GBENU_HOST_PORT_NUM             0
83 #define GBENU_SGMII_MODULE_SIZE         0x100
84
85 /* 10G Ethernet SS defines */
86 #define XGBE_MODULE_NAME                "netcp-xgbe"
87 #define XGBE_SS_VERSION_10              0x4ee4
88
89 #define XGBE_SS_REG_INDEX               0
90 #define XGBE_SM_REG_INDEX               1
91 #define XGBE_SERDES_REG_INDEX           2
92
93 /* offset relative to base of XGBE_SS_REG_INDEX */
94 #define XGBE10_SGMII_MODULE_OFFSET      0x100
95 #define IS_SS_ID_XGBE(d)                ((d)->ss_version == XGBE_SS_VERSION_10)
96 /* offset relative to base of XGBE_SM_REG_INDEX */
97 #define XGBE10_HOST_PORT_OFFSET         0x34
98 #define XGBE10_SLAVE_PORT_OFFSET        0x64
99 #define XGBE10_EMAC_OFFSET              0x400
100 #define XGBE10_CPTS_OFFSET              0x600
101 #define XGBE10_ALE_OFFSET               0x700
102 #define XGBE10_HW_STATS_OFFSET          0x800
103 #define XGBE10_HOST_PORT_NUM            0
104 #define XGBE10_NUM_ALE_ENTRIES          2048
105
106 #define GBE_TIMER_INTERVAL                      (HZ / 2)
107
108 /* Soft reset register values */
109 #define SOFT_RESET_MASK                         BIT(0)
110 #define SOFT_RESET                              BIT(0)
111 #define DEVICE_EMACSL_RESET_POLL_COUNT          100
112 #define GMACSL_RET_WARN_RESET_INCOMPLETE        -2
113
114 #define MACSL_RX_ENABLE_CSF                     BIT(23)
115 #define MACSL_ENABLE_EXT_CTL                    BIT(18)
116 #define MACSL_XGMII_ENABLE                      BIT(13)
117 #define MACSL_XGIG_MODE                         BIT(8)
118 #define MACSL_GIG_MODE                          BIT(7)
119 #define MACSL_GMII_ENABLE                       BIT(5)
120 #define MACSL_FULLDUPLEX                        BIT(0)
121
122 #define GBE_CTL_P0_ENABLE                       BIT(2)
123 #define ETH_SW_CTL_P0_TX_CRC_REMOVE             BIT(13)
124 #define GBE13_REG_VAL_STAT_ENABLE_ALL           0xff
125 #define XGBE_REG_VAL_STAT_ENABLE_ALL            0xf
126 #define GBE_STATS_CD_SEL                        BIT(28)
127
128 #define GBE_PORT_MASK(x)                        (BIT(x) - 1)
129 #define GBE_MASK_NO_PORTS                       0
130
131 #define GBE_DEF_1G_MAC_CONTROL                                  \
132                 (MACSL_GIG_MODE | MACSL_GMII_ENABLE |           \
133                  MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
134
135 #define GBE_DEF_10G_MAC_CONTROL                         \
136                 (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE |         \
137                  MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
138
139 #define GBE_STATSA_MODULE                       0
140 #define GBE_STATSB_MODULE                       1
141 #define GBE_STATSC_MODULE                       2
142 #define GBE_STATSD_MODULE                       3
143
144 #define GBENU_STATS0_MODULE                     0
145 #define GBENU_STATS1_MODULE                     1
146 #define GBENU_STATS2_MODULE                     2
147 #define GBENU_STATS3_MODULE                     3
148 #define GBENU_STATS4_MODULE                     4
149 #define GBENU_STATS5_MODULE                     5
150 #define GBENU_STATS6_MODULE                     6
151 #define GBENU_STATS7_MODULE                     7
152 #define GBENU_STATS8_MODULE                     8
153
154 #define XGBE_STATS0_MODULE                      0
155 #define XGBE_STATS1_MODULE                      1
156 #define XGBE_STATS2_MODULE                      2
157
158 /* s: 0-based slave_port */
159 #define SGMII_BASE(d, s) \
160         (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
161
162 #define GBE_TX_QUEUE                            648
163 #define GBE_TXHOOK_ORDER                        0
164 #define GBE_RXHOOK_ORDER                        0
165 #define GBE_DEFAULT_ALE_AGEOUT                  30
166 #define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
167 #define SLAVE_LINK_IS_RGMII(s) \
168         (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
169          ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
170 #define SLAVE_LINK_IS_SGMII(s) \
171         ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
172 #define NETCP_LINK_STATE_INVALID                -1
173
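/* Per-variant register offset helpers: GBE, GBENU and XGBE lay out their
 * switch/host-port/slave-port/emac registers differently, so setup code
 * records each field's offset (via offsetof() on the variant's register
 * struct) and common code reads/writes through GBE_REG_ADDR.
 * For example, GBE_SET_REG_OFS(p, emac_regs, soft_reset) expands to
 *   p->emac_regs_ofs.soft_reset = offsetof(struct gbe_emac_regs, soft_reset);
 * and GBE_REG_ADDR(p, emac_regs, soft_reset) then evaluates to
 *   p->emac_regs + p->emac_regs_ofs.soft_reset.
 */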
174 #define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
175                 offsetof(struct gbe##_##rb, rn)
176 #define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
177                 offsetof(struct gbenu##_##rb, rn)
178 #define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
179                 offsetof(struct xgbe##_##rb, rn)
180 #define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
181
182 #define HOST_TX_PRI_MAP_DEFAULT                 0x00000000
183
184 #if IS_ENABLED(CONFIG_TI_CPTS)
185 /* Px_TS_CTL register fields */
186 #define TS_RX_ANX_F_EN                          BIT(0)
187 #define TS_RX_VLAN_LT1_EN                       BIT(1)
188 #define TS_RX_VLAN_LT2_EN                       BIT(2)
189 #define TS_RX_ANX_D_EN                          BIT(3)
190 #define TS_TX_ANX_F_EN                          BIT(4)
191 #define TS_TX_VLAN_LT1_EN                       BIT(5)
192 #define TS_TX_VLAN_LT2_EN                       BIT(6)
193 #define TS_TX_ANX_D_EN                          BIT(7)
194 #define TS_LT2_EN                               BIT(8)
195 #define TS_RX_ANX_E_EN                          BIT(9)
196 #define TS_TX_ANX_E_EN                          BIT(10)
197 #define TS_MSG_TYPE_EN_SHIFT                    16
198 #define TS_MSG_TYPE_EN_MASK                     0xffff
199
200 /* Px_TS_SEQ_LTYPE register fields */
201 #define TS_SEQ_ID_OFS_SHIFT                     16
202 #define TS_SEQ_ID_OFS_MASK                      0x3f
203
204 /* Px_TS_CTL_LTYPE2 register fields */
205 #define TS_107                                  BIT(16)
206 #define TS_129                                  BIT(17)
207 #define TS_130                                  BIT(18)
208 #define TS_131                                  BIT(19)
209 #define TS_132                                  BIT(20)
210 #define TS_319                                  BIT(21)
211 #define TS_320                                  BIT(22)
212 #define TS_TTL_NONZERO                          BIT(23)
213 #define TS_UNI_EN                               BIT(24)
214 #define TS_UNI_EN_SHIFT                         24
215
216 #define TS_TX_ANX_ALL_EN         \
217         (TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
218
219 #define TS_RX_ANX_ALL_EN         \
220         (TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
221
222 #define TS_CTL_DST_PORT                         TS_319
223 #define TS_CTL_DST_PORT_SHIFT                   21
224
225 #define TS_CTL_MADDR_ALL        \
226         (TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
227
228 #define TS_CTL_MADDR_SHIFT                      16
229
230 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
231 #define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
232 #endif /* CONFIG_TI_CPTS */
233
234 struct xgbe_ss_regs {
235         u32     id_ver;
236         u32     synce_count;
237         u32     synce_mux;
238         u32     control;
239 };
240
241 struct xgbe_switch_regs {
242         u32     id_ver;
243         u32     control;
244         u32     emcontrol;
245         u32     stat_port_en;
246         u32     ptype;
247         u32     soft_idle;
248         u32     thru_rate;
249         u32     gap_thresh;
250         u32     tx_start_wds;
251         u32     flow_control;
252         u32     cppi_thresh;
253 };
254
255 struct xgbe_port_regs {
256         u32     blk_cnt;
257         u32     port_vlan;
258         u32     tx_pri_map;
259         u32     sa_lo;
260         u32     sa_hi;
261         u32     ts_ctl;
262         u32     ts_seq_ltype;
263         u32     ts_vlan;
264         u32     ts_ctl_ltype2;
265         u32     ts_ctl2;
266         u32     control;
267 };
268
269 struct xgbe_host_port_regs {
270         u32     blk_cnt;
271         u32     port_vlan;
272         u32     tx_pri_map;
273         u32     src_id;
274         u32     rx_pri_map;
275         u32     rx_maxlen;
276 };
277
278 struct xgbe_emac_regs {
279         u32     id_ver;
280         u32     mac_control;
281         u32     mac_status;
282         u32     soft_reset;
283         u32     rx_maxlen;
284         u32     __reserved_0;
285         u32     rx_pause;
286         u32     tx_pause;
287         u32     em_control;
288         u32     __reserved_1;
289         u32     tx_gap;
290         u32     rsvd[4];
291 };
292
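/* The hw stats structs below mirror the hardware statistics register
 * blocks one-to-one: field order and the __rsvd/__pad padding must match
 * the hardware layout, since counter addresses are derived with offsetof().
 */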
293 struct xgbe_host_hw_stats {
294         u32     rx_good_frames;
295         u32     rx_broadcast_frames;
296         u32     rx_multicast_frames;
297         u32     __rsvd_0[3];
298         u32     rx_oversized_frames;
299         u32     __rsvd_1;
300         u32     rx_undersized_frames;
301         u32     __rsvd_2;
302         u32     overrun_type4;
303         u32     overrun_type5;
304         u32     rx_bytes;
305         u32     tx_good_frames;
306         u32     tx_broadcast_frames;
307         u32     tx_multicast_frames;
308         u32     __rsvd_3[9];
309         u32     tx_bytes;
310         u32     tx_64byte_frames;
311         u32     tx_65_to_127byte_frames;
312         u32     tx_128_to_255byte_frames;
313         u32     tx_256_to_511byte_frames;
314         u32     tx_512_to_1023byte_frames;
315         u32     tx_1024byte_frames;
316         u32     net_bytes;
317         u32     rx_sof_overruns;
318         u32     rx_mof_overruns;
319         u32     rx_dma_overruns;
320 };
321
322 struct xgbe_hw_stats {
323         u32     rx_good_frames;
324         u32     rx_broadcast_frames;
325         u32     rx_multicast_frames;
326         u32     rx_pause_frames;
327         u32     rx_crc_errors;
328         u32     rx_align_code_errors;
329         u32     rx_oversized_frames;
330         u32     rx_jabber_frames;
331         u32     rx_undersized_frames;
332         u32     rx_fragments;
333         u32     overrun_type4;
334         u32     overrun_type5;
335         u32     rx_bytes;
336         u32     tx_good_frames;
337         u32     tx_broadcast_frames;
338         u32     tx_multicast_frames;
339         u32     tx_pause_frames;
340         u32     tx_deferred_frames;
341         u32     tx_collision_frames;
342         u32     tx_single_coll_frames;
343         u32     tx_mult_coll_frames;
344         u32     tx_excessive_collisions;
345         u32     tx_late_collisions;
346         u32     tx_underrun;
347         u32     tx_carrier_sense_errors;
348         u32     tx_bytes;
349         u32     tx_64byte_frames;
350         u32     tx_65_to_127byte_frames;
351         u32     tx_128_to_255byte_frames;
352         u32     tx_256_to_511byte_frames;
353         u32     tx_512_to_1023byte_frames;
354         u32     tx_1024byte_frames;
355         u32     net_bytes;
356         u32     rx_sof_overruns;
357         u32     rx_mof_overruns;
358         u32     rx_dma_overruns;
359 };
360
361 struct gbenu_ss_regs {
362         u32     id_ver;
363         u32     synce_count;            /* NU */
364         u32     synce_mux;              /* NU */
365         u32     control;                /* 2U */
366         u32     __rsvd_0[2];            /* 2U */
367         u32     rgmii_status;           /* 2U */
368         u32     ss_status;              /* 2U */
369 };
370
371 struct gbenu_switch_regs {
372         u32     id_ver;
373         u32     control;
374         u32     __rsvd_0[2];
375         u32     emcontrol;
376         u32     stat_port_en;
377         u32     ptype;                  /* NU */
378         u32     soft_idle;
379         u32     thru_rate;              /* NU */
380         u32     gap_thresh;             /* NU */
381         u32     tx_start_wds;           /* NU */
382         u32     eee_prescale;           /* 2U */
383         u32     tx_g_oflow_thresh_set;  /* NU */
384         u32     tx_g_oflow_thresh_clr;  /* NU */
385         u32     tx_g_buf_thresh_set_l;  /* NU */
386         u32     tx_g_buf_thresh_set_h;  /* NU */
387         u32     tx_g_buf_thresh_clr_l;  /* NU */
388         u32     tx_g_buf_thresh_clr_h;  /* NU */
389 };
390
391 struct gbenu_port_regs {
392         u32     __rsvd_0;
393         u32     control;
394         u32     max_blks;               /* 2U */
395         u32     mem_align1;
396         u32     blk_cnt;
397         u32     port_vlan;
398         u32     tx_pri_map;             /* NU */
399         u32     pri_ctl;                /* 2U */
400         u32     rx_pri_map;
401         u32     rx_maxlen;
402         u32     tx_blks_pri;            /* NU */
403         u32     __rsvd_1;
404         u32     idle2lpi;               /* 2U */
405         u32     lpi2idle;               /* 2U */
406         u32     eee_status;             /* 2U */
407         u32     __rsvd_2;
408         u32     __rsvd_3[176];          /* NU: more to add */
409         u32     __rsvd_4[2];
410         u32     sa_lo;
411         u32     sa_hi;
412         u32     ts_ctl;
413         u32     ts_seq_ltype;
414         u32     ts_vlan;
415         u32     ts_ctl_ltype2;
416         u32     ts_ctl2;
417 };
418
419 struct gbenu_host_port_regs {
420         u32     __rsvd_0;
421         u32     control;
422         u32     flow_id_offset;         /* 2U */
423         u32     __rsvd_1;
424         u32     blk_cnt;
425         u32     port_vlan;
426         u32     tx_pri_map;             /* NU */
427         u32     pri_ctl;
428         u32     rx_pri_map;
429         u32     rx_maxlen;
430         u32     tx_blks_pri;            /* NU */
431         u32     __rsvd_2;
432         u32     idle2lpi;               /* 2U */
433         u32     lpi2wake;               /* 2U */
434         u32     eee_status;             /* 2U */
435         u32     __rsvd_3;
436         u32     __rsvd_4[184];          /* NU */
437         u32     host_blks_pri;          /* NU */
438 };
439
440 struct gbenu_emac_regs {
441         u32     mac_control;
442         u32     mac_status;
443         u32     soft_reset;
444         u32     boff_test;
445         u32     rx_pause;
446         u32     __rsvd_0[11];           /* NU */
447         u32     tx_pause;
448         u32     __rsvd_1[11];           /* NU */
449         u32     em_control;
450         u32     tx_gap;
451 };
452
453 /* Some hw stat regs are applicable to the slave ports only;
454  * this is handled by the gbenu_et_stats struct.  Also, some
455  * counters exist only on SS version NU and some only on 2U.
456  */
457 struct gbenu_hw_stats {
458         u32     rx_good_frames;
459         u32     rx_broadcast_frames;
460         u32     rx_multicast_frames;
461         u32     rx_pause_frames;                /* slave */
462         u32     rx_crc_errors;
463         u32     rx_align_code_errors;           /* slave */
464         u32     rx_oversized_frames;
465         u32     rx_jabber_frames;               /* slave */
466         u32     rx_undersized_frames;
467         u32     rx_fragments;                   /* slave */
468         u32     ale_drop;
469         u32     ale_overrun_drop;
470         u32     rx_bytes;
471         u32     tx_good_frames;
472         u32     tx_broadcast_frames;
473         u32     tx_multicast_frames;
474         u32     tx_pause_frames;                /* slave */
475         u32     tx_deferred_frames;             /* slave */
476         u32     tx_collision_frames;            /* slave */
477         u32     tx_single_coll_frames;          /* slave */
478         u32     tx_mult_coll_frames;            /* slave */
479         u32     tx_excessive_collisions;        /* slave */
480         u32     tx_late_collisions;             /* slave */
481         u32     rx_ipg_error;                   /* slave 10G only */
482         u32     tx_carrier_sense_errors;        /* slave */
483         u32     tx_bytes;
484         u32     tx_64B_frames;
485         u32     tx_65_to_127B_frames;
486         u32     tx_128_to_255B_frames;
487         u32     tx_256_to_511B_frames;
488         u32     tx_512_to_1023B_frames;
489         u32     tx_1024B_frames;
490         u32     net_bytes;
491         u32     rx_bottom_fifo_drop;
492         u32     rx_port_mask_drop;
493         u32     rx_top_fifo_drop;
494         u32     ale_rate_limit_drop;
495         u32     ale_vid_ingress_drop;
496         u32     ale_da_eq_sa_drop;
497         u32     __rsvd_0[3];
498         u32     ale_unknown_ucast;
499         u32     ale_unknown_ucast_bytes;
500         u32     ale_unknown_mcast;
501         u32     ale_unknown_mcast_bytes;
502         u32     ale_unknown_bcast;
503         u32     ale_unknown_bcast_bytes;
504         u32     ale_pol_match;
505         u32     ale_pol_match_red;              /* NU */
506         u32     ale_pol_match_yellow;           /* NU */
507         u32     __rsvd_1[44];
508         u32     tx_mem_protect_err;
509         /* following NU only */
510         u32     tx_pri0;
511         u32     tx_pri1;
512         u32     tx_pri2;
513         u32     tx_pri3;
514         u32     tx_pri4;
515         u32     tx_pri5;
516         u32     tx_pri6;
517         u32     tx_pri7;
518         u32     tx_pri0_bcnt;
519         u32     tx_pri1_bcnt;
520         u32     tx_pri2_bcnt;
521         u32     tx_pri3_bcnt;
522         u32     tx_pri4_bcnt;
523         u32     tx_pri5_bcnt;
524         u32     tx_pri6_bcnt;
525         u32     tx_pri7_bcnt;
526         u32     tx_pri0_drop;
527         u32     tx_pri1_drop;
528         u32     tx_pri2_drop;
529         u32     tx_pri3_drop;
530         u32     tx_pri4_drop;
531         u32     tx_pri5_drop;
532         u32     tx_pri6_drop;
533         u32     tx_pri7_drop;
534         u32     tx_pri0_drop_bcnt;
535         u32     tx_pri1_drop_bcnt;
536         u32     tx_pri2_drop_bcnt;
537         u32     tx_pri3_drop_bcnt;
538         u32     tx_pri4_drop_bcnt;
539         u32     tx_pri5_drop_bcnt;
540         u32     tx_pri6_drop_bcnt;
541         u32     tx_pri7_drop_bcnt;
542 };
543
544 #define GBENU_HW_STATS_REG_MAP_SZ       0x200
545
546 struct gbe_ss_regs {
547         u32     id_ver;
548         u32     synce_count;
549         u32     synce_mux;
550 };
551
552 struct gbe_ss_regs_ofs {
553         u16     id_ver;
554         u16     control;
555         u16     rgmii_status; /* 2U */
556 };
557
558 struct gbe_switch_regs {
559         u32     id_ver;
560         u32     control;
561         u32     soft_reset;
562         u32     stat_port_en;
563         u32     ptype;
564         u32     soft_idle;
565         u32     thru_rate;
566         u32     gap_thresh;
567         u32     tx_start_wds;
568         u32     flow_control;
569 };
570
571 struct gbe_switch_regs_ofs {
572         u16     id_ver;
573         u16     control;
574         u16     soft_reset;
575         u16     emcontrol;
576         u16     stat_port_en;
577         u16     ptype;
578         u16     flow_control;
579 };
580
581 struct gbe_port_regs {
582         u32     max_blks;
583         u32     blk_cnt;
584         u32     port_vlan;
585         u32     tx_pri_map;
586         u32     sa_lo;
587         u32     sa_hi;
588         u32     ts_ctl;
589         u32     ts_seq_ltype;
590         u32     ts_vlan;
591         u32     ts_ctl_ltype2;
592         u32     ts_ctl2;
593 };
594
595 struct gbe_port_regs_ofs {
596         u16     port_vlan;
597         u16     tx_pri_map;
598         u16     rx_pri_map;
599         u16     sa_lo;
600         u16     sa_hi;
601         u16     ts_ctl;
602         u16     ts_seq_ltype;
603         u16     ts_vlan;
604         u16     ts_ctl_ltype2;
605         u16     ts_ctl2;
606         u16     rx_maxlen;      /* 2U, NU */
607 };
608
609 struct gbe_host_port_regs {
610         u32     src_id;
611         u32     port_vlan;
612         u32     rx_pri_map;
613         u32     rx_maxlen;
614 };
615
616 struct gbe_host_port_regs_ofs {
617         u16     port_vlan;
618         u16     tx_pri_map;
619         u16     rx_maxlen;
620 };
621
622 struct gbe_emac_regs {
623         u32     id_ver;
624         u32     mac_control;
625         u32     mac_status;
626         u32     soft_reset;
627         u32     rx_maxlen;
628         u32     __reserved_0;
629         u32     rx_pause;
630         u32     tx_pause;
631         u32     __reserved_1;
632         u32     rx_pri_map;
633         u32     rsvd[6];
634 };
635
636 struct gbe_emac_regs_ofs {
637         u16     mac_control;
638         u16     soft_reset;
639         u16     rx_maxlen;
640 };
641
642 struct gbe_hw_stats {
643         u32     rx_good_frames;
644         u32     rx_broadcast_frames;
645         u32     rx_multicast_frames;
646         u32     rx_pause_frames;
647         u32     rx_crc_errors;
648         u32     rx_align_code_errors;
649         u32     rx_oversized_frames;
650         u32     rx_jabber_frames;
651         u32     rx_undersized_frames;
652         u32     rx_fragments;
653         u32     __pad_0[2];
654         u32     rx_bytes;
655         u32     tx_good_frames;
656         u32     tx_broadcast_frames;
657         u32     tx_multicast_frames;
658         u32     tx_pause_frames;
659         u32     tx_deferred_frames;
660         u32     tx_collision_frames;
661         u32     tx_single_coll_frames;
662         u32     tx_mult_coll_frames;
663         u32     tx_excessive_collisions;
664         u32     tx_late_collisions;
665         u32     tx_underrun;
666         u32     tx_carrier_sense_errors;
667         u32     tx_bytes;
668         u32     tx_64byte_frames;
669         u32     tx_65_to_127byte_frames;
670         u32     tx_128_to_255byte_frames;
671         u32     tx_256_to_511byte_frames;
672         u32     tx_512_to_1023byte_frames;
673         u32     tx_1024byte_frames;
674         u32     net_bytes;
675         u32     rx_sof_overruns;
676         u32     rx_mof_overruns;
677         u32     rx_dma_overruns;
678 };
679
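/* Room for the largest variant: GBENU's host stats module plus up to
 * eight slave-port stats modules.  GBE_HW_STATS_REG_MAP_SZ is the
 * per-module stats window stride (GBENU uses the 0x200 stride above).
 */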
680 #define GBE_MAX_HW_STAT_MODS                    9
681 #define GBE_HW_STATS_REG_MAP_SZ                 0x100
682
683 struct ts_ctl {
684         int     uni;
685         u8      dst_port_map;
686         u8      maddr_map;
687         u8      ts_mcast_type;
688 };
689
690 struct gbe_slave {
691         void __iomem                    *port_regs;
692         void __iomem                    *emac_regs;
693         struct gbe_port_regs_ofs        port_regs_ofs;
694         struct gbe_emac_regs_ofs        emac_regs_ofs;
695         int                             slave_num; /* 0 based logical number */
696         int                             port_num;  /* actual port number */
697         atomic_t                        link_state;
698         bool                            open;
699         struct phy_device               *phy;
700         u32                             link_interface;
701         u32                             mac_control;
702         u8                              phy_port_t;
703         struct device_node              *node;
704         struct device_node              *phy_node;
705         struct ts_ctl                   ts_ctl;
706         struct list_head                slave_list;
707 };
708
709 struct gbe_priv {
710         struct device                   *dev;
711         struct netcp_device             *netcp_device;
712         struct timer_list               timer;
713         u32                             num_slaves;
714         u32                             ale_entries;
715         u32                             ale_ports;
716         bool                            enable_ale;
717         u8                              max_num_slaves;
718         u8                              max_num_ports; /* max_num_slaves + 1 */
719         u8                              num_stats_mods;
720         struct netcp_tx_pipe            tx_pipe;
721
722         int                             host_port;
723         u32                             rx_packet_max;
724         u32                             ss_version;
725         u32                             stats_en_mask;
726
727         void __iomem                    *ss_regs;
728         void __iomem                    *switch_regs;
729         void __iomem                    *host_port_regs;
730         void __iomem                    *ale_reg;
731         void __iomem                    *cpts_reg;
732         void __iomem                    *sgmii_port_regs;
733         void __iomem                    *sgmii_port34_regs;
734         void __iomem                    *xgbe_serdes_regs;
735         void __iomem                    *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
736
737         struct gbe_ss_regs_ofs          ss_regs_ofs;
738         struct gbe_switch_regs_ofs      switch_regs_ofs;
739         struct gbe_host_port_regs_ofs   host_port_regs_ofs;
740
741         struct cpsw_ale                 *ale;
742         unsigned int                    tx_queue_id;
743         const char                      *dma_chan_name;
744
745         struct list_head                gbe_intf_head;
746         struct list_head                secondary_slaves;
747         struct net_device               *dummy_ndev;
748
749         u64                             *hw_stats;      /* accumulated 64-bit counters */
750         u32                             *hw_stats_prev; /* previous raw hw counter values, for delta accounting */
751         const struct netcp_ethtool_stat *et_stats;
752         int                             num_et_stats;
753         /*  Lock for updating the hwstats */
754         spinlock_t                      hw_stats_lock;
755
756         int                             cpts_registered;
757         struct cpts                     *cpts;
758         int                             rx_ts_enabled;
759         int                             tx_ts_enabled;
760 };
761
762 struct gbe_intf {
763         struct net_device       *ndev;
764         struct device           *dev;
765         struct gbe_priv         *gbe_dev;
766         struct netcp_tx_pipe    tx_pipe;
767         struct gbe_slave        *slave;
768         struct list_head        gbe_intf_list;
769         unsigned long           active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
770 };
771
772 static struct netcp_module gbe_module;
773 static struct netcp_module xgbe_module;
774
775 /* Statistic management */
776 struct netcp_ethtool_stat {
777         char desc[ETH_GSTRING_LEN];
778         int type;
779         u32 size;
780         int offset;
781 };
782
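/* Each *_INFO entry names a counter for ethtool ("GBE_A:<field>"), ties it
 * to one of the hw stats modules above, and records the counter's size and
 * offset inside struct gbe_hw_stats so it can be read generically.
 */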
783 #define GBE_STATSA_INFO(field)                                          \
784 {                                                                       \
785         "GBE_A:"#field, GBE_STATSA_MODULE,                              \
786         FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
787         offsetof(struct gbe_hw_stats, field)                            \
788 }
789
790 #define GBE_STATSB_INFO(field)                                          \
791 {                                                                       \
792         "GBE_B:"#field, GBE_STATSB_MODULE,                              \
793         FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
794         offsetof(struct gbe_hw_stats, field)                            \
795 }
796
797 #define GBE_STATSC_INFO(field)                                          \
798 {                                                                       \
799         "GBE_C:"#field, GBE_STATSC_MODULE,                              \
800         FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
801         offsetof(struct gbe_hw_stats, field)                            \
802 }
803
804 #define GBE_STATSD_INFO(field)                                          \
805 {                                                                       \
806         "GBE_D:"#field, GBE_STATSD_MODULE,                              \
807         FIELD_SIZEOF(struct gbe_hw_stats, field),                       \
808         offsetof(struct gbe_hw_stats, field)                            \
809 }
810
811 static const struct netcp_ethtool_stat gbe13_et_stats[] = {
812         /* GBE module A */
813         GBE_STATSA_INFO(rx_good_frames),
814         GBE_STATSA_INFO(rx_broadcast_frames),
815         GBE_STATSA_INFO(rx_multicast_frames),
816         GBE_STATSA_INFO(rx_pause_frames),
817         GBE_STATSA_INFO(rx_crc_errors),
818         GBE_STATSA_INFO(rx_align_code_errors),
819         GBE_STATSA_INFO(rx_oversized_frames),
820         GBE_STATSA_INFO(rx_jabber_frames),
821         GBE_STATSA_INFO(rx_undersized_frames),
822         GBE_STATSA_INFO(rx_fragments),
823         GBE_STATSA_INFO(rx_bytes),
824         GBE_STATSA_INFO(tx_good_frames),
825         GBE_STATSA_INFO(tx_broadcast_frames),
826         GBE_STATSA_INFO(tx_multicast_frames),
827         GBE_STATSA_INFO(tx_pause_frames),
828         GBE_STATSA_INFO(tx_deferred_frames),
829         GBE_STATSA_INFO(tx_collision_frames),
830         GBE_STATSA_INFO(tx_single_coll_frames),
831         GBE_STATSA_INFO(tx_mult_coll_frames),
832         GBE_STATSA_INFO(tx_excessive_collisions),
833         GBE_STATSA_INFO(tx_late_collisions),
834         GBE_STATSA_INFO(tx_underrun),
835         GBE_STATSA_INFO(tx_carrier_sense_errors),
836         GBE_STATSA_INFO(tx_bytes),
837         GBE_STATSA_INFO(tx_64byte_frames),
838         GBE_STATSA_INFO(tx_65_to_127byte_frames),
839         GBE_STATSA_INFO(tx_128_to_255byte_frames),
840         GBE_STATSA_INFO(tx_256_to_511byte_frames),
841         GBE_STATSA_INFO(tx_512_to_1023byte_frames),
842         GBE_STATSA_INFO(tx_1024byte_frames),
843         GBE_STATSA_INFO(net_bytes),
844         GBE_STATSA_INFO(rx_sof_overruns),
845         GBE_STATSA_INFO(rx_mof_overruns),
846         GBE_STATSA_INFO(rx_dma_overruns),
847         /* GBE module B */
848         GBE_STATSB_INFO(rx_good_frames),
849         GBE_STATSB_INFO(rx_broadcast_frames),
850         GBE_STATSB_INFO(rx_multicast_frames),
851         GBE_STATSB_INFO(rx_pause_frames),
852         GBE_STATSB_INFO(rx_crc_errors),
853         GBE_STATSB_INFO(rx_align_code_errors),
854         GBE_STATSB_INFO(rx_oversized_frames),
855         GBE_STATSB_INFO(rx_jabber_frames),
856         GBE_STATSB_INFO(rx_undersized_frames),
857         GBE_STATSB_INFO(rx_fragments),
858         GBE_STATSB_INFO(rx_bytes),
859         GBE_STATSB_INFO(tx_good_frames),
860         GBE_STATSB_INFO(tx_broadcast_frames),
861         GBE_STATSB_INFO(tx_multicast_frames),
862         GBE_STATSB_INFO(tx_pause_frames),
863         GBE_STATSB_INFO(tx_deferred_frames),
864         GBE_STATSB_INFO(tx_collision_frames),
865         GBE_STATSB_INFO(tx_single_coll_frames),
866         GBE_STATSB_INFO(tx_mult_coll_frames),
867         GBE_STATSB_INFO(tx_excessive_collisions),
868         GBE_STATSB_INFO(tx_late_collisions),
869         GBE_STATSB_INFO(tx_underrun),
870         GBE_STATSB_INFO(tx_carrier_sense_errors),
871         GBE_STATSB_INFO(tx_bytes),
872         GBE_STATSB_INFO(tx_64byte_frames),
873         GBE_STATSB_INFO(tx_65_to_127byte_frames),
874         GBE_STATSB_INFO(tx_128_to_255byte_frames),
875         GBE_STATSB_INFO(tx_256_to_511byte_frames),
876         GBE_STATSB_INFO(tx_512_to_1023byte_frames),
877         GBE_STATSB_INFO(tx_1024byte_frames),
878         GBE_STATSB_INFO(net_bytes),
879         GBE_STATSB_INFO(rx_sof_overruns),
880         GBE_STATSB_INFO(rx_mof_overruns),
881         GBE_STATSB_INFO(rx_dma_overruns),
882         /* GBE module C */
883         GBE_STATSC_INFO(rx_good_frames),
884         GBE_STATSC_INFO(rx_broadcast_frames),
885         GBE_STATSC_INFO(rx_multicast_frames),
886         GBE_STATSC_INFO(rx_pause_frames),
887         GBE_STATSC_INFO(rx_crc_errors),
888         GBE_STATSC_INFO(rx_align_code_errors),
889         GBE_STATSC_INFO(rx_oversized_frames),
890         GBE_STATSC_INFO(rx_jabber_frames),
891         GBE_STATSC_INFO(rx_undersized_frames),
892         GBE_STATSC_INFO(rx_fragments),
893         GBE_STATSC_INFO(rx_bytes),
894         GBE_STATSC_INFO(tx_good_frames),
895         GBE_STATSC_INFO(tx_broadcast_frames),
896         GBE_STATSC_INFO(tx_multicast_frames),
897         GBE_STATSC_INFO(tx_pause_frames),
898         GBE_STATSC_INFO(tx_deferred_frames),
899         GBE_STATSC_INFO(tx_collision_frames),
900         GBE_STATSC_INFO(tx_single_coll_frames),
901         GBE_STATSC_INFO(tx_mult_coll_frames),
902         GBE_STATSC_INFO(tx_excessive_collisions),
903         GBE_STATSC_INFO(tx_late_collisions),
904         GBE_STATSC_INFO(tx_underrun),
905         GBE_STATSC_INFO(tx_carrier_sense_errors),
906         GBE_STATSC_INFO(tx_bytes),
907         GBE_STATSC_INFO(tx_64byte_frames),
908         GBE_STATSC_INFO(tx_65_to_127byte_frames),
909         GBE_STATSC_INFO(tx_128_to_255byte_frames),
910         GBE_STATSC_INFO(tx_256_to_511byte_frames),
911         GBE_STATSC_INFO(tx_512_to_1023byte_frames),
912         GBE_STATSC_INFO(tx_1024byte_frames),
913         GBE_STATSC_INFO(net_bytes),
914         GBE_STATSC_INFO(rx_sof_overruns),
915         GBE_STATSC_INFO(rx_mof_overruns),
916         GBE_STATSC_INFO(rx_dma_overruns),
917         /* GBE module D */
918         GBE_STATSD_INFO(rx_good_frames),
919         GBE_STATSD_INFO(rx_broadcast_frames),
920         GBE_STATSD_INFO(rx_multicast_frames),
921         GBE_STATSD_INFO(rx_pause_frames),
922         GBE_STATSD_INFO(rx_crc_errors),
923         GBE_STATSD_INFO(rx_align_code_errors),
924         GBE_STATSD_INFO(rx_oversized_frames),
925         GBE_STATSD_INFO(rx_jabber_frames),
926         GBE_STATSD_INFO(rx_undersized_frames),
927         GBE_STATSD_INFO(rx_fragments),
928         GBE_STATSD_INFO(rx_bytes),
929         GBE_STATSD_INFO(tx_good_frames),
930         GBE_STATSD_INFO(tx_broadcast_frames),
931         GBE_STATSD_INFO(tx_multicast_frames),
932         GBE_STATSD_INFO(tx_pause_frames),
933         GBE_STATSD_INFO(tx_deferred_frames),
934         GBE_STATSD_INFO(tx_collision_frames),
935         GBE_STATSD_INFO(tx_single_coll_frames),
936         GBE_STATSD_INFO(tx_mult_coll_frames),
937         GBE_STATSD_INFO(tx_excessive_collisions),
938         GBE_STATSD_INFO(tx_late_collisions),
939         GBE_STATSD_INFO(tx_underrun),
940         GBE_STATSD_INFO(tx_carrier_sense_errors),
941         GBE_STATSD_INFO(tx_bytes),
942         GBE_STATSD_INFO(tx_64byte_frames),
943         GBE_STATSD_INFO(tx_65_to_127byte_frames),
944         GBE_STATSD_INFO(tx_128_to_255byte_frames),
945         GBE_STATSD_INFO(tx_256_to_511byte_frames),
946         GBE_STATSD_INFO(tx_512_to_1023byte_frames),
947         GBE_STATSD_INFO(tx_1024byte_frames),
948         GBE_STATSD_INFO(net_bytes),
949         GBE_STATSD_INFO(rx_sof_overruns),
950         GBE_STATSD_INFO(rx_mof_overruns),
951         GBE_STATSD_INFO(rx_dma_overruns),
952 };
953
954 /* Number of GBENU_STATS_HOST entries in gbenu_et_stats below */
955 #define GBENU_ET_STATS_HOST_SIZE        52
956
957 #define GBENU_STATS_HOST(field)                                 \
958 {                                                               \
959         "GBE_HOST:"#field, GBENU_STATS0_MODULE,                 \
960         FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
961         offsetof(struct gbenu_hw_stats, field)                  \
962 }
963
964 /* Number of GBENU_STATS_Px entries per slave-port module below */
965 #define GBENU_ET_STATS_PORT_SIZE        65
966
967 #define GBENU_STATS_P1(field)                                   \
968 {                                                               \
969         "GBE_P1:"#field, GBENU_STATS1_MODULE,                   \
970         FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
971         offsetof(struct gbenu_hw_stats, field)                  \
972 }
973
974 #define GBENU_STATS_P2(field)                                   \
975 {                                                               \
976         "GBE_P2:"#field, GBENU_STATS2_MODULE,                   \
977         FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
978         offsetof(struct gbenu_hw_stats, field)                  \
979 }
980
981 #define GBENU_STATS_P3(field)                                   \
982 {                                                               \
983         "GBE_P3:"#field, GBENU_STATS3_MODULE,                   \
984         FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
985         offsetof(struct gbenu_hw_stats, field)                  \
986 }
987
988 #define GBENU_STATS_P4(field)                                   \
989 {                                                               \
990         "GBE_P4:"#field, GBENU_STATS4_MODULE,                   \
991         FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
992         offsetof(struct gbenu_hw_stats, field)                  \
993 }
994
995 #define GBENU_STATS_P5(field)                                   \
996 {                                                               \
997         "GBE_P5:"#field, GBENU_STATS5_MODULE,                   \
998         FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
999         offsetof(struct gbenu_hw_stats, field)                  \
1000 }
1001
1002 #define GBENU_STATS_P6(field)                                   \
1003 {                                                               \
1004         "GBE_P6:"#field, GBENU_STATS6_MODULE,                   \
1005         FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
1006         offsetof(struct gbenu_hw_stats, field)                  \
1007 }
1008
1009 #define GBENU_STATS_P7(field)                                   \
1010 {                                                               \
1011         "GBE_P7:"#field, GBENU_STATS7_MODULE,                   \
1012         FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
1013         offsetof(struct gbenu_hw_stats, field)                  \
1014 }
1015
1016 #define GBENU_STATS_P8(field)                                   \
1017 {                                                               \
1018         "GBE_P8:"#field, GBENU_STATS8_MODULE,                   \
1019         FIELD_SIZEOF(struct gbenu_hw_stats, field),             \
1020         offsetof(struct gbenu_hw_stats, field)                  \
1021 }
1022
1023 static const struct netcp_ethtool_stat gbenu_et_stats[] = {
1024         /* GBENU Host Module */
1025         GBENU_STATS_HOST(rx_good_frames),
1026         GBENU_STATS_HOST(rx_broadcast_frames),
1027         GBENU_STATS_HOST(rx_multicast_frames),
1028         GBENU_STATS_HOST(rx_crc_errors),
1029         GBENU_STATS_HOST(rx_oversized_frames),
1030         GBENU_STATS_HOST(rx_undersized_frames),
1031         GBENU_STATS_HOST(ale_drop),
1032         GBENU_STATS_HOST(ale_overrun_drop),
1033         GBENU_STATS_HOST(rx_bytes),
1034         GBENU_STATS_HOST(tx_good_frames),
1035         GBENU_STATS_HOST(tx_broadcast_frames),
1036         GBENU_STATS_HOST(tx_multicast_frames),
1037         GBENU_STATS_HOST(tx_bytes),
1038         GBENU_STATS_HOST(tx_64B_frames),
1039         GBENU_STATS_HOST(tx_65_to_127B_frames),
1040         GBENU_STATS_HOST(tx_128_to_255B_frames),
1041         GBENU_STATS_HOST(tx_256_to_511B_frames),
1042         GBENU_STATS_HOST(tx_512_to_1023B_frames),
1043         GBENU_STATS_HOST(tx_1024B_frames),
1044         GBENU_STATS_HOST(net_bytes),
1045         GBENU_STATS_HOST(rx_bottom_fifo_drop),
1046         GBENU_STATS_HOST(rx_port_mask_drop),
1047         GBENU_STATS_HOST(rx_top_fifo_drop),
1048         GBENU_STATS_HOST(ale_rate_limit_drop),
1049         GBENU_STATS_HOST(ale_vid_ingress_drop),
1050         GBENU_STATS_HOST(ale_da_eq_sa_drop),
1051         GBENU_STATS_HOST(ale_unknown_ucast),
1052         GBENU_STATS_HOST(ale_unknown_ucast_bytes),
1053         GBENU_STATS_HOST(ale_unknown_mcast),
1054         GBENU_STATS_HOST(ale_unknown_mcast_bytes),
1055         GBENU_STATS_HOST(ale_unknown_bcast),
1056         GBENU_STATS_HOST(ale_unknown_bcast_bytes),
1057         GBENU_STATS_HOST(ale_pol_match),
1058         GBENU_STATS_HOST(ale_pol_match_red),
1059         GBENU_STATS_HOST(ale_pol_match_yellow),
1060         GBENU_STATS_HOST(tx_mem_protect_err),
1061         GBENU_STATS_HOST(tx_pri0_drop),
1062         GBENU_STATS_HOST(tx_pri1_drop),
1063         GBENU_STATS_HOST(tx_pri2_drop),
1064         GBENU_STATS_HOST(tx_pri3_drop),
1065         GBENU_STATS_HOST(tx_pri4_drop),
1066         GBENU_STATS_HOST(tx_pri5_drop),
1067         GBENU_STATS_HOST(tx_pri6_drop),
1068         GBENU_STATS_HOST(tx_pri7_drop),
1069         GBENU_STATS_HOST(tx_pri0_drop_bcnt),
1070         GBENU_STATS_HOST(tx_pri1_drop_bcnt),
1071         GBENU_STATS_HOST(tx_pri2_drop_bcnt),
1072         GBENU_STATS_HOST(tx_pri3_drop_bcnt),
1073         GBENU_STATS_HOST(tx_pri4_drop_bcnt),
1074         GBENU_STATS_HOST(tx_pri5_drop_bcnt),
1075         GBENU_STATS_HOST(tx_pri6_drop_bcnt),
1076         GBENU_STATS_HOST(tx_pri7_drop_bcnt),
1077         /* GBENU Module 1 */
1078         GBENU_STATS_P1(rx_good_frames),
1079         GBENU_STATS_P1(rx_broadcast_frames),
1080         GBENU_STATS_P1(rx_multicast_frames),
1081         GBENU_STATS_P1(rx_pause_frames),
1082         GBENU_STATS_P1(rx_crc_errors),
1083         GBENU_STATS_P1(rx_align_code_errors),
1084         GBENU_STATS_P1(rx_oversized_frames),
1085         GBENU_STATS_P1(rx_jabber_frames),
1086         GBENU_STATS_P1(rx_undersized_frames),
1087         GBENU_STATS_P1(rx_fragments),
1088         GBENU_STATS_P1(ale_drop),
1089         GBENU_STATS_P1(ale_overrun_drop),
1090         GBENU_STATS_P1(rx_bytes),
1091         GBENU_STATS_P1(tx_good_frames),
1092         GBENU_STATS_P1(tx_broadcast_frames),
1093         GBENU_STATS_P1(tx_multicast_frames),
1094         GBENU_STATS_P1(tx_pause_frames),
1095         GBENU_STATS_P1(tx_deferred_frames),
1096         GBENU_STATS_P1(tx_collision_frames),
1097         GBENU_STATS_P1(tx_single_coll_frames),
1098         GBENU_STATS_P1(tx_mult_coll_frames),
1099         GBENU_STATS_P1(tx_excessive_collisions),
1100         GBENU_STATS_P1(tx_late_collisions),
1101         GBENU_STATS_P1(rx_ipg_error),
1102         GBENU_STATS_P1(tx_carrier_sense_errors),
1103         GBENU_STATS_P1(tx_bytes),
1104         GBENU_STATS_P1(tx_64B_frames),
1105         GBENU_STATS_P1(tx_65_to_127B_frames),
1106         GBENU_STATS_P1(tx_128_to_255B_frames),
1107         GBENU_STATS_P1(tx_256_to_511B_frames),
1108         GBENU_STATS_P1(tx_512_to_1023B_frames),
1109         GBENU_STATS_P1(tx_1024B_frames),
1110         GBENU_STATS_P1(net_bytes),
1111         GBENU_STATS_P1(rx_bottom_fifo_drop),
1112         GBENU_STATS_P1(rx_port_mask_drop),
1113         GBENU_STATS_P1(rx_top_fifo_drop),
1114         GBENU_STATS_P1(ale_rate_limit_drop),
1115         GBENU_STATS_P1(ale_vid_ingress_drop),
1116         GBENU_STATS_P1(ale_da_eq_sa_drop),
1117         GBENU_STATS_P1(ale_unknown_ucast),
1118         GBENU_STATS_P1(ale_unknown_ucast_bytes),
1119         GBENU_STATS_P1(ale_unknown_mcast),
1120         GBENU_STATS_P1(ale_unknown_mcast_bytes),
1121         GBENU_STATS_P1(ale_unknown_bcast),
1122         GBENU_STATS_P1(ale_unknown_bcast_bytes),
1123         GBENU_STATS_P1(ale_pol_match),
1124         GBENU_STATS_P1(ale_pol_match_red),
1125         GBENU_STATS_P1(ale_pol_match_yellow),
1126         GBENU_STATS_P1(tx_mem_protect_err),
1127         GBENU_STATS_P1(tx_pri0_drop),
1128         GBENU_STATS_P1(tx_pri1_drop),
1129         GBENU_STATS_P1(tx_pri2_drop),
1130         GBENU_STATS_P1(tx_pri3_drop),
1131         GBENU_STATS_P1(tx_pri4_drop),
1132         GBENU_STATS_P1(tx_pri5_drop),
1133         GBENU_STATS_P1(tx_pri6_drop),
1134         GBENU_STATS_P1(tx_pri7_drop),
1135         GBENU_STATS_P1(tx_pri0_drop_bcnt),
1136         GBENU_STATS_P1(tx_pri1_drop_bcnt),
1137         GBENU_STATS_P1(tx_pri2_drop_bcnt),
1138         GBENU_STATS_P1(tx_pri3_drop_bcnt),
1139         GBENU_STATS_P1(tx_pri4_drop_bcnt),
1140         GBENU_STATS_P1(tx_pri5_drop_bcnt),
1141         GBENU_STATS_P1(tx_pri6_drop_bcnt),
1142         GBENU_STATS_P1(tx_pri7_drop_bcnt),
1143         /* GBENU Module 2 */
1144         GBENU_STATS_P2(rx_good_frames),
1145         GBENU_STATS_P2(rx_broadcast_frames),
1146         GBENU_STATS_P2(rx_multicast_frames),
1147         GBENU_STATS_P2(rx_pause_frames),
1148         GBENU_STATS_P2(rx_crc_errors),
1149         GBENU_STATS_P2(rx_align_code_errors),
1150         GBENU_STATS_P2(rx_oversized_frames),
1151         GBENU_STATS_P2(rx_jabber_frames),
1152         GBENU_STATS_P2(rx_undersized_frames),
1153         GBENU_STATS_P2(rx_fragments),
1154         GBENU_STATS_P2(ale_drop),
1155         GBENU_STATS_P2(ale_overrun_drop),
1156         GBENU_STATS_P2(rx_bytes),
1157         GBENU_STATS_P2(tx_good_frames),
1158         GBENU_STATS_P2(tx_broadcast_frames),
1159         GBENU_STATS_P2(tx_multicast_frames),
1160         GBENU_STATS_P2(tx_pause_frames),
1161         GBENU_STATS_P2(tx_deferred_frames),
1162         GBENU_STATS_P2(tx_collision_frames),
1163         GBENU_STATS_P2(tx_single_coll_frames),
1164         GBENU_STATS_P2(tx_mult_coll_frames),
1165         GBENU_STATS_P2(tx_excessive_collisions),
1166         GBENU_STATS_P2(tx_late_collisions),
1167         GBENU_STATS_P2(rx_ipg_error),
1168         GBENU_STATS_P2(tx_carrier_sense_errors),
1169         GBENU_STATS_P2(tx_bytes),
1170         GBENU_STATS_P2(tx_64B_frames),
1171         GBENU_STATS_P2(tx_65_to_127B_frames),
1172         GBENU_STATS_P2(tx_128_to_255B_frames),
1173         GBENU_STATS_P2(tx_256_to_511B_frames),
1174         GBENU_STATS_P2(tx_512_to_1023B_frames),
1175         GBENU_STATS_P2(tx_1024B_frames),
1176         GBENU_STATS_P2(net_bytes),
1177         GBENU_STATS_P2(rx_bottom_fifo_drop),
1178         GBENU_STATS_P2(rx_port_mask_drop),
1179         GBENU_STATS_P2(rx_top_fifo_drop),
1180         GBENU_STATS_P2(ale_rate_limit_drop),
1181         GBENU_STATS_P2(ale_vid_ingress_drop),
1182         GBENU_STATS_P2(ale_da_eq_sa_drop),
1183         GBENU_STATS_P2(ale_unknown_ucast),
1184         GBENU_STATS_P2(ale_unknown_ucast_bytes),
1185         GBENU_STATS_P2(ale_unknown_mcast),
1186         GBENU_STATS_P2(ale_unknown_mcast_bytes),
1187         GBENU_STATS_P2(ale_unknown_bcast),
1188         GBENU_STATS_P2(ale_unknown_bcast_bytes),
1189         GBENU_STATS_P2(ale_pol_match),
1190         GBENU_STATS_P2(ale_pol_match_red),
1191         GBENU_STATS_P2(ale_pol_match_yellow),
1192         GBENU_STATS_P2(tx_mem_protect_err),
1193         GBENU_STATS_P2(tx_pri0_drop),
1194         GBENU_STATS_P2(tx_pri1_drop),
1195         GBENU_STATS_P2(tx_pri2_drop),
1196         GBENU_STATS_P2(tx_pri3_drop),
1197         GBENU_STATS_P2(tx_pri4_drop),
1198         GBENU_STATS_P2(tx_pri5_drop),
1199         GBENU_STATS_P2(tx_pri6_drop),
1200         GBENU_STATS_P2(tx_pri7_drop),
1201         GBENU_STATS_P2(tx_pri0_drop_bcnt),
1202         GBENU_STATS_P2(tx_pri1_drop_bcnt),
1203         GBENU_STATS_P2(tx_pri2_drop_bcnt),
1204         GBENU_STATS_P2(tx_pri3_drop_bcnt),
1205         GBENU_STATS_P2(tx_pri4_drop_bcnt),
1206         GBENU_STATS_P2(tx_pri5_drop_bcnt),
1207         GBENU_STATS_P2(tx_pri6_drop_bcnt),
1208         GBENU_STATS_P2(tx_pri7_drop_bcnt),
1209         /* GBENU Module 3 */
1210         GBENU_STATS_P3(rx_good_frames),
1211         GBENU_STATS_P3(rx_broadcast_frames),
1212         GBENU_STATS_P3(rx_multicast_frames),
1213         GBENU_STATS_P3(rx_pause_frames),
1214         GBENU_STATS_P3(rx_crc_errors),
1215         GBENU_STATS_P3(rx_align_code_errors),
1216         GBENU_STATS_P3(rx_oversized_frames),
1217         GBENU_STATS_P3(rx_jabber_frames),
1218         GBENU_STATS_P3(rx_undersized_frames),
1219         GBENU_STATS_P3(rx_fragments),
1220         GBENU_STATS_P3(ale_drop),
1221         GBENU_STATS_P3(ale_overrun_drop),
1222         GBENU_STATS_P3(rx_bytes),
1223         GBENU_STATS_P3(tx_good_frames),
1224         GBENU_STATS_P3(tx_broadcast_frames),
1225         GBENU_STATS_P3(tx_multicast_frames),
1226         GBENU_STATS_P3(tx_pause_frames),
1227         GBENU_STATS_P3(tx_deferred_frames),
1228         GBENU_STATS_P3(tx_collision_frames),
1229         GBENU_STATS_P3(tx_single_coll_frames),
1230         GBENU_STATS_P3(tx_mult_coll_frames),
1231         GBENU_STATS_P3(tx_excessive_collisions),
1232         GBENU_STATS_P3(tx_late_collisions),
1233         GBENU_STATS_P3(rx_ipg_error),
1234         GBENU_STATS_P3(tx_carrier_sense_errors),
1235         GBENU_STATS_P3(tx_bytes),
1236         GBENU_STATS_P3(tx_64B_frames),
1237         GBENU_STATS_P3(tx_65_to_127B_frames),
1238         GBENU_STATS_P3(tx_128_to_255B_frames),
1239         GBENU_STATS_P3(tx_256_to_511B_frames),
1240         GBENU_STATS_P3(tx_512_to_1023B_frames),
1241         GBENU_STATS_P3(tx_1024B_frames),
1242         GBENU_STATS_P3(net_bytes),
1243         GBENU_STATS_P3(rx_bottom_fifo_drop),
1244         GBENU_STATS_P3(rx_port_mask_drop),
1245         GBENU_STATS_P3(rx_top_fifo_drop),
1246         GBENU_STATS_P3(ale_rate_limit_drop),
1247         GBENU_STATS_P3(ale_vid_ingress_drop),
1248         GBENU_STATS_P3(ale_da_eq_sa_drop),
1249         GBENU_STATS_P3(ale_unknown_ucast),
1250         GBENU_STATS_P3(ale_unknown_ucast_bytes),
1251         GBENU_STATS_P3(ale_unknown_mcast),
1252         GBENU_STATS_P3(ale_unknown_mcast_bytes),
1253         GBENU_STATS_P3(ale_unknown_bcast),
1254         GBENU_STATS_P3(ale_unknown_bcast_bytes),
1255         GBENU_STATS_P3(ale_pol_match),
1256         GBENU_STATS_P3(ale_pol_match_red),
1257         GBENU_STATS_P3(ale_pol_match_yellow),
1258         GBENU_STATS_P3(tx_mem_protect_err),
1259         GBENU_STATS_P3(tx_pri0_drop),
1260         GBENU_STATS_P3(tx_pri1_drop),
1261         GBENU_STATS_P3(tx_pri2_drop),
1262         GBENU_STATS_P3(tx_pri3_drop),
1263         GBENU_STATS_P3(tx_pri4_drop),
1264         GBENU_STATS_P3(tx_pri5_drop),
1265         GBENU_STATS_P3(tx_pri6_drop),
1266         GBENU_STATS_P3(tx_pri7_drop),
1267         GBENU_STATS_P3(tx_pri0_drop_bcnt),
1268         GBENU_STATS_P3(tx_pri1_drop_bcnt),
1269         GBENU_STATS_P3(tx_pri2_drop_bcnt),
1270         GBENU_STATS_P3(tx_pri3_drop_bcnt),
1271         GBENU_STATS_P3(tx_pri4_drop_bcnt),
1272         GBENU_STATS_P3(tx_pri5_drop_bcnt),
1273         GBENU_STATS_P3(tx_pri6_drop_bcnt),
1274         GBENU_STATS_P3(tx_pri7_drop_bcnt),
1275         /* GBENU Module 4 */
1276         GBENU_STATS_P4(rx_good_frames),
1277         GBENU_STATS_P4(rx_broadcast_frames),
1278         GBENU_STATS_P4(rx_multicast_frames),
1279         GBENU_STATS_P4(rx_pause_frames),
1280         GBENU_STATS_P4(rx_crc_errors),
1281         GBENU_STATS_P4(rx_align_code_errors),
1282         GBENU_STATS_P4(rx_oversized_frames),
1283         GBENU_STATS_P4(rx_jabber_frames),
1284         GBENU_STATS_P4(rx_undersized_frames),
1285         GBENU_STATS_P4(rx_fragments),
1286         GBENU_STATS_P4(ale_drop),
1287         GBENU_STATS_P4(ale_overrun_drop),
1288         GBENU_STATS_P4(rx_bytes),
1289         GBENU_STATS_P4(tx_good_frames),
1290         GBENU_STATS_P4(tx_broadcast_frames),
1291         GBENU_STATS_P4(tx_multicast_frames),
1292         GBENU_STATS_P4(tx_pause_frames),
1293         GBENU_STATS_P4(tx_deferred_frames),
1294         GBENU_STATS_P4(tx_collision_frames),
1295         GBENU_STATS_P4(tx_single_coll_frames),
1296         GBENU_STATS_P4(tx_mult_coll_frames),
1297         GBENU_STATS_P4(tx_excessive_collisions),
1298         GBENU_STATS_P4(tx_late_collisions),
1299         GBENU_STATS_P4(rx_ipg_error),
1300         GBENU_STATS_P4(tx_carrier_sense_errors),
1301         GBENU_STATS_P4(tx_bytes),
1302         GBENU_STATS_P4(tx_64B_frames),
1303         GBENU_STATS_P4(tx_65_to_127B_frames),
1304         GBENU_STATS_P4(tx_128_to_255B_frames),
1305         GBENU_STATS_P4(tx_256_to_511B_frames),
1306         GBENU_STATS_P4(tx_512_to_1023B_frames),
1307         GBENU_STATS_P4(tx_1024B_frames),
1308         GBENU_STATS_P4(net_bytes),
1309         GBENU_STATS_P4(rx_bottom_fifo_drop),
1310         GBENU_STATS_P4(rx_port_mask_drop),
1311         GBENU_STATS_P4(rx_top_fifo_drop),
1312         GBENU_STATS_P4(ale_rate_limit_drop),
1313         GBENU_STATS_P4(ale_vid_ingress_drop),
1314         GBENU_STATS_P4(ale_da_eq_sa_drop),
1315         GBENU_STATS_P4(ale_unknown_ucast),
1316         GBENU_STATS_P4(ale_unknown_ucast_bytes),
1317         GBENU_STATS_P4(ale_unknown_mcast),
1318         GBENU_STATS_P4(ale_unknown_mcast_bytes),
1319         GBENU_STATS_P4(ale_unknown_bcast),
1320         GBENU_STATS_P4(ale_unknown_bcast_bytes),
1321         GBENU_STATS_P4(ale_pol_match),
1322         GBENU_STATS_P4(ale_pol_match_red),
1323         GBENU_STATS_P4(ale_pol_match_yellow),
1324         GBENU_STATS_P4(tx_mem_protect_err),
1325         GBENU_STATS_P4(tx_pri0_drop),
1326         GBENU_STATS_P4(tx_pri1_drop),
1327         GBENU_STATS_P4(tx_pri2_drop),
1328         GBENU_STATS_P4(tx_pri3_drop),
1329         GBENU_STATS_P4(tx_pri4_drop),
1330         GBENU_STATS_P4(tx_pri5_drop),
1331         GBENU_STATS_P4(tx_pri6_drop),
1332         GBENU_STATS_P4(tx_pri7_drop),
1333         GBENU_STATS_P4(tx_pri0_drop_bcnt),
1334         GBENU_STATS_P4(tx_pri1_drop_bcnt),
1335         GBENU_STATS_P4(tx_pri2_drop_bcnt),
1336         GBENU_STATS_P4(tx_pri3_drop_bcnt),
1337         GBENU_STATS_P4(tx_pri4_drop_bcnt),
1338         GBENU_STATS_P4(tx_pri5_drop_bcnt),
1339         GBENU_STATS_P4(tx_pri6_drop_bcnt),
1340         GBENU_STATS_P4(tx_pri7_drop_bcnt),
1341         /* GBENU Module 5 */
1342         GBENU_STATS_P5(rx_good_frames),
1343         GBENU_STATS_P5(rx_broadcast_frames),
1344         GBENU_STATS_P5(rx_multicast_frames),
1345         GBENU_STATS_P5(rx_pause_frames),
1346         GBENU_STATS_P5(rx_crc_errors),
1347         GBENU_STATS_P5(rx_align_code_errors),
1348         GBENU_STATS_P5(rx_oversized_frames),
1349         GBENU_STATS_P5(rx_jabber_frames),
1350         GBENU_STATS_P5(rx_undersized_frames),
1351         GBENU_STATS_P5(rx_fragments),
1352         GBENU_STATS_P5(ale_drop),
1353         GBENU_STATS_P5(ale_overrun_drop),
1354         GBENU_STATS_P5(rx_bytes),
1355         GBENU_STATS_P5(tx_good_frames),
1356         GBENU_STATS_P5(tx_broadcast_frames),
1357         GBENU_STATS_P5(tx_multicast_frames),
1358         GBENU_STATS_P5(tx_pause_frames),
1359         GBENU_STATS_P5(tx_deferred_frames),
1360         GBENU_STATS_P5(tx_collision_frames),
1361         GBENU_STATS_P5(tx_single_coll_frames),
1362         GBENU_STATS_P5(tx_mult_coll_frames),
1363         GBENU_STATS_P5(tx_excessive_collisions),
1364         GBENU_STATS_P5(tx_late_collisions),
1365         GBENU_STATS_P5(rx_ipg_error),
1366         GBENU_STATS_P5(tx_carrier_sense_errors),
1367         GBENU_STATS_P5(tx_bytes),
1368         GBENU_STATS_P5(tx_64B_frames),
1369         GBENU_STATS_P5(tx_65_to_127B_frames),
1370         GBENU_STATS_P5(tx_128_to_255B_frames),
1371         GBENU_STATS_P5(tx_256_to_511B_frames),
1372         GBENU_STATS_P5(tx_512_to_1023B_frames),
1373         GBENU_STATS_P5(tx_1024B_frames),
1374         GBENU_STATS_P5(net_bytes),
1375         GBENU_STATS_P5(rx_bottom_fifo_drop),
1376         GBENU_STATS_P5(rx_port_mask_drop),
1377         GBENU_STATS_P5(rx_top_fifo_drop),
1378         GBENU_STATS_P5(ale_rate_limit_drop),
1379         GBENU_STATS_P5(ale_vid_ingress_drop),
1380         GBENU_STATS_P5(ale_da_eq_sa_drop),
1381         GBENU_STATS_P5(ale_unknown_ucast),
1382         GBENU_STATS_P5(ale_unknown_ucast_bytes),
1383         GBENU_STATS_P5(ale_unknown_mcast),
1384         GBENU_STATS_P5(ale_unknown_mcast_bytes),
1385         GBENU_STATS_P5(ale_unknown_bcast),
1386         GBENU_STATS_P5(ale_unknown_bcast_bytes),
1387         GBENU_STATS_P5(ale_pol_match),
1388         GBENU_STATS_P5(ale_pol_match_red),
1389         GBENU_STATS_P5(ale_pol_match_yellow),
1390         GBENU_STATS_P5(tx_mem_protect_err),
1391         GBENU_STATS_P5(tx_pri0_drop),
1392         GBENU_STATS_P5(tx_pri1_drop),
1393         GBENU_STATS_P5(tx_pri2_drop),
1394         GBENU_STATS_P5(tx_pri3_drop),
1395         GBENU_STATS_P5(tx_pri4_drop),
1396         GBENU_STATS_P5(tx_pri5_drop),
1397         GBENU_STATS_P5(tx_pri6_drop),
1398         GBENU_STATS_P5(tx_pri7_drop),
1399         GBENU_STATS_P5(tx_pri0_drop_bcnt),
1400         GBENU_STATS_P5(tx_pri1_drop_bcnt),
1401         GBENU_STATS_P5(tx_pri2_drop_bcnt),
1402         GBENU_STATS_P5(tx_pri3_drop_bcnt),
1403         GBENU_STATS_P5(tx_pri4_drop_bcnt),
1404         GBENU_STATS_P5(tx_pri5_drop_bcnt),
1405         GBENU_STATS_P5(tx_pri6_drop_bcnt),
1406         GBENU_STATS_P5(tx_pri7_drop_bcnt),
1407         /* GBENU Module 6 */
1408         GBENU_STATS_P6(rx_good_frames),
1409         GBENU_STATS_P6(rx_broadcast_frames),
1410         GBENU_STATS_P6(rx_multicast_frames),
1411         GBENU_STATS_P6(rx_pause_frames),
1412         GBENU_STATS_P6(rx_crc_errors),
1413         GBENU_STATS_P6(rx_align_code_errors),
1414         GBENU_STATS_P6(rx_oversized_frames),
1415         GBENU_STATS_P6(rx_jabber_frames),
1416         GBENU_STATS_P6(rx_undersized_frames),
1417         GBENU_STATS_P6(rx_fragments),
1418         GBENU_STATS_P6(ale_drop),
1419         GBENU_STATS_P6(ale_overrun_drop),
1420         GBENU_STATS_P6(rx_bytes),
1421         GBENU_STATS_P6(tx_good_frames),
1422         GBENU_STATS_P6(tx_broadcast_frames),
1423         GBENU_STATS_P6(tx_multicast_frames),
1424         GBENU_STATS_P6(tx_pause_frames),
1425         GBENU_STATS_P6(tx_deferred_frames),
1426         GBENU_STATS_P6(tx_collision_frames),
1427         GBENU_STATS_P6(tx_single_coll_frames),
1428         GBENU_STATS_P6(tx_mult_coll_frames),
1429         GBENU_STATS_P6(tx_excessive_collisions),
1430         GBENU_STATS_P6(tx_late_collisions),
1431         GBENU_STATS_P6(rx_ipg_error),
1432         GBENU_STATS_P6(tx_carrier_sense_errors),
1433         GBENU_STATS_P6(tx_bytes),
1434         GBENU_STATS_P6(tx_64B_frames),
1435         GBENU_STATS_P6(tx_65_to_127B_frames),
1436         GBENU_STATS_P6(tx_128_to_255B_frames),
1437         GBENU_STATS_P6(tx_256_to_511B_frames),
1438         GBENU_STATS_P6(tx_512_to_1023B_frames),
1439         GBENU_STATS_P6(tx_1024B_frames),
1440         GBENU_STATS_P6(net_bytes),
1441         GBENU_STATS_P6(rx_bottom_fifo_drop),
1442         GBENU_STATS_P6(rx_port_mask_drop),
1443         GBENU_STATS_P6(rx_top_fifo_drop),
1444         GBENU_STATS_P6(ale_rate_limit_drop),
1445         GBENU_STATS_P6(ale_vid_ingress_drop),
1446         GBENU_STATS_P6(ale_da_eq_sa_drop),
1447         GBENU_STATS_P6(ale_unknown_ucast),
1448         GBENU_STATS_P6(ale_unknown_ucast_bytes),
1449         GBENU_STATS_P6(ale_unknown_mcast),
1450         GBENU_STATS_P6(ale_unknown_mcast_bytes),
1451         GBENU_STATS_P6(ale_unknown_bcast),
1452         GBENU_STATS_P6(ale_unknown_bcast_bytes),
1453         GBENU_STATS_P6(ale_pol_match),
1454         GBENU_STATS_P6(ale_pol_match_red),
1455         GBENU_STATS_P6(ale_pol_match_yellow),
1456         GBENU_STATS_P6(tx_mem_protect_err),
1457         GBENU_STATS_P6(tx_pri0_drop),
1458         GBENU_STATS_P6(tx_pri1_drop),
1459         GBENU_STATS_P6(tx_pri2_drop),
1460         GBENU_STATS_P6(tx_pri3_drop),
1461         GBENU_STATS_P6(tx_pri4_drop),
1462         GBENU_STATS_P6(tx_pri5_drop),
1463         GBENU_STATS_P6(tx_pri6_drop),
1464         GBENU_STATS_P6(tx_pri7_drop),
1465         GBENU_STATS_P6(tx_pri0_drop_bcnt),
1466         GBENU_STATS_P6(tx_pri1_drop_bcnt),
1467         GBENU_STATS_P6(tx_pri2_drop_bcnt),
1468         GBENU_STATS_P6(tx_pri3_drop_bcnt),
1469         GBENU_STATS_P6(tx_pri4_drop_bcnt),
1470         GBENU_STATS_P6(tx_pri5_drop_bcnt),
1471         GBENU_STATS_P6(tx_pri6_drop_bcnt),
1472         GBENU_STATS_P6(tx_pri7_drop_bcnt),
1473         /* GBENU Module 7 */
1474         GBENU_STATS_P7(rx_good_frames),
1475         GBENU_STATS_P7(rx_broadcast_frames),
1476         GBENU_STATS_P7(rx_multicast_frames),
1477         GBENU_STATS_P7(rx_pause_frames),
1478         GBENU_STATS_P7(rx_crc_errors),
1479         GBENU_STATS_P7(rx_align_code_errors),
1480         GBENU_STATS_P7(rx_oversized_frames),
1481         GBENU_STATS_P7(rx_jabber_frames),
1482         GBENU_STATS_P7(rx_undersized_frames),
1483         GBENU_STATS_P7(rx_fragments),
1484         GBENU_STATS_P7(ale_drop),
1485         GBENU_STATS_P7(ale_overrun_drop),
1486         GBENU_STATS_P7(rx_bytes),
1487         GBENU_STATS_P7(tx_good_frames),
1488         GBENU_STATS_P7(tx_broadcast_frames),
1489         GBENU_STATS_P7(tx_multicast_frames),
1490         GBENU_STATS_P7(tx_pause_frames),
1491         GBENU_STATS_P7(tx_deferred_frames),
1492         GBENU_STATS_P7(tx_collision_frames),
1493         GBENU_STATS_P7(tx_single_coll_frames),
1494         GBENU_STATS_P7(tx_mult_coll_frames),
1495         GBENU_STATS_P7(tx_excessive_collisions),
1496         GBENU_STATS_P7(tx_late_collisions),
1497         GBENU_STATS_P7(rx_ipg_error),
1498         GBENU_STATS_P7(tx_carrier_sense_errors),
1499         GBENU_STATS_P7(tx_bytes),
1500         GBENU_STATS_P7(tx_64B_frames),
1501         GBENU_STATS_P7(tx_65_to_127B_frames),
1502         GBENU_STATS_P7(tx_128_to_255B_frames),
1503         GBENU_STATS_P7(tx_256_to_511B_frames),
1504         GBENU_STATS_P7(tx_512_to_1023B_frames),
1505         GBENU_STATS_P7(tx_1024B_frames),
1506         GBENU_STATS_P7(net_bytes),
1507         GBENU_STATS_P7(rx_bottom_fifo_drop),
1508         GBENU_STATS_P7(rx_port_mask_drop),
1509         GBENU_STATS_P7(rx_top_fifo_drop),
1510         GBENU_STATS_P7(ale_rate_limit_drop),
1511         GBENU_STATS_P7(ale_vid_ingress_drop),
1512         GBENU_STATS_P7(ale_da_eq_sa_drop),
1513         GBENU_STATS_P7(ale_unknown_ucast),
1514         GBENU_STATS_P7(ale_unknown_ucast_bytes),
1515         GBENU_STATS_P7(ale_unknown_mcast),
1516         GBENU_STATS_P7(ale_unknown_mcast_bytes),
1517         GBENU_STATS_P7(ale_unknown_bcast),
1518         GBENU_STATS_P7(ale_unknown_bcast_bytes),
1519         GBENU_STATS_P7(ale_pol_match),
1520         GBENU_STATS_P7(ale_pol_match_red),
1521         GBENU_STATS_P7(ale_pol_match_yellow),
1522         GBENU_STATS_P7(tx_mem_protect_err),
1523         GBENU_STATS_P7(tx_pri0_drop),
1524         GBENU_STATS_P7(tx_pri1_drop),
1525         GBENU_STATS_P7(tx_pri2_drop),
1526         GBENU_STATS_P7(tx_pri3_drop),
1527         GBENU_STATS_P7(tx_pri4_drop),
1528         GBENU_STATS_P7(tx_pri5_drop),
1529         GBENU_STATS_P7(tx_pri6_drop),
1530         GBENU_STATS_P7(tx_pri7_drop),
1531         GBENU_STATS_P7(tx_pri0_drop_bcnt),
1532         GBENU_STATS_P7(tx_pri1_drop_bcnt),
1533         GBENU_STATS_P7(tx_pri2_drop_bcnt),
1534         GBENU_STATS_P7(tx_pri3_drop_bcnt),
1535         GBENU_STATS_P7(tx_pri4_drop_bcnt),
1536         GBENU_STATS_P7(tx_pri5_drop_bcnt),
1537         GBENU_STATS_P7(tx_pri6_drop_bcnt),
1538         GBENU_STATS_P7(tx_pri7_drop_bcnt),
1539         /* GBENU Module 8 */
1540         GBENU_STATS_P8(rx_good_frames),
1541         GBENU_STATS_P8(rx_broadcast_frames),
1542         GBENU_STATS_P8(rx_multicast_frames),
1543         GBENU_STATS_P8(rx_pause_frames),
1544         GBENU_STATS_P8(rx_crc_errors),
1545         GBENU_STATS_P8(rx_align_code_errors),
1546         GBENU_STATS_P8(rx_oversized_frames),
1547         GBENU_STATS_P8(rx_jabber_frames),
1548         GBENU_STATS_P8(rx_undersized_frames),
1549         GBENU_STATS_P8(rx_fragments),
1550         GBENU_STATS_P8(ale_drop),
1551         GBENU_STATS_P8(ale_overrun_drop),
1552         GBENU_STATS_P8(rx_bytes),
1553         GBENU_STATS_P8(tx_good_frames),
1554         GBENU_STATS_P8(tx_broadcast_frames),
1555         GBENU_STATS_P8(tx_multicast_frames),
1556         GBENU_STATS_P8(tx_pause_frames),
1557         GBENU_STATS_P8(tx_deferred_frames),
1558         GBENU_STATS_P8(tx_collision_frames),
1559         GBENU_STATS_P8(tx_single_coll_frames),
1560         GBENU_STATS_P8(tx_mult_coll_frames),
1561         GBENU_STATS_P8(tx_excessive_collisions),
1562         GBENU_STATS_P8(tx_late_collisions),
1563         GBENU_STATS_P8(rx_ipg_error),
1564         GBENU_STATS_P8(tx_carrier_sense_errors),
1565         GBENU_STATS_P8(tx_bytes),
1566         GBENU_STATS_P8(tx_64B_frames),
1567         GBENU_STATS_P8(tx_65_to_127B_frames),
1568         GBENU_STATS_P8(tx_128_to_255B_frames),
1569         GBENU_STATS_P8(tx_256_to_511B_frames),
1570         GBENU_STATS_P8(tx_512_to_1023B_frames),
1571         GBENU_STATS_P8(tx_1024B_frames),
1572         GBENU_STATS_P8(net_bytes),
1573         GBENU_STATS_P8(rx_bottom_fifo_drop),
1574         GBENU_STATS_P8(rx_port_mask_drop),
1575         GBENU_STATS_P8(rx_top_fifo_drop),
1576         GBENU_STATS_P8(ale_rate_limit_drop),
1577         GBENU_STATS_P8(ale_vid_ingress_drop),
1578         GBENU_STATS_P8(ale_da_eq_sa_drop),
1579         GBENU_STATS_P8(ale_unknown_ucast),
1580         GBENU_STATS_P8(ale_unknown_ucast_bytes),
1581         GBENU_STATS_P8(ale_unknown_mcast),
1582         GBENU_STATS_P8(ale_unknown_mcast_bytes),
1583         GBENU_STATS_P8(ale_unknown_bcast),
1584         GBENU_STATS_P8(ale_unknown_bcast_bytes),
1585         GBENU_STATS_P8(ale_pol_match),
1586         GBENU_STATS_P8(ale_pol_match_red),
1587         GBENU_STATS_P8(ale_pol_match_yellow),
1588         GBENU_STATS_P8(tx_mem_protect_err),
1589         GBENU_STATS_P8(tx_pri0_drop),
1590         GBENU_STATS_P8(tx_pri1_drop),
1591         GBENU_STATS_P8(tx_pri2_drop),
1592         GBENU_STATS_P8(tx_pri3_drop),
1593         GBENU_STATS_P8(tx_pri4_drop),
1594         GBENU_STATS_P8(tx_pri5_drop),
1595         GBENU_STATS_P8(tx_pri6_drop),
1596         GBENU_STATS_P8(tx_pri7_drop),
1597         GBENU_STATS_P8(tx_pri0_drop_bcnt),
1598         GBENU_STATS_P8(tx_pri1_drop_bcnt),
1599         GBENU_STATS_P8(tx_pri2_drop_bcnt),
1600         GBENU_STATS_P8(tx_pri3_drop_bcnt),
1601         GBENU_STATS_P8(tx_pri4_drop_bcnt),
1602         GBENU_STATS_P8(tx_pri5_drop_bcnt),
1603         GBENU_STATS_P8(tx_pri6_drop_bcnt),
1604         GBENU_STATS_P8(tx_pri7_drop_bcnt),
1605 };
1606
1607 #define XGBE_STATS0_INFO(field)                         \
1608 {                                                       \
1609         "GBE_0:"#field, XGBE_STATS0_MODULE,             \
1610         FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
1611         offsetof(struct xgbe_hw_stats, field)           \
1612 }
1613
1614 #define XGBE_STATS1_INFO(field)                         \
1615 {                                                       \
1616         "GBE_1:"#field, XGBE_STATS1_MODULE,             \
1617         FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
1618         offsetof(struct xgbe_hw_stats, field)           \
1619 }
1620
1621 #define XGBE_STATS2_INFO(field)                         \
1622 {                                                       \
1623         "GBE_2:"#field, XGBE_STATS2_MODULE,             \
1624         FIELD_SIZEOF(struct xgbe_hw_stats, field),      \
1625         offsetof(struct xgbe_hw_stats, field)           \
1626 }
1627
1628 static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1629         /* GBE module 0 */
1630         XGBE_STATS0_INFO(rx_good_frames),
1631         XGBE_STATS0_INFO(rx_broadcast_frames),
1632         XGBE_STATS0_INFO(rx_multicast_frames),
1633         XGBE_STATS0_INFO(rx_oversized_frames),
1634         XGBE_STATS0_INFO(rx_undersized_frames),
1635         XGBE_STATS0_INFO(overrun_type4),
1636         XGBE_STATS0_INFO(overrun_type5),
1637         XGBE_STATS0_INFO(rx_bytes),
1638         XGBE_STATS0_INFO(tx_good_frames),
1639         XGBE_STATS0_INFO(tx_broadcast_frames),
1640         XGBE_STATS0_INFO(tx_multicast_frames),
1641         XGBE_STATS0_INFO(tx_bytes),
1642         XGBE_STATS0_INFO(tx_64byte_frames),
1643         XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1644         XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1645         XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1646         XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1647         XGBE_STATS0_INFO(tx_1024byte_frames),
1648         XGBE_STATS0_INFO(net_bytes),
1649         XGBE_STATS0_INFO(rx_sof_overruns),
1650         XGBE_STATS0_INFO(rx_mof_overruns),
1651         XGBE_STATS0_INFO(rx_dma_overruns),
1652         /* XGBE module 1 */
1653         XGBE_STATS1_INFO(rx_good_frames),
1654         XGBE_STATS1_INFO(rx_broadcast_frames),
1655         XGBE_STATS1_INFO(rx_multicast_frames),
1656         XGBE_STATS1_INFO(rx_pause_frames),
1657         XGBE_STATS1_INFO(rx_crc_errors),
1658         XGBE_STATS1_INFO(rx_align_code_errors),
1659         XGBE_STATS1_INFO(rx_oversized_frames),
1660         XGBE_STATS1_INFO(rx_jabber_frames),
1661         XGBE_STATS1_INFO(rx_undersized_frames),
1662         XGBE_STATS1_INFO(rx_fragments),
1663         XGBE_STATS1_INFO(overrun_type4),
1664         XGBE_STATS1_INFO(overrun_type5),
1665         XGBE_STATS1_INFO(rx_bytes),
1666         XGBE_STATS1_INFO(tx_good_frames),
1667         XGBE_STATS1_INFO(tx_broadcast_frames),
1668         XGBE_STATS1_INFO(tx_multicast_frames),
1669         XGBE_STATS1_INFO(tx_pause_frames),
1670         XGBE_STATS1_INFO(tx_deferred_frames),
1671         XGBE_STATS1_INFO(tx_collision_frames),
1672         XGBE_STATS1_INFO(tx_single_coll_frames),
1673         XGBE_STATS1_INFO(tx_mult_coll_frames),
1674         XGBE_STATS1_INFO(tx_excessive_collisions),
1675         XGBE_STATS1_INFO(tx_late_collisions),
1676         XGBE_STATS1_INFO(tx_underrun),
1677         XGBE_STATS1_INFO(tx_carrier_sense_errors),
1678         XGBE_STATS1_INFO(tx_bytes),
1679         XGBE_STATS1_INFO(tx_64byte_frames),
1680         XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1681         XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1682         XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1683         XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1684         XGBE_STATS1_INFO(tx_1024byte_frames),
1685         XGBE_STATS1_INFO(net_bytes),
1686         XGBE_STATS1_INFO(rx_sof_overruns),
1687         XGBE_STATS1_INFO(rx_mof_overruns),
1688         XGBE_STATS1_INFO(rx_dma_overruns),
1689         /* XGBE module 2 */
1690         XGBE_STATS2_INFO(rx_good_frames),
1691         XGBE_STATS2_INFO(rx_broadcast_frames),
1692         XGBE_STATS2_INFO(rx_multicast_frames),
1693         XGBE_STATS2_INFO(rx_pause_frames),
1694         XGBE_STATS2_INFO(rx_crc_errors),
1695         XGBE_STATS2_INFO(rx_align_code_errors),
1696         XGBE_STATS2_INFO(rx_oversized_frames),
1697         XGBE_STATS2_INFO(rx_jabber_frames),
1698         XGBE_STATS2_INFO(rx_undersized_frames),
1699         XGBE_STATS2_INFO(rx_fragments),
1700         XGBE_STATS2_INFO(overrun_type4),
1701         XGBE_STATS2_INFO(overrun_type5),
1702         XGBE_STATS2_INFO(rx_bytes),
1703         XGBE_STATS2_INFO(tx_good_frames),
1704         XGBE_STATS2_INFO(tx_broadcast_frames),
1705         XGBE_STATS2_INFO(tx_multicast_frames),
1706         XGBE_STATS2_INFO(tx_pause_frames),
1707         XGBE_STATS2_INFO(tx_deferred_frames),
1708         XGBE_STATS2_INFO(tx_collision_frames),
1709         XGBE_STATS2_INFO(tx_single_coll_frames),
1710         XGBE_STATS2_INFO(tx_mult_coll_frames),
1711         XGBE_STATS2_INFO(tx_excessive_collisions),
1712         XGBE_STATS2_INFO(tx_late_collisions),
1713         XGBE_STATS2_INFO(tx_underrun),
1714         XGBE_STATS2_INFO(tx_carrier_sense_errors),
1715         XGBE_STATS2_INFO(tx_bytes),
1716         XGBE_STATS2_INFO(tx_64byte_frames),
1717         XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1718         XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1719         XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1720         XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1721         XGBE_STATS2_INFO(tx_1024byte_frames),
1722         XGBE_STATS2_INFO(net_bytes),
1723         XGBE_STATS2_INFO(rx_sof_overruns),
1724         XGBE_STATS2_INFO(rx_mof_overruns),
1725         XGBE_STATS2_INFO(rx_dma_overruns),
1726 };
1727
1728 #define for_each_intf(i, priv) \
1729         list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1730
1731 #define for_each_sec_slave(slave, priv) \
1732         list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1733
1734 #define first_sec_slave(priv)                                   \
1735         list_first_entry(&priv->secondary_slaves, \
1736                         struct gbe_slave, slave_list)
1737
1738 static void keystone_get_drvinfo(struct net_device *ndev,
1739                                  struct ethtool_drvinfo *info)
1740 {
1741         strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1742         strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1743 }
1744
1745 static u32 keystone_get_msglevel(struct net_device *ndev)
1746 {
1747         struct netcp_intf *netcp = netdev_priv(ndev);
1748
1749         return netcp->msg_enable;
1750 }
1751
1752 static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1753 {
1754         struct netcp_intf *netcp = netdev_priv(ndev);
1755
1756         netcp->msg_enable = value;
1757 }
1758
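/* A netcp interface may be owned by either the 1G (gbe) or the 10G
 * (xgbe) module, so look the interface data up in both before giving
 * up.
 */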
1759 static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
1760 {
1761         struct gbe_intf *gbe_intf;
1762
1763         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1764         if (!gbe_intf)
1765                 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1766
1767         return gbe_intf;
1768 }
1769
1770 static void keystone_get_stat_strings(struct net_device *ndev,
1771                                       uint32_t stringset, uint8_t *data)
1772 {
1773         struct netcp_intf *netcp = netdev_priv(ndev);
1774         struct gbe_intf *gbe_intf;
1775         struct gbe_priv *gbe_dev;
1776         int i;
1777
1778         gbe_intf = keystone_get_intf_data(netcp);
1779         if (!gbe_intf)
1780                 return;
1781         gbe_dev = gbe_intf->gbe_dev;
1782
1783         switch (stringset) {
1784         case ETH_SS_STATS:
1785                 for (i = 0; i < gbe_dev->num_et_stats; i++) {
1786                         memcpy(data, gbe_dev->et_stats[i].desc,
1787                                ETH_GSTRING_LEN);
1788                         data += ETH_GSTRING_LEN;
1789                 }
1790                 break;
1791         case ETH_SS_TEST:
1792                 break;
1793         }
1794 }
1795
1796 static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1797 {
1798         struct netcp_intf *netcp = netdev_priv(ndev);
1799         struct gbe_intf *gbe_intf;
1800         struct gbe_priv *gbe_dev;
1801
1802         gbe_intf = keystone_get_intf_data(netcp);
1803         if (!gbe_intf)
1804                 return -EINVAL;
1805         gbe_dev = gbe_intf->gbe_dev;
1806
1807         switch (stringset) {
1808         case ETH_SS_TEST:
1809                 return 0;
1810         case ETH_SS_STATS:
1811                 return gbe_dev->num_et_stats;
1812         default:
1813                 return -EINVAL;
1814         }
1815 }
1816
1817 static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
1818 {
1819         void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
1820         u32  __iomem *p_stats_entry;
1821         int i;
1822
1823         for (i = 0; i < gbe_dev->num_et_stats; i++) {
1824                 if (gbe_dev->et_stats[i].type == stats_mod) {
1825                         p_stats_entry = base + gbe_dev->et_stats[i].offset;
1826                         gbe_dev->hw_stats[i] = 0;
1827                         gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
1828                 }
1829         }
1830 }
1831
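/* Each hardware statistics counter is a 32-bit register.  The previous
 * raw reading is cached in hw_stats_prev[], and the unsigned delta
 * (which naturally copes with 32-bit wrap-around) is accumulated into
 * the running software totals in hw_stats[].
 */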
1832 static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
1833                                              int et_stats_entry)
1834 {
1835         void __iomem *base = NULL;
1836         u32  __iomem *p_stats_entry;
1837         u32 curr, delta;
1838
1839         /* The hw_stats_regs pointers are already
1840          * properly set to point to the right base:
1841          */
1842         base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
1843         p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
1844         curr = readl(p_stats_entry);
1845         delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
1846         gbe_dev->hw_stats_prev[et_stats_entry] = curr;
1847         gbe_dev->hw_stats[et_stats_entry] += delta;
1848 }
1849
1850 static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1851 {
1852         int i;
1853
1854         for (i = 0; i < gbe_dev->num_et_stats; i++) {
1855                 gbe_update_hw_stats_entry(gbe_dev, i);
1856
1857                 if (data)
1858                         data[i] = gbe_dev->hw_stats[i];
1859         }
1860 }
1861
1862 static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
1863                                                int stats_mod)
1864 {
1865         u32 val;
1866
1867         val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1868
1869         switch (stats_mod) {
1870         case GBE_STATSA_MODULE:
1871         case GBE_STATSB_MODULE:
1872                 val &= ~GBE_STATS_CD_SEL;
1873                 break;
1874         case GBE_STATSC_MODULE:
1875         case GBE_STATSD_MODULE:
1876                 val |= GBE_STATS_CD_SEL;
1877                 break;
1878         default:
1879                 return;
1880         }
1881
1882         /* make the stat module visible */
1883         writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1884 }
1885
1886 static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
1887 {
1888         gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
1889         gbe_reset_mod_stats(gbe_dev, stats_mod);
1890 }
1891
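/* On version 1.4 hardware only one pair of statistics modules is
 * visible at a time (GBE_STATS_CD_SEL in stat_port_en selects either
 * the A/B or the C/D pair), so the et_stats table is read in two
 * halves, switching the visible pair before each half.
 */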
1892 static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1893 {
1894         u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
1895         int et_entry, j, pair;
1896
1897         for (pair = 0; pair < 2; pair++) {
1898                 gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
1899                                                       GBE_STATSC_MODULE :
1900                                                       GBE_STATSA_MODULE));
1901
1902                 for (j = 0; j < half_num_et_stats; j++) {
1903                         et_entry = pair * half_num_et_stats + j;
1904                         gbe_update_hw_stats_entry(gbe_dev, et_entry);
1905
1906                         if (data)
1907                                 data[et_entry] = gbe_dev->hw_stats[et_entry];
1908                 }
1909         }
1910 }
1911
1912 static void keystone_get_ethtool_stats(struct net_device *ndev,
1913                                        struct ethtool_stats *stats,
1914                                        uint64_t *data)
1915 {
1916         struct netcp_intf *netcp = netdev_priv(ndev);
1917         struct gbe_intf *gbe_intf;
1918         struct gbe_priv *gbe_dev;
1919
1920         gbe_intf = keystone_get_intf_data(netcp);
1921         if (!gbe_intf)
1922                 return;
1923
1924         gbe_dev = gbe_intf->gbe_dev;
1925         spin_lock_bh(&gbe_dev->hw_stats_lock);
1926         if (IS_SS_ID_VER_14(gbe_dev))
1927                 gbe_update_stats_ver14(gbe_dev, data);
1928         else
1929                 gbe_update_stats(gbe_dev, data);
1930         spin_unlock_bh(&gbe_dev->hw_stats_lock);
1931 }
1932
1933 static int keystone_get_link_ksettings(struct net_device *ndev,
1934                                        struct ethtool_link_ksettings *cmd)
1935 {
1936         struct netcp_intf *netcp = netdev_priv(ndev);
1937         struct phy_device *phy = ndev->phydev;
1938         struct gbe_intf *gbe_intf;
1939
1940         if (!phy)
1941                 return -EINVAL;
1942
1943         gbe_intf = keystone_get_intf_data(netcp);
1944         if (!gbe_intf)
1945                 return -EINVAL;
1946
1947         if (!gbe_intf->slave)
1948                 return -EINVAL;
1949
1950         phy_ethtool_ksettings_get(phy, cmd);
1951         cmd->base.port = gbe_intf->slave->phy_port_t;
1952
1953         return 0;
1954 }
1955
1956 static int keystone_set_link_ksettings(struct net_device *ndev,
1957                                        const struct ethtool_link_ksettings *cmd)
1958 {
1959         struct netcp_intf *netcp = netdev_priv(ndev);
1960         struct phy_device *phy = ndev->phydev;
1961         struct gbe_intf *gbe_intf;
1962         u8 port = cmd->base.port;
1963         u32 advertising, supported;
1964         u32 features;
1965
1966         ethtool_convert_link_mode_to_legacy_u32(&advertising,
1967                                                 cmd->link_modes.advertising);
1968         ethtool_convert_link_mode_to_legacy_u32(&supported,
1969                                                 cmd->link_modes.supported);
1970         features = advertising & supported;
1971
1972         if (!phy)
1973                 return -EINVAL;
1974
1975         gbe_intf = keystone_get_intf_data(netcp);
1976         if (!gbe_intf)
1977                 return -EINVAL;
1978
1979         if (!gbe_intf->slave)
1980                 return -EINVAL;
1981
1982         if (port != gbe_intf->slave->phy_port_t) {
1983                 if ((port == PORT_TP) && !(features & ADVERTISED_TP))
1984                         return -EINVAL;
1985
1986                 if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
1987                         return -EINVAL;
1988
1989                 if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
1990                         return -EINVAL;
1991
1992                 if ((port == PORT_MII) && !(features & ADVERTISED_MII))
1993                         return -EINVAL;
1994
1995                 if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1996                         return -EINVAL;
1997         }
1998
1999         gbe_intf->slave->phy_port_t = port;
2000         return phy_ethtool_ksettings_set(phy, cmd);
2001 }
2002
2003 #if IS_ENABLED(CONFIG_TI_CPTS)
2004 static int keystone_get_ts_info(struct net_device *ndev,
2005                                 struct ethtool_ts_info *info)
2006 {
2007         struct netcp_intf *netcp = netdev_priv(ndev);
2008         struct gbe_intf *gbe_intf;
2009
2010         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2011         if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
2012                 return -EINVAL;
2013
2014         info->so_timestamping =
2015                 SOF_TIMESTAMPING_TX_HARDWARE |
2016                 SOF_TIMESTAMPING_TX_SOFTWARE |
2017                 SOF_TIMESTAMPING_RX_HARDWARE |
2018                 SOF_TIMESTAMPING_RX_SOFTWARE |
2019                 SOF_TIMESTAMPING_SOFTWARE |
2020                 SOF_TIMESTAMPING_RAW_HARDWARE;
2021         info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
2022         info->tx_types =
2023                 (1 << HWTSTAMP_TX_OFF) |
2024                 (1 << HWTSTAMP_TX_ON);
2025         info->rx_filters =
2026                 (1 << HWTSTAMP_FILTER_NONE) |
2027                 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
2028                 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
2029         return 0;
2030 }
2031 #else
2032 static int keystone_get_ts_info(struct net_device *ndev,
2033                                 struct ethtool_ts_info *info)
2034 {
2035         info->so_timestamping =
2036                 SOF_TIMESTAMPING_TX_SOFTWARE |
2037                 SOF_TIMESTAMPING_RX_SOFTWARE |
2038                 SOF_TIMESTAMPING_SOFTWARE;
2039         info->phc_index = -1;
2040         info->tx_types = 0;
2041         info->rx_filters = 0;
2042         return 0;
2043 }
2044 #endif /* CONFIG_TI_CPTS */
2045
2046 static const struct ethtool_ops keystone_ethtool_ops = {
2047         .get_drvinfo            = keystone_get_drvinfo,
2048         .get_link               = ethtool_op_get_link,
2049         .get_msglevel           = keystone_get_msglevel,
2050         .set_msglevel           = keystone_set_msglevel,
2051         .get_strings            = keystone_get_stat_strings,
2052         .get_sset_count         = keystone_get_sset_count,
2053         .get_ethtool_stats      = keystone_get_ethtool_stats,
2054         .get_link_ksettings     = keystone_get_link_ksettings,
2055         .set_link_ksettings     = keystone_set_link_ksettings,
2056         .get_ts_info            = keystone_get_ts_info,
2057 };
2058
2059 static void gbe_set_slave_mac(struct gbe_slave *slave,
2060                               struct gbe_intf *gbe_intf)
2061 {
2062         struct net_device *ndev = gbe_intf->ndev;
2063
2064         writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
2065         writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
2066 }
2067
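/* Map a slave index to its switch port number: when the host (CPU)
 * port is port 0, slave ports are numbered from 1, so slave n is
 * port n + 1.
 */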
2068 static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
2069 {
2070         if (priv->host_port == 0)
2071                 return slave_num + 1;
2072
2073         return slave_num;
2074 }
2075
2076 static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
2077                                           struct net_device *ndev,
2078                                           struct gbe_slave *slave,
2079                                           int up)
2080 {
2081         struct phy_device *phy = slave->phy;
2082         u32 mac_control = 0;
2083
2084         if (up) {
2085                 mac_control = slave->mac_control;
2086                 if (phy && (phy->speed == SPEED_1000)) {
2087                         mac_control |= MACSL_GIG_MODE;
2088                         mac_control &= ~MACSL_XGIG_MODE;
2089                 } else if (phy && (phy->speed == SPEED_10000)) {
2090                         mac_control |= MACSL_XGIG_MODE;
2091                         mac_control &= ~MACSL_GIG_MODE;
2092                 }
2093
2094                 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2095                                                  mac_control));
2096
2097                 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2098                                      ALE_PORT_STATE,
2099                                      ALE_PORT_STATE_FORWARD);
2100
2101                 if (ndev && slave->open &&
2102                     ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2103                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2104                     (slave->link_interface != XGMII_LINK_MAC_PHY)))
2105                         netif_carrier_on(ndev);
2106         } else {
2107                 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
2108                                                  mac_control));
2109                 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2110                                      ALE_PORT_STATE,
2111                                      ALE_PORT_STATE_DISABLE);
2112                 if (ndev &&
2113                     ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2114                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
2115                     (slave->link_interface != XGMII_LINK_MAC_PHY)))
2116                         netif_carrier_off(ndev);
2117         }
2118
2119         if (phy)
2120                 phy_print_status(phy);
2121 }
2122
2123 static bool gbe_phy_link_status(struct gbe_slave *slave)
2124 {
2125         return !slave->phy || slave->phy->link;
2126 }
2127
2128 #define RGMII_REG_STATUS_LINK   BIT(0)
2129
2130 static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
2131 {
2132         u32 val = 0;
2133
2134         val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
2135         *status = !!(val & RGMII_REG_STATUS_LINK);
2136 }
2137
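/* Recompute a slave's link state as the combination of the switch-side
 * status (SGMII or RGMII link status register) and the PHY status, and
 * run the link-state action only when the combined state has actually
 * changed.
 */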
2138 static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
2139                                           struct gbe_slave *slave,
2140                                           struct net_device *ndev)
2141 {
2142         bool sw_link_state = true, phy_link_state;
2143         int sp = slave->slave_num, link_state;
2144
2145         if (!slave->open)
2146                 return;
2147
2148         if (SLAVE_LINK_IS_RGMII(slave))
2149                 netcp_2u_rgmii_get_port_link(gbe_dev,
2150                                              &sw_link_state);
2151         if (SLAVE_LINK_IS_SGMII(slave))
2152                 sw_link_state =
2153                 netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
2154
2155         phy_link_state = gbe_phy_link_status(slave);
2156         link_state = phy_link_state & sw_link_state;
2157
2158         if (atomic_xchg(&slave->link_state, link_state) != link_state)
2159                 netcp_ethss_link_state_action(gbe_dev, ndev, slave,
2160                                               link_state);
2161 }
2162
2163 static void xgbe_adjust_link(struct net_device *ndev)
2164 {
2165         struct netcp_intf *netcp = netdev_priv(ndev);
2166         struct gbe_intf *gbe_intf;
2167
2168         gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
2169         if (!gbe_intf)
2170                 return;
2171
2172         netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2173                                       ndev);
2174 }
2175
2176 static void gbe_adjust_link(struct net_device *ndev)
2177 {
2178         struct netcp_intf *netcp = netdev_priv(ndev);
2179         struct gbe_intf *gbe_intf;
2180
2181         gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
2182         if (!gbe_intf)
2183                 return;
2184
2185         netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
2186                                       ndev);
2187 }
2188
2189 static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
2190 {
2191         struct gbe_priv *gbe_dev = netdev_priv(ndev);
2192         struct gbe_slave *slave;
2193
2194         for_each_sec_slave(slave, gbe_dev)
2195                 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2196 }
2197
2198 /* Reset EMAC
2199  * Soft reset is set and polled until clear, or until a timeout occurs
2200  */
2201 static int gbe_port_reset(struct gbe_slave *slave)
2202 {
2203         u32 i, v;
2204
2205         /* Set the soft reset bit */
2206         writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
2207
2208         /* Wait for the bit to clear */
2209         for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
2210                 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
2211                 if ((v & SOFT_RESET_MASK) != SOFT_RESET)
2212                         return 0;
2213         }
2214
2215         /* Timeout on the reset */
2216         return GMACSL_RET_WARN_RESET_INCOMPLETE;
2217 }
2218
2219 /* Configure EMAC */
2220 static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2221                             int max_rx_len)
2222 {
2223         void __iomem *rx_maxlen_reg;
2224         u32 xgmii_mode;
2225
2226         if (max_rx_len > NETCP_MAX_FRAME_SIZE)
2227                 max_rx_len = NETCP_MAX_FRAME_SIZE;
2228
2229         /* Enable correct MII mode at SS level */
2230         if (IS_SS_ID_XGBE(gbe_dev) &&
2231             (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
2232                 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
2233                 xgmii_mode |= (1 << slave->slave_num);
2234                 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
2235         }
2236
2237         if (IS_SS_ID_MU(gbe_dev))
2238                 rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
2239         else
2240                 rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
2241
2242         writel(max_rx_len, rx_maxlen_reg);
2243         writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
2244 }
2245
2246 static void gbe_sgmii_rtreset(struct gbe_priv *priv,
2247                               struct gbe_slave *slave, bool set)
2248 {
2249         if (SLAVE_LINK_IS_XGMII(slave))
2250                 return;
2251
2252         netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
2253                             slave->slave_num, set);
2254 }
2255
2256 static void gbe_slave_stop(struct gbe_intf *intf)
2257 {
2258         struct gbe_priv *gbe_dev = intf->gbe_dev;
2259         struct gbe_slave *slave = intf->slave;
2260
2261         if (!IS_SS_ID_2U(gbe_dev))
2262                 gbe_sgmii_rtreset(gbe_dev, slave, true);
2263         gbe_port_reset(slave);
2264         /* Disable forwarding */
2265         cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
2266                              ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
2267         cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
2268                            1 << slave->port_num, 0, 0);
2269
2270         if (!slave->phy)
2271                 return;
2272
2273         phy_stop(slave->phy);
2274         phy_disconnect(slave->phy);
2275         slave->phy = NULL;
2276 }
2277
2278 static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
2279 {
2280         if (SLAVE_LINK_IS_XGMII(slave))
2281                 return;
2282
2283         netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
2284         netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
2285                            slave->link_interface);
2286 }
2287
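/* Bring a slave port up: (re)configure the SGMII layer where present,
 * reset and configure the MAC, program the port MAC address, enable
 * ALE forwarding and, for the MAC_PHY link types, connect and start
 * the PHY.
 */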
2288 static int gbe_slave_open(struct gbe_intf *gbe_intf)
2289 {
2290         struct gbe_priv *priv = gbe_intf->gbe_dev;
2291         struct gbe_slave *slave = gbe_intf->slave;
2292         phy_interface_t phy_mode;
2293         bool has_phy = false;
2294         int err;
2295
2296         void (*hndlr)(struct net_device *) = gbe_adjust_link;
2297
2298         if (!IS_SS_ID_2U(priv))
2299                 gbe_sgmii_config(priv, slave);
2300         gbe_port_reset(slave);
2301         if (!IS_SS_ID_2U(priv))
2302                 gbe_sgmii_rtreset(priv, slave, false);
2303         gbe_port_config(priv, slave, priv->rx_packet_max);
2304         gbe_set_slave_mac(slave, gbe_intf);
2305         /* For NU & 2U switch, map the vlan priorities to zero
2306          * since only priority 0 is configured for use
2307          */
2308         if (IS_SS_ID_MU(priv))
2309                 writel(HOST_TX_PRI_MAP_DEFAULT,
2310                        GBE_REG_ADDR(slave, port_regs, rx_pri_map));
2311
2312         /* enable forwarding */
2313         cpsw_ale_control_set(priv->ale, slave->port_num,
2314                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2315         cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
2316                            1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
2317
2318         if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2319                 has_phy = true;
2320                 phy_mode = PHY_INTERFACE_MODE_SGMII;
2321                 slave->phy_port_t = PORT_MII;
2322         } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
2323                 has_phy = true;
2324                 err = of_get_phy_mode(slave->node, &phy_mode);
2325                 /* if phy-mode is not present, default to
2326                  * PHY_INTERFACE_MODE_RGMII
2327                  */
2328                 if (err)
2329                         phy_mode = PHY_INTERFACE_MODE_RGMII;
2330
2331                 if (!phy_interface_mode_is_rgmii(phy_mode)) {
2332                         dev_err(priv->dev,
2333                                 "Unsupported phy mode %d\n", phy_mode);
2334                         return -EINVAL;
2335                 }
2336                 slave->phy_port_t = PORT_MII;
2337         } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
2338                 has_phy = true;
2339                 phy_mode = PHY_INTERFACE_MODE_NA;
2340                 slave->phy_port_t = PORT_FIBRE;
2341         }
2342
2343         if (has_phy) {
2344                 if (IS_SS_ID_XGBE(priv))
2345                         hndlr = xgbe_adjust_link;
2346
2347                 slave->phy = of_phy_connect(gbe_intf->ndev,
2348                                             slave->phy_node,
2349                                             hndlr, 0,
2350                                             phy_mode);
2351                 if (!slave->phy) {
2352                         dev_err(priv->dev, "phy not found on slave %d\n",
2353                                 slave->slave_num);
2354                         return -ENODEV;
2355                 }
2356                 dev_dbg(priv->dev, "phy found: id is: %s\n",
2357                         phydev_name(slave->phy));
2358                 phy_start(slave->phy);
2359         }
2360         return 0;
2361 }
2362
2363 static void gbe_init_host_port(struct gbe_priv *priv)
2364 {
2365         int bypass_en = 1;
2366
2367         /* Host Tx Pri */
2368         if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
2369                 writel(HOST_TX_PRI_MAP_DEFAULT,
2370                        GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
2371
2372         /* Max length register */
2373         writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2374                                                   rx_maxlen));
2375
2376         cpsw_ale_start(priv->ale);
2377
2378         if (priv->enable_ale)
2379                 bypass_en = 0;
2380
2381         cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2382
2383         cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2384
2385         cpsw_ale_control_set(priv->ale, priv->host_port,
2386                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2387
2388         cpsw_ale_control_set(priv->ale, 0,
2389                              ALE_PORT_UNKNOWN_VLAN_MEMBER,
2390                              GBE_PORT_MASK(priv->ale_ports));
2391
2392         cpsw_ale_control_set(priv->ale, 0,
2393                              ALE_PORT_UNKNOWN_MCAST_FLOOD,
2394                              GBE_PORT_MASK(priv->ale_ports - 1));
2395
2396         cpsw_ale_control_set(priv->ale, 0,
2397                              ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2398                              GBE_PORT_MASK(priv->ale_ports));
2399
2400         cpsw_ale_control_set(priv->ale, 0,
2401                              ALE_PORT_UNTAGGED_EGRESS,
2402                              GBE_PORT_MASK(priv->ale_ports));
2403 }
2404
2405 static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2406 {
2407         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2408         u16 vlan_id;
2409
2410         cpsw_ale_add_mcast(gbe_dev->ale, addr,
2411                            GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2412                            ALE_MCAST_FWD_2);
2413         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2414                 cpsw_ale_add_mcast(gbe_dev->ale, addr,
2415                                    GBE_PORT_MASK(gbe_dev->ale_ports),
2416                                    ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2417         }
2418 }
2419
2420 static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2421 {
2422         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2423         u16 vlan_id;
2424
2425         cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2426
2427         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2428                 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2429                                    ALE_VLAN, vlan_id);
2430 }
2431
2432 static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2433 {
2434         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2435         u16 vlan_id;
2436
2437         cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2438
2439         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2440                 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2441         }
2442 }
2443
2444 static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2445 {
2446         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2447         u16 vlan_id;
2448
2449         cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2450
2451         for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2452                 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2453                                    ALE_VLAN, vlan_id);
2454         }
2455 }
2456
2457 static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2458 {
2459         struct gbe_intf *gbe_intf = intf_priv;
2460         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2461
2462         dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2463                 naddr->addr, naddr->type);
2464
2465         switch (naddr->type) {
2466         case ADDR_MCAST:
2467         case ADDR_BCAST:
2468                 gbe_add_mcast_addr(gbe_intf, naddr->addr);
2469                 break;
2470         case ADDR_UCAST:
2471         case ADDR_DEV:
2472                 gbe_add_ucast_addr(gbe_intf, naddr->addr);
2473                 break;
2474         case ADDR_ANY:
2475                 /* nothing to do for promiscuous */
2476         default:
2477                 break;
2478         }
2479
2480         return 0;
2481 }
2482
2483 static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2484 {
2485         struct gbe_intf *gbe_intf = intf_priv;
2486         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2487
2488         dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2489                 naddr->addr, naddr->type);
2490
2491         switch (naddr->type) {
2492         case ADDR_MCAST:
2493         case ADDR_BCAST:
2494                 gbe_del_mcast_addr(gbe_intf, naddr->addr);
2495                 break;
2496         case ADDR_UCAST:
2497         case ADDR_DEV:
2498                 gbe_del_ucast_addr(gbe_intf, naddr->addr);
2499                 break;
2500         case ADDR_ANY:
2501                 /* nothing to do for promiscuous */
2502         default:
2503                 break;
2504         }
2505
2506         return 0;
2507 }
2508
2509 static int gbe_add_vid(void *intf_priv, int vid)
2510 {
2511         struct gbe_intf *gbe_intf = intf_priv;
2512         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2513
2514         set_bit(vid, gbe_intf->active_vlans);
2515
2516         cpsw_ale_add_vlan(gbe_dev->ale, vid,
2517                           GBE_PORT_MASK(gbe_dev->ale_ports),
2518                           GBE_MASK_NO_PORTS,
2519                           GBE_PORT_MASK(gbe_dev->ale_ports),
2520                           GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2521
2522         return 0;
2523 }
2524
2525 static int gbe_del_vid(void *intf_priv, int vid)
2526 {
2527         struct gbe_intf *gbe_intf = intf_priv;
2528         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2529
2530         cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2531         clear_bit(vid, gbe_intf->active_vlans);
2532         return 0;
2533 }
2534
2535 #if IS_ENABLED(CONFIG_TI_CPTS)
2536 #define HAS_PHY_TXTSTAMP(p) ((p)->drv && (p)->drv->txtstamp)
2537 #define HAS_PHY_RXTSTAMP(p) ((p)->drv && (p)->drv->rxtstamp)
2538
2539 static void gbe_txtstamp(void *context, struct sk_buff *skb)
2540 {
2541         struct gbe_intf *gbe_intf = context;
2542         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2543
2544         cpts_tx_timestamp(gbe_dev->cpts, skb);
2545 }
2546
2547 static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
2548                               const struct netcp_packet *p_info)
2549 {
2550         struct sk_buff *skb = p_info->skb;
2551
2552         return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
2553 }
2554
2555 static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2556                                  struct netcp_packet *p_info)
2557 {
2558         struct phy_device *phydev = p_info->skb->dev->phydev;
2559         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2560
2561         if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
2562             !gbe_dev->tx_ts_enabled)
2563                 return 0;
2564
2565         /* If phy has the txtstamp api, assume it will do it.
2566          * We mark it here because skb_tx_timestamp() is called
2567          * after all the txhooks are called.
2568          */
2569         if (phydev && HAS_PHY_TXTSTAMP(phydev)) {
2570                 skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2571                 return 0;
2572         }
2573
2574         if (gbe_need_txtstamp(gbe_intf, p_info)) {
2575                 p_info->txtstamp = gbe_txtstamp;
2576                 p_info->ts_context = (void *)gbe_intf;
2577                 skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
2578         }
2579
2580         return 0;
2581 }
2582
2583 static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
2584 {
2585         struct phy_device *phydev = p_info->skb->dev->phydev;
2586         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2587
2588         if (p_info->rxtstamp_complete)
2589                 return 0;
2590
2591         if (phydev && HAS_PHY_RXTSTAMP(phydev)) {
2592                 p_info->rxtstamp_complete = true;
2593                 return 0;
2594         }
2595
2596         if (gbe_dev->rx_ts_enabled)
2597                 cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
2598
2599         p_info->rxtstamp_complete = true;
2600
2601         return 0;
2602 }
2603
2604 static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2605 {
2606         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2607         struct cpts *cpts = gbe_dev->cpts;
2608         struct hwtstamp_config cfg;
2609
2610         if (!cpts)
2611                 return -EOPNOTSUPP;
2612
2613         cfg.flags = 0;
2614         cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2615         cfg.rx_filter = gbe_dev->rx_ts_enabled;
2616
2617         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2618 }
2619
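/* Program the per-slave timestamping control registers.  When neither
 * rx nor tx timestamping is enabled the port's ts_ctl register is
 * simply cleared; otherwise PTP event messages (EVENT_MSG_BITS) are
 * enabled for whichever directions were requested via SIOCSHWTSTAMP.
 */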
2620 static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
2621 {
2622         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2623         struct gbe_slave *slave = gbe_intf->slave;
2624         u32 ts_en, seq_id, ctl;
2625
2626         if (!gbe_dev->rx_ts_enabled &&
2627             !gbe_dev->tx_ts_enabled) {
2628                 writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
2629                 return;
2630         }
2631
2632         seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2633         ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
2634         ctl = ETH_P_1588 | TS_TTL_NONZERO |
2635                 (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
2636                 (slave->ts_ctl.uni ?  TS_UNI_EN :
2637                         slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
2638
2639         if (gbe_dev->tx_ts_enabled)
2640                 ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
2641
2642         if (gbe_dev->rx_ts_enabled)
2643                 ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
2644
2645         writel(ts_en,  GBE_REG_ADDR(slave, port_regs, ts_ctl));
2646         writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
2647         writel(ctl,    GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
2648 }
2649
2650 static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
2651 {
2652         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2653         struct cpts *cpts = gbe_dev->cpts;
2654         struct hwtstamp_config cfg;
2655
2656         if (!cpts)
2657                 return -EOPNOTSUPP;
2658
2659         if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2660                 return -EFAULT;
2661
2662         /* reserved for future extensions */
2663         if (cfg.flags)
2664                 return -EINVAL;
2665
2666         switch (cfg.tx_type) {
2667         case HWTSTAMP_TX_OFF:
2668                 gbe_dev->tx_ts_enabled = 0;
2669                 break;
2670         case HWTSTAMP_TX_ON:
2671                 gbe_dev->tx_ts_enabled = 1;
2672                 break;
2673         default:
2674                 return -ERANGE;
2675         }
2676
2677         switch (cfg.rx_filter) {
2678         case HWTSTAMP_FILTER_NONE:
2679                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
2680                 break;
2681         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2682         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2683         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2684                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2685                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2686                 break;
2687         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2688         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2689         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2690         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2691         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2692         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2693         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2694         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2695         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2696                 gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
2697                 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2698                 break;
2699         default:
2700                 return -ERANGE;
2701         }
2702
2703         gbe_hwtstamp(gbe_intf);
2704
2705         return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2706 }
2707
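/* The CPTS block is shared by all interfaces of the subsystem, so
 * cpts_registered acts as a reference count: the device is registered
 * for the first user and unregistered only when the last user goes
 * away.
 */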
2708 static void gbe_register_cpts(struct gbe_priv *gbe_dev)
2709 {
2710         if (!gbe_dev->cpts)
2711                 return;
2712
2713         if (gbe_dev->cpts_registered > 0)
2714                 goto done;
2715
2716         if (cpts_register(gbe_dev->cpts)) {
2717                 dev_err(gbe_dev->dev, "error registering cpts device\n");
2718                 return;
2719         }
2720
2721 done:
2722         ++gbe_dev->cpts_registered;
2723 }
2724
2725 static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2726 {
2727         if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
2728                 return;
2729
2730         if (--gbe_dev->cpts_registered)
2731                 return;
2732
2733         cpts_unregister(gbe_dev->cpts);
2734 }
2735 #else
2736 static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
2737                                         struct netcp_packet *p_info)
2738 {
2739         return 0;
2740 }
2741
2742 static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
2743                                struct netcp_packet *p_info)
2744 {
2745         return 0;
2746 }
2747
2748 static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
2749                                struct ifreq *ifr, int cmd)
2750 {
2751         return -EOPNOTSUPP;
2752 }
2753
2754 static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
2755 {
2756 }
2757
2758 static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
2759 {
2760 }
2761
2762 static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
2763 {
2764         return -EOPNOTSUPP;
2765 }
2766
2767 static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
2768 {
2769         return -EOPNOTSUPP;
2770 }
2771 #endif /* CONFIG_TI_CPTS */
2772
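/* Promiscuous mode is emulated through the ALE: address learning and
 * source-address updates are disabled on every port, stale entries are aged
 * out, multicast entries are flushed, and unknown unicast is flooded to the
 * host port.  Disabling promiscuous mode re-enables learning and stops the
 * unicast flooding.
 */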
2773 static int gbe_set_rx_mode(void *intf_priv, bool promisc)
2774 {
2775         struct gbe_intf *gbe_intf = intf_priv;
2776         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2777         struct cpsw_ale *ale = gbe_dev->ale;
2778         unsigned long timeout;
2779         int i, ret = -ETIMEDOUT;
2780
2781         /* Disable (promisc) / enable (non-promisc) address learning for
2782          * all ports (host is port 0, slaves are ports 1 and up)
2783          */
2784         for (i = 0; i <= gbe_dev->num_slaves; i++) {
2785                 cpsw_ale_control_set(ale, i,
2786                                      ALE_PORT_NOLEARN, !!promisc);
2787                 cpsw_ale_control_set(ale, i,
2788                                      ALE_PORT_NO_SA_UPDATE, !!promisc);
2789         }
2790
2791         if (!promisc) {
2792                 /* Don't Flood All Unicast Packets to Host port */
2793                 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
2794                 dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
2795                 return 0;
2796         }
2797
2798         timeout = jiffies + HZ;
2799
2800         /* Age out all untouched (stale) ALE entries */
2801         cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2802         do {
2803                 cpu_relax();
2804                 if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
2805                         ret = 0;
2806                         break;
2807                 }
2808
2809         } while (time_after(timeout, jiffies));
2810
2811         /* Make sure it is not a false timeout */
2812         if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
2813                 return ret;
2814
2815         cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
2816
2817         /* Clear all mcast from ALE */
2818         cpsw_ale_flush_multicast(ale,
2819                                  GBE_PORT_MASK(gbe_dev->ale_ports),
2820                                  -1);
2821
2822         /* Flood All Unicast Packets to Host port */
2823         cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
2824         dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
2825         return ret;
2826 }
2827
2828 static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2829 {
2830         struct gbe_intf *gbe_intf = intf_priv;
2831         struct phy_device *phy = gbe_intf->slave->phy;
2832
2833         if (!phy || !phy->drv->hwtstamp) {
2834                 switch (cmd) {
2835                 case SIOCGHWTSTAMP:
2836                         return gbe_hwtstamp_get(gbe_intf, req);
2837                 case SIOCSHWTSTAMP:
2838                         return gbe_hwtstamp_set(gbe_intf, req);
2839                 }
2840         }
2841
2842         if (phy)
2843                 return phy_mii_ioctl(phy, req, cmd);
2844
2845         return -EOPNOTSUPP;
2846 }
2847
2848 static void netcp_ethss_timer(struct timer_list *t)
2849 {
2850         struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
2851         struct gbe_intf *gbe_intf;
2852         struct gbe_slave *slave;
2853
2854         /* Check & update SGMII link state of interfaces */
2855         for_each_intf(gbe_intf, gbe_dev) {
2856                 if (!gbe_intf->slave->open)
2857                         continue;
2858                 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2859                                               gbe_intf->ndev);
2860         }
2861
2862         /* Check & update SGMII link state of secondary ports */
2863         for_each_sec_slave(slave, gbe_dev) {
2864                 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2865         }
2866
2867         /* The timer callback runs in softirq (BH) context, so plain spin_lock is enough */
2868         spin_lock(&gbe_dev->hw_stats_lock);
2869
2870         if (IS_SS_ID_VER_14(gbe_dev))
2871                 gbe_update_stats_ver14(gbe_dev, NULL);
2872         else
2873                 gbe_update_stats(gbe_dev, NULL);
2874
2875         spin_unlock(&gbe_dev->hw_stats_lock);
2876
2877         gbe_dev->timer.expires  = jiffies + GBE_TIMER_INTERVAL;
2878         add_timer(&gbe_dev->timer);
2879 }
2880
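/* TX/RX hooks registered with the NETCP core in gbe_open(); they run per
 * packet to select the TX pipe and to handle hardware timestamping.
 */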
2881 static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
2882 {
2883         struct gbe_intf *gbe_intf = data;
2884
2885         p_info->tx_pipe = &gbe_intf->tx_pipe;
2886
2887         return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
2888 }
2889
2890 static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
2891 {
2892         struct gbe_intf *gbe_intf = data;
2893
2894         return gbe_rxtstamp(gbe_intf, p_info);
2895 }
2896
2897 static int gbe_open(void *intf_priv, struct net_device *ndev)
2898 {
2899         struct gbe_intf *gbe_intf = intf_priv;
2900         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2901         struct netcp_intf *netcp = netdev_priv(ndev);
2902         struct gbe_slave *slave = gbe_intf->slave;
2903         int port_num = slave->port_num;
2904         u32 reg, val;
2905         int ret;
2906
2907         reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2908         dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d), GBE identification value 0x%x\n",
2909                 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2910                 GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2911
2912         /* For 10G and NetCP 1.5, direct packets to the port via the tag info */
2913         if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
2914                 gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
2915
2916         if (gbe_dev->enable_ale)
2917                 gbe_intf->tx_pipe.switch_to_port = 0;
2918         else
2919                 gbe_intf->tx_pipe.switch_to_port = port_num;
2920
2921         dev_dbg(gbe_dev->dev,
2922                 "opened TX channel %s: %p to port %d, flags %d\n",
2923                 gbe_intf->tx_pipe.dma_chan_name,
2924                 gbe_intf->tx_pipe.dma_channel,
2925                 gbe_intf->tx_pipe.switch_to_port,
2926                 gbe_intf->tx_pipe.flags);
2927
2928         gbe_slave_stop(gbe_intf);
2929
2930         /* disable priority elevation and enable statistics on all ports */
2931         writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2932
2933         /* Control register */
2934         val = GBE_CTL_P0_ENABLE;
2935         if (IS_SS_ID_MU(gbe_dev)) {
2936                 val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
2937                 netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
2938         }
2939         writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2940
2941         /* All statistics enabled and STAT AB visible by default */
2942         writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2943                                                     stat_port_en));
2944
2945         ret = gbe_slave_open(gbe_intf);
2946         if (ret)
2947                 goto fail;
2948
2949         netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2950         netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2951
2952         slave->open = true;
2953         netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2954
2955         gbe_register_cpts(gbe_dev);
2956
2957         return 0;
2958
2959 fail:
2960         gbe_slave_stop(gbe_intf);
2961         return ret;
2962 }
2963
2964 static int gbe_close(void *intf_priv, struct net_device *ndev)
2965 {
2966         struct gbe_intf *gbe_intf = intf_priv;
2967         struct netcp_intf *netcp = netdev_priv(ndev);
2968         struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2969
2970         gbe_unregister_cpts(gbe_dev);
2971
2972         gbe_slave_stop(gbe_intf);
2973
2974         netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
2975         netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
2976
2977         gbe_intf->slave->open = false;
2978         atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2979         return 0;
2980 }
2981
2982 #if IS_ENABLED(CONFIG_TI_CPTS)
2983 static void init_slave_ts_ctl(struct gbe_slave *slave)
2984 {
2985         slave->ts_ctl.uni = 1;
2986         slave->ts_ctl.dst_port_map =
2987                 (TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
2988         slave->ts_ctl.maddr_map =
2989                 (TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
2990 }
2991
2992 #else
2993 static void init_slave_ts_ctl(struct gbe_slave *slave)
2994 {
2995 }
2996 #endif /* CONFIG_TI_CPTS */
2997
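/* Parse a slave port's device tree node (slave-port, link-interface,
 * phy-handle) and compute the base addresses of its port and EMAC register
 * blocks for the detected subsystem type.
 */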
2998 static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2999                       struct device_node *node)
3000 {
3001         int port_reg_num;
3002         u32 port_reg_ofs, emac_reg_ofs;
3003         u32 port_reg_blk_sz, emac_reg_blk_sz;
3004
3005         if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
3006                 dev_err(gbe_dev->dev, "missing slave-port parameter\n");
3007                 return -EINVAL;
3008         }
3009
3010         if (of_property_read_u32(node, "link-interface",
3011                                  &slave->link_interface)) {
3012                 dev_warn(gbe_dev->dev,
3013                          "missing link-interface value defaulting to 1G mac-phy link\n");
3014                 slave->link_interface = SGMII_LINK_MAC_PHY;
3015         }
3016
3017         slave->node = node;
3018         slave->open = false;
3019         if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3020             (slave->link_interface == RGMII_LINK_MAC_PHY) ||
3021             (slave->link_interface == XGMII_LINK_MAC_PHY))
3022                 slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
3023         slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
3024
3025         if (slave->link_interface >= XGMII_LINK_MAC_PHY)
3026                 slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
3027         else
3028                 slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
3029
3030         /* EMAC register blocks are contiguous across slaves, but port register blocks are not */
3031         port_reg_num = slave->slave_num;
3032         if (IS_SS_ID_VER_14(gbe_dev)) {
3033                 if (slave->slave_num > 1) {
3034                         port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
3035                         port_reg_num -= 2;
3036                 } else {
3037                         port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
3038                 }
3039                 emac_reg_ofs = GBE13_EMAC_OFFSET;
3040                 port_reg_blk_sz = 0x30;
3041                 emac_reg_blk_sz = 0x40;
3042         } else if (IS_SS_ID_MU(gbe_dev)) {
3043                 port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
3044                 emac_reg_ofs = GBENU_EMAC_OFFSET;
3045                 port_reg_blk_sz = 0x1000;
3046                 emac_reg_blk_sz = 0x1000;
3047         } else if (IS_SS_ID_XGBE(gbe_dev)) {
3048                 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
3049                 emac_reg_ofs = XGBE10_EMAC_OFFSET;
3050                 port_reg_blk_sz = 0x30;
3051                 emac_reg_blk_sz = 0x40;
3052         } else {
3053                 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
3054                         gbe_dev->ss_version);
3055                 return -EINVAL;
3056         }
3057
3058         slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
3059                                 (port_reg_blk_sz * port_reg_num);
3060         slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
3061                                 (emac_reg_blk_sz * slave->slave_num);
3062
3063         if (IS_SS_ID_VER_14(gbe_dev)) {
3064                 /* Initialize slave port register offsets */
3065                 GBE_SET_REG_OFS(slave, port_regs, port_vlan);
3066                 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3067                 GBE_SET_REG_OFS(slave, port_regs, sa_lo);
3068                 GBE_SET_REG_OFS(slave, port_regs, sa_hi);
3069                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3070                 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3071                 GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3072                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3073                 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3074
3075                 /* Initialize EMAC register offsets */
3076                 GBE_SET_REG_OFS(slave, emac_regs, mac_control);
3077                 GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3078                 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3079
3080         } else if (IS_SS_ID_MU(gbe_dev)) {
3081                 /* Initialize slave port register offsets */
3082                 GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
3083                 GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
3084                 GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
3085                 GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
3086                 GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
3087                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
3088                 GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3089                 GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
3090                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3091                 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
3092                 GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
3093
3094                 /* Initialize EMAC register offsets */
3095                 GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
3096                 GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
3097
3098         } else if (IS_SS_ID_XGBE(gbe_dev)) {
3099                 /* Initialize slave port register offsets */
3100                 XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
3101                 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
3102                 XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
3103                 XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
3104                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
3105                 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
3106                 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
3107                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
3108                 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
3109
3110                 /* Initialize EMAC register offsets */
3111                 XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
3112                 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
3113                 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
3114         }
3115
3116         atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
3117
3118         init_slave_ts_ctl(slave);
3119         return 0;
3120 }
3121
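/* Secondary slave ports are switch ports that have no NETCP network
 * interface of their own.  They are configured and opened here at probe
 * time and stay up for the life of the driver; a dummy netdev is allocated
 * only so that of_phy_connect() can be used for their PHYs.
 */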
3122 static void init_secondary_ports(struct gbe_priv *gbe_dev,
3123                                  struct device_node *node)
3124 {
3125         struct device *dev = gbe_dev->dev;
3126         phy_interface_t phy_mode;
3127         struct gbe_priv **priv;
3128         struct device_node *port;
3129         struct gbe_slave *slave;
3130         bool mac_phy_link = false;
3131
3132         for_each_child_of_node(node, port) {
3133                 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
3134                 if (!slave) {
3135                         dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
3136                                 port);
3137                         continue;
3138                 }
3139
3140                 if (init_slave(gbe_dev, slave, port)) {
3141                         dev_err(dev,
3142                                 "Failed to initialize secondary port(%pOFn), skipping...\n",
3143                                 port);
3144                         devm_kfree(dev, slave);
3145                         continue;
3146                 }
3147
3148                 if (!IS_SS_ID_2U(gbe_dev))
3149                         gbe_sgmii_config(gbe_dev, slave);
3150                 gbe_port_reset(slave);
3151                 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
3152                 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
3153                 gbe_dev->num_slaves++;
3154                 if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
3155                     (slave->link_interface == XGMII_LINK_MAC_PHY))
3156                         mac_phy_link = true;
3157
3158                 slave->open = true;
3159                 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
3160                         of_node_put(port);
3161                         break;
3162                 }
3163         }
3164
3165         /* of_phy_connect() is needed only for MAC-PHY interface */
3166         if (!mac_phy_link)
3167                 return;
3168
3169         /* Allocate dummy netdev device for attaching to phy device */
3170         gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
3171                                         NET_NAME_UNKNOWN, ether_setup);
3172         if (!gbe_dev->dummy_ndev) {
3173                 dev_err(dev,
3174                         "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
3175                 return;
3176         }
3177         priv = netdev_priv(gbe_dev->dummy_ndev);
3178         *priv = gbe_dev;
3179
3180         if (slave->link_interface == SGMII_LINK_MAC_PHY) {
3181                 phy_mode = PHY_INTERFACE_MODE_SGMII;
3182                 slave->phy_port_t = PORT_MII;
3183         } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
3184                 phy_mode = PHY_INTERFACE_MODE_RGMII;
3185                 slave->phy_port_t = PORT_MII;
3186         } else {
3187                 phy_mode = PHY_INTERFACE_MODE_NA;
3188                 slave->phy_port_t = PORT_FIBRE;
3189         }
3190
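        /* Note: phy_mode and phy_port_t above are derived from the last
         * secondary slave parsed; all MAC-PHY secondary slaves are then
         * connected with that same PHY interface mode.
         */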
3191         for_each_sec_slave(slave, gbe_dev) {
3192                 if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
3193                     (slave->link_interface != RGMII_LINK_MAC_PHY) &&
3194                     (slave->link_interface != XGMII_LINK_MAC_PHY))
3195                         continue;
3196                 slave->phy =
3197                         of_phy_connect(gbe_dev->dummy_ndev,
3198                                        slave->phy_node,
3199                                        gbe_adjust_link_sec_slaves,
3200                                        0, phy_mode);
3201                 if (!slave->phy) {
3202                         dev_err(dev, "phy not found for slave %d\n",
3203                                 slave->slave_num);
3204                 } else {
3205                         dev_dbg(dev, "phy found: %s\n",
3206                                 phydev_name(slave->phy));
3207                         phy_start(slave->phy);
3208                 }
3209         }
3210 }
3211
3212 static void free_secondary_ports(struct gbe_priv *gbe_dev)
3213 {
3214         struct gbe_slave *slave;
3215
3216         while (!list_empty(&gbe_dev->secondary_slaves)) {
3217                 slave = first_sec_slave(gbe_dev);
3218
3219                 if (slave->phy)
3220                         phy_disconnect(slave->phy);
3221                 list_del(&slave->slave_list);
3222         }
3223         if (gbe_dev->dummy_ndev)
3224                 free_netdev(gbe_dev->dummy_ndev);
3225 }
3226
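/* Map the XGBE subsystem, switch module and SerDes register regions from
 * the device tree, allocate the hardware statistics buffers and fill in the
 * XGBE-specific register offsets.
 */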
3227 static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
3228                                  struct device_node *node)
3229 {
3230         struct resource res;
3231         void __iomem *regs;
3232         int ret, i;
3233
3234         ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
3235         if (ret) {
3236                 dev_err(gbe_dev->dev,
3237                         "Can't xlate xgbe of node(%pOFn) ss address at %d\n",
3238                         node, XGBE_SS_REG_INDEX);
3239                 return ret;
3240         }
3241
3242         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3243         if (IS_ERR(regs)) {
3244                 dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
3245                 return PTR_ERR(regs);
3246         }
3247         gbe_dev->ss_regs = regs;
3248
3249         ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
3250         if (ret) {
3251                 dev_err(gbe_dev->dev,
3252                         "Can't xlate xgbe of node(%pOFn) sm address at %d\n",
3253                         node, XGBE_SM_REG_INDEX);
3254                 return ret;
3255         }
3256
3257         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3258         if (IS_ERR(regs)) {
3259                 dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
3260                 return PTR_ERR(regs);
3261         }
3262         gbe_dev->switch_regs = regs;
3263
3264         ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
3265         if (ret) {
3266                 dev_err(gbe_dev->dev,
3267                         "Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
3268                         node, XGBE_SERDES_REG_INDEX);
3269                 return ret;
3270         }
3271
3272         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3273         if (IS_ERR(regs)) {
3274                 dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
3275                 return PTR_ERR(regs);
3276         }
3277         gbe_dev->xgbe_serdes_regs = regs;
3278
3279         gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3280         gbe_dev->et_stats = xgbe10_et_stats;
3281         gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
3282
3283         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3284                                          gbe_dev->num_et_stats, sizeof(u64),
3285                                          GFP_KERNEL);
3286         if (!gbe_dev->hw_stats) {
3287                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3288                 return -ENOMEM;
3289         }
3290
3291         gbe_dev->hw_stats_prev =
3292                 devm_kcalloc(gbe_dev->dev,
3293                              gbe_dev->num_et_stats, sizeof(u32),
3294                              GFP_KERNEL);
3295         if (!gbe_dev->hw_stats_prev) {
3296                 dev_err(gbe_dev->dev,
3297                         "hw_stats_prev memory allocation failed\n");
3298                 return -ENOMEM;
3299         }
3300
3301         gbe_dev->ss_version = XGBE_SS_VERSION_10;
3302         gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
3303                                         XGBE10_SGMII_MODULE_OFFSET;
3304         gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
3305
3306         for (i = 0; i < gbe_dev->max_num_ports; i++)
3307                 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3308                         XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
3309
3310         gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
3311         gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
3312         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3313         gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
3314         gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
3315         gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3316
3317         /* Subsystem registers */
3318         XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3319         XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
3320
3321         /* Switch module registers */
3322         XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3323         XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3324         XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3325         XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3326         XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3327
3328         /* Host port registers */
3329         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3330         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3331         XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3332         return 0;
3333 }
3334
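/* Map the GBE subsystem register region and read the version register at
 * its base so the probe code can pick the matching register layout.
 */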
3335 static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
3336                                     struct device_node *node)
3337 {
3338         struct resource res;
3339         void __iomem *regs;
3340         int ret;
3341
3342         ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
3343         if (ret) {
3344                 dev_err(gbe_dev->dev,
3345                         "Can't translate gbe node(%pOFn) ss address at index %d\n",
3346                         node, GBE_SS_REG_INDEX);
3347                 return ret;
3348         }
3349
3350         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3351         if (IS_ERR(regs)) {
3352                 dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
3353                 return PTR_ERR(regs);
3354         }
3355         gbe_dev->ss_regs = regs;
3356         gbe_dev->ss_version = readl(gbe_dev->ss_regs);
3357         return 0;
3358 }
3359
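/* Register setup for the version 1.4 (GBE13 / K2HK) subsystem: map the
 * SGMII port 3/4 and switch module regions and fill in the GBE13-specific
 * offsets and statistics tables.
 */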
3360 static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
3361                                 struct device_node *node)
3362 {
3363         struct resource res;
3364         void __iomem *regs;
3365         int i, ret;
3366
3367         ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
3368         if (ret) {
3369                 dev_err(gbe_dev->dev,
3370                         "Can't translate of gbe node(%pOFn) address at index %d\n",
3371                         node, GBE_SGMII34_REG_INDEX);
3372                 return ret;
3373         }
3374
3375         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3376         if (IS_ERR(regs)) {
3377                 dev_err(gbe_dev->dev,
3378                         "Failed to map gbe sgmii port34 register base\n");
3379                 return PTR_ERR(regs);
3380         }
3381         gbe_dev->sgmii_port34_regs = regs;
3382
3383         ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
3384         if (ret) {
3385                 dev_err(gbe_dev->dev,
3386                         "Can't translate of gbe node(%pOFn) address at index %d\n",
3387                         node, GBE_SM_REG_INDEX);
3388                 return ret;
3389         }
3390
3391         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3392         if (IS_ERR(regs)) {
3393                 dev_err(gbe_dev->dev,
3394                         "Failed to map gbe switch module register base\n");
3395                 return PTR_ERR(regs);
3396         }
3397         gbe_dev->switch_regs = regs;
3398
3399         gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
3400         gbe_dev->et_stats = gbe13_et_stats;
3401         gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
3402
3403         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3404                                          gbe_dev->num_et_stats, sizeof(u64),
3405                                          GFP_KERNEL);
3406         if (!gbe_dev->hw_stats) {
3407                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3408                 return -ENOMEM;
3409         }
3410
3411         gbe_dev->hw_stats_prev =
3412                 devm_kcalloc(gbe_dev->dev,
3413                              gbe_dev->num_et_stats, sizeof(u32),
3414                              GFP_KERNEL);
3415         if (!gbe_dev->hw_stats_prev) {
3416                 dev_err(gbe_dev->dev,
3417                         "hw_stats_prev memory allocation failed\n");
3418                 return -ENOMEM;
3419         }
3420
3421         gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
3422         gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
3423
3424         /* K2HK has only 2 hw stats modules visible at a time, so
3425          * modules 0 & 2 point to one base and
3426          * modules 1 & 3 point to the other base
3427          */
3428         for (i = 0; i < gbe_dev->max_num_slaves; i++) {
3429                 gbe_dev->hw_stats_regs[i] =
3430                         gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
3431                         (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
3432         }
3433
3434         gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
3435         gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
3436         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3437         gbe_dev->host_port = GBE13_HOST_PORT_NUM;
3438         gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
3439         gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
3440
3441         /* Subsystem registers */
3442         GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3443
3444         /* Switch module registers */
3445         GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3446         GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
3447         GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
3448         GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3449         GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3450         GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
3451
3452         /* Host port registers */
3453         GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3454         GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3455         return 0;
3456 }
3457
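/* Register setup for the NU/2U (NetCP 1.5) subsystems: only the switch
 * module region comes from the device tree here; the SGMII block is an
 * offset from the subsystem registers mapped earlier, and the host port,
 * stats, CPTS and ALE blocks are offsets from the switch module registers.
 */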
3458 static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
3459                                 struct device_node *node)
3460 {
3461         struct resource res;
3462         void __iomem *regs;
3463         int i, ret;
3464
3465         gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
3466         gbe_dev->et_stats = gbenu_et_stats;
3467
3468         if (IS_SS_ID_MU(gbe_dev))
3469                 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3470                         (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
3471         else
3472                 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
3473                                         GBENU_ET_STATS_PORT_SIZE;
3474
3475         gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
3476                                          gbe_dev->num_et_stats, sizeof(u64),
3477                                          GFP_KERNEL);
3478         if (!gbe_dev->hw_stats) {
3479                 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
3480                 return -ENOMEM;
3481         }
3482
3483         gbe_dev->hw_stats_prev =
3484                 devm_kcalloc(gbe_dev->dev,
3485                              gbe_dev->num_et_stats, sizeof(u32),
3486                              GFP_KERNEL);
3487         if (!gbe_dev->hw_stats_prev) {
3488                 dev_err(gbe_dev->dev,
3489                         "hw_stats_prev memory allocation failed\n");
3490                 return -ENOMEM;
3491         }
3492
3493         ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
3494         if (ret) {
3495                 dev_err(gbe_dev->dev,
3496                         "Can't translate of gbenu node(%pOFn) addr at index %d\n",
3497                         node, GBENU_SM_REG_INDEX);
3498                 return ret;
3499         }
3500
3501         regs = devm_ioremap_resource(gbe_dev->dev, &res);
3502         if (IS_ERR(regs)) {
3503                 dev_err(gbe_dev->dev,
3504                         "Failed to map gbenu switch module register base\n");
3505                 return PTR_ERR(regs);
3506         }
3507         gbe_dev->switch_regs = regs;
3508
3509         if (!IS_SS_ID_2U(gbe_dev))
3510                 gbe_dev->sgmii_port_regs =
3511                        gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
3512
3513         /* Although the SGMII modules are memory-mapped as one contiguous
3514          * region on GBENU devices, setting sgmii_port34_regs lets the
3515          * SGMII accessors share the same code path as on older devices
3516          */
3517         gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
3518                                      (2 * GBENU_SGMII_MODULE_SIZE);
3519
3520         gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
3521
3522         for (i = 0; i < (gbe_dev->max_num_ports); i++)
3523                 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
3524                         GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
3525
3526         gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
3527         gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
3528         gbe_dev->ale_ports = gbe_dev->max_num_ports;
3529         gbe_dev->host_port = GBENU_HOST_PORT_NUM;
3530         gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
3531
3532         /* Subsystem registers */
3533         GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
3534         /* ok to set for MU, but used by 2U only */
3535         GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
3536
3537         /* Switch module registers */
3538         GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
3539         GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
3540         GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
3541         GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
3542
3543         /* Host port registers */
3544         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
3545         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
3546
3547         /* For NU only; 2U does not need tx_pri_map.
3548          * The NU CPPI port 0 TX packet streaming interface has (n-1)*8
3549          * egress threads, while 2U has only one such thread
3550          */
3551         GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
3552         return 0;
3553 }