// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME        "Marvell-CGX/RPM"
#define DRV_STRING      "Marvell CGX/RPM Driver"

static LIST_HEAD(cgx_list);

/* Convert firmware speed encoding to user format (Mbps) */
static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
        [CGX_LINK_NONE] = 0,
        [CGX_LINK_10M] = 10,
        [CGX_LINK_100M] = 100,
        [CGX_LINK_1G] = 1000,
        [CGX_LINK_2HG] = 2500,
        [CGX_LINK_5G] = 5000,
        [CGX_LINK_10G] = 10000,
        [CGX_LINK_20G] = 20000,
        [CGX_LINK_25G] = 25000,
        [CGX_LINK_40G] = 40000,
        [CGX_LINK_50G] = 50000,
        [CGX_LINK_80G] = 80000,
        [CGX_LINK_100G] = 100000,
};

/* Convert firmware lmac type encoding to string */
static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
        [LMAC_MODE_SGMII] = "SGMII",
        [LMAC_MODE_XAUI] = "XAUI",
        [LMAC_MODE_RXAUI] = "RXAUI",
        [LMAC_MODE_10G_R] = "10G_R",
        [LMAC_MODE_40G_R] = "40G_R",
        [LMAC_MODE_QSGMII] = "QSGMII",
        [LMAC_MODE_25G_R] = "25G_R",
        [LMAC_MODE_50G_R] = "50G_R",
        [LMAC_MODE_100G_R] = "100G_R",
        [LMAC_MODE_USXGMII] = "USXGMII",
};

/* CGX PHY management internal APIs */
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
        { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10KB_RPM) },
        { 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
        struct cgx *cgx = cgxd;

        return (cgx->pdev->device == PCI_DEVID_CN10K_RPM) ||
               (cgx->pdev->device == PCI_DEVID_CN10KB_RPM);
}

bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
        if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
                return false;
        return test_bit(lmac_id, &cgx->lmac_bmap);
}

/* Helper function to get the sequential index of an enabled LMAC
 * within a CGX
 */
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
        int tmp, id = 0;

        for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
                if (tmp == lmac_id)
                        break;
                id++;
        }

        return id;
}

struct mac_ops *get_mac_ops(void *cgxd)
{
        if (!cgxd)
                return cgxd;

        return ((struct cgx *)cgxd)->mac_ops;
}

void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
        writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
               offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
        return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
                     offset);
}

struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
        if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
                return NULL;

        return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgxcnt_max(void)
{
        struct cgx *cgx_dev;
        int idmax = -ENODEV;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
                if (cgx_dev->cgx_id > idmax)
                        idmax = cgx_dev->cgx_id;

        if (idmax < 0)
                return 0;

        return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -ENODEV;

        return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
        struct cgx *cgx_dev;

        list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
                if (cgx_dev->cgx_id == cgx_id)
                        return cgx_dev;
        }
        return NULL;
}

void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

        /* Software must not access disabled LMAC registers */
        if (!is_lmac_valid(cgx_dev, lmac_id))
                return;
        cgx_write(cgx_dev, lmac_id, offset, val);
}

u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

        /* Software must not access disabled LMAC registers */
        if (!is_lmac_valid(cgx_dev, lmac_id))
                return 0;

        return cgx_read(cgx_dev, lmac_id, offset);
}

int cgx_get_cgxid(void *cgxd)
{
        struct cgx *cgx = cgxd;

        if (!cgx)
                return -EINVAL;

        return cgx->cgx_id;
}

u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        u64 cfg;

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

        return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

/* Ensure the required lock for the event queue (where asynchronous events are
 * posted) is acquired before calling this API. Otherwise an asynchronous
 * event (with the latest link status) can reach the destination before this
 * function returns and make the link status appear wrong.
 */
int cgx_get_link_info(void *cgxd, int lmac_id,
                      struct cgx_link_user_info *linfo)
{
        struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

        if (!lmac)
                return -ENODEV;

        *linfo = lmac->link_info;
        return 0;
}

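/* Pack a 6-byte MAC address into the low 48 bits of a u64, with mac_addr[0]
 * in the most significant byte; cfg2mac() below performs the inverse. This
 * matches the layout the RX DMAC CAM registers expect.
 */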
static u64 mac2u64(u8 *mac_addr)
{
        u64 mac = 0;
        int index;

        for (index = ETH_ALEN - 1; index >= 0; index--)
                mac |= ((u64)*mac_addr++) << (8 * index);
        return mac;
}

static void cfg2mac(u64 cfg, u8 *mac_addr)
{
        int i, index = 0;

        for (i = ETH_ALEN - 1; i >= 0; i--, index++)
                mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
}

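/* The RX DMAC filter CAM is shared by all LMACs on a CGX: each enabled LMAC
 * owns a window of mac_to_index_bmap.max consecutive entries whose base is
 * derived from the LMAC's sequence id (see get_sequence_id_of_lmac()).
 * Entry 0 of each window is reserved for the interface MAC address.
 */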
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index, id;
        u64 cfg;

        /* Guard against disabled LMACs; lmac_pdata() may return NULL */
        if (!lmac)
                return -ENODEV;

        /* access mac_ops to know csr_offset */
        mac_ops = cgx_dev->mac_ops;

        cfg = mac2u64(mac_addr);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max;

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
                  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
                CGX_DMAC_MCAST_MODE);
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}

u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
{
        struct mac_ops *mac_ops;
        struct cgx *cgx = cgxd;

        if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
                return 0;

        /* Get mac_ops to know csr offset */
        mac_ops = cgx->mac_ops;

        return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
}

u64 cgx_read_dmac_entry(void *cgxd, int index)
{
        struct mac_ops *mac_ops;
        struct cgx *cgx;

        if (!cgxd)
                return 0;

        cgx = cgxd;
        mac_ops = cgx->mac_ops;
        return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
}

int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index, idx;
        u64 cfg = 0;
        int id;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Get available index where entry is to be installed */
        idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
        if (idx < 0)
                return idx;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + idx;

        cfg = mac2u64(mac_addr);
        cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
        cfg |= ((u64)lmac_id << 49);
        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);

        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);

        if (is_multicast_ether_addr(mac_addr)) {
                cfg &= ~GENMASK_ULL(2, 1);
                cfg |= CGX_DMAC_MCAST_MODE_CAM;
                lmac->mcast_filters_count++;
        } else if (!lmac->mcast_filters_count) {
                cfg |= CGX_DMAC_MCAST_MODE;
        }

        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return idx;
}

int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        u8 index = 0, id;
        u64 cfg;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Restore index 0 to its default init value as done during
         * cgx_lmac_init
         */
        set_bit(0, lmac->mac_to_index_bmap.bmap);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;
        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

        /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
        cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
        cfg &= ~CGX_DMAC_CAM_ACCEPT;
        cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
        cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

        return 0;
}

/* Allows the caller to change the MAC address associated with an index
 * in the DMAC filter table, including index 0, which is reserved for
 * the interface MAC address
 */
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct mac_ops *mac_ops;
        struct lmac *lmac;
        u64 cfg;
        int id;

        lmac = lmac_pdata(lmac_id, cgx_dev);
        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Validate the index */
        if (index >= lmac->mac_to_index_bmap.max)
                return -EINVAL;

        /* ensure index is already set */
        if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
                return -EINVAL;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;

        cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
        cfg &= ~CGX_RX_DMAC_ADR_MASK;
        cfg |= mac2u64(mac_addr);

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
        return 0;
}

int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        u8 mac[ETH_ALEN];
        u64 cfg;
        int id;

        if (!lmac)
                return -ENODEV;

        mac_ops = cgx_dev->mac_ops;
        /* Validate the index */
        if (index >= lmac->mac_to_index_bmap.max)
                return -EINVAL;

        /* Skip deletion for reserved index i.e. index 0 */
        if (index == 0)
                return 0;

        rvu_free_rsrc(&lmac->mac_to_index_bmap, index);

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max + index;

        /* Read MAC address to check whether it is ucast or mcast */
        cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));

        cfg2mac(cfg, mac);
        if (is_multicast_ether_addr(mac))
                lmac->mcast_filters_count--;

        if (!lmac->mcast_filters_count) {
                cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~GENMASK_ULL(2, 1);
                cfg |= CGX_DMAC_MCAST_MODE;
                cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
        }

        cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

        return 0;
}

int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);

        if (lmac)
                return lmac->mac_to_index_bmap.max;

        return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
        struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
        struct mac_ops *mac_ops;
        int index;
        u64 cfg;
        int id;

        /* Guard against disabled LMACs; lmac_pdata() may return NULL */
        if (!lmac)
                return 0;

        mac_ops = cgx_dev->mac_ops;

        id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

        index = id * lmac->mac_to_index_bmap.max;

        cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
        return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cgx_write(cgx, lmac_id, cgx->mac_ops->rxid_map_offset, (pkind & 0x3F));
        return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

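/* The per-MAC FIFO is statically carved up among the active LMACs; the
 * split below mirrors how hardware partitions it by LMAC count.
 */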
static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u8 num_lmacs;
        u32 fifo_len;

        fifo_len = cgx->mac_ops->fifo_len;
        num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);

        switch (num_lmacs) {
        case 1:
                return fifo_len;
        case 2:
                return fifo_len / 2;
        case 3:
                /* LMAC0 gets half of the FIFO, rest 1/4th */
                if (lmac_id == 0)
                        return fifo_len / 2;
                return fifo_len / 4;
        case 4:
        default:
                return fifo_len / 4;
        }
        return 0;
}

/* Configure CGX LMAC in internal loopback mode */
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        lmac = lmac_pdata(lmac_id, cgx);
        if (lmac->lmac_type == LMAC_MODE_SGMII ||
            lmac->lmac_type == LMAC_MODE_QSGMII) {
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
                if (enable)
                        cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
                else
                        cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
                cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
        } else {
                cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
                if (enable)
                        cfg |= CGXX_SPUX_CONTROL1_LBK;
                else
                        cfg &= ~CGXX_SPUX_CONTROL1_LBK;
                cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
        }
        return 0;
}

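/* Promiscuous mode is implemented by accepting all DMACs at the LMAC and
 * disabling CAM matching for this LMAC's filter window; disabling promisc
 * re-enables every CAM entry that still holds a valid address.
 */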
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
        struct cgx *cgx = cgx_get_pdata(cgx_id);
        struct lmac *lmac = lmac_pdata(lmac_id, cgx);
        struct mac_ops *mac_ops;
        u16 max_dmac;
        int index, i;
        u64 cfg = 0;
        int id;

        /* Check both pointers before touching lmac fields below */
        if (!cgx || !lmac)
                return;

        max_dmac = lmac->mac_to_index_bmap.max;
        id = get_sequence_id_of_lmac(cgx, lmac_id);

        mac_ops = cgx->mac_ops;
        if (enable) {
                /* Enable promiscuous mode on LMAC */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg &= ~CGX_DMAC_CAM_ACCEPT;
                cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

                for (i = 0; i < max_dmac; i++) {
                        index = id * max_dmac + i;
                        cfg = cgx_read(cgx, 0,
                                       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
                        cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
                        cgx_write(cgx, 0,
                                  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
                }
        } else {
                /* Disable promiscuous mode */
                cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
                cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
                cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
                for (i = 0; i < max_dmac; i++) {
                        index = id * max_dmac + i;
                        cfg = cgx_read(cgx, 0,
                                       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
                        if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
                                cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
                                cgx_write(cgx, 0,
                                          (CGXX_CMRX_RX_DMAC_CAM0 +
                                           index * 0x8),
                                          cfg);
                        }
                }
        }
}

static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
                                         u8 *tx_pause, u8 *rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (is_dev_rpm(cgx))
                return 0;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        *rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        *tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
        return 0;
}

/* Enable or disable forwarding received pause frames to Tx block */
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u8 rx_pause, tx_pause;
        bool is_pfc_enabled;
        struct lmac *lmac;
        u64 cfg;

        if (!cgx)
                return;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return;

        /* Pause frames are not enabled, just return */
        if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
                return;

        cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
        /* 802.3x pause and PFC are mutually exclusive on the link */
        is_pfc_enabled = !rx_pause;

        if (enable) {
                if (!is_pfc_enabled) {
                        cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                        cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                        cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
                } else {
                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
                        cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
                }
        } else {
                if (!is_pfc_enabled) {
                        cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                        cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
                } else {
                        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
                        cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
                        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
                }
        }
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;
        *rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
        return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
        struct cgx *cgx = cgxd;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;
        *tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
        return 0;
}

u64 cgx_features_get(void *cgxd)
{
        return ((struct cgx *)cgxd)->hw_features;
}

static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
        if (!linfo->fec)
                return 0;

        switch (linfo->lmac_type_id) {
        case LMAC_MODE_SGMII:
        case LMAC_MODE_XAUI:
        case LMAC_MODE_RXAUI:
        case LMAC_MODE_QSGMII:
                return 0;
        case LMAC_MODE_10G_R:
        case LMAC_MODE_25G_R:
        case LMAC_MODE_100G_R:
        case LMAC_MODE_USXGMII:
                return 1;
        case LMAC_MODE_40G_R:
                return 4;
        case LMAC_MODE_50G_R:
                if (linfo->fec == OTX2_FEC_BASER)
                        return 2;
                else
                        return 1;
        default:
                return 0;
        }
}

int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
        int stats, fec_stats_count = 0;
        int corr_reg, uncorr_reg;
        struct cgx *cgx = cgxd;

        if (!cgx || lmac_id >= cgx->lmac_count)
                return -ENODEV;

        if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_NONE)
                return 0;

        fec_stats_count =
                cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
        if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
                corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
                uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
        } else {
                corr_reg = CGXX_SPUX_RSFEC_CORR;
                uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
        }
        for (stats = 0; stats < fec_stats_count; stats++) {
                rsp->fec_corr_blks +=
                        cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
                rsp->fec_uncorr_blks +=
                        cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
        }
        return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        if (enable)
                cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
        else
                cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
        cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return 0;
}

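/* Toggle only the Tx data path; returns the previous Tx enable state so the
 * caller can restore it later.
 */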
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg, last;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        last = cfg;
        if (enable)
                cfg |= DATA_PKT_TX_EN;
        else
                cfg &= ~DATA_PKT_TX_EN;

        if (cfg != last)
                cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return !!(last & DATA_PKT_TX_EN);
}

static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
                                     u8 tx_pause, u8 rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (is_dev_rpm(cgx))
                return 0;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
        cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
        cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
        cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

        cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
        if (tx_pause) {
                cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
        } else {
                cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
                cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
        }
        cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
        return 0;
}

static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return;

        if (enable) {
                /* Set pause time and interval */
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);
                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));

                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
                          DEFAULT_PAUSE_TIME);

                cfg = cgx_read(cgx, lmac_id,
                               CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
                cfg &= ~0xFFFFULL;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
                          cfg | (DEFAULT_PAUSE_TIME / 2));
        }

        /* ALL pause frames received are completely ignored */
        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
        cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
        cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

        cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
        cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
        cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

        /* Disable pause frames transmission */
        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
        cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
        cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

        cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
        cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
        cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
        cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);

        /* Disable all PFC classes by default */
        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
        cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
}

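/* Per-PF/VF flow-control requests are tracked in the rx/tx_fc_pfvf bitmaps;
 * a pause setting may only be disabled once no other PF/VF still depends
 * on it.
 */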
int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
                       int pfvf_idx)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        if (!rx_pause)
                clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
        else
                set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);

        if (!tx_pause)
                clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
        else
                set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);

        /* check if other pfvfs are using flow control */
        if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
                dev_warn(&cgx->pdev->dev,
                         "Receive Flow control disable not permitted as it's used by other PFVFs\n");
                return -EPERM;
        }

        if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
                dev_warn(&cgx->pdev->dev,
                         "Transmit Flow control disable not permitted as it's used by other PFVFs\n");
                return -EPERM;
        }

        return 0;
}

int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
                        u8 rx_pause, u16 pfc_en)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        /* Return as no traffic classes are requested */
        if (tx_pause && !pfc_en)
                return 0;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
        pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);

        if (rx_pause) {
                cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
                        CGXX_SMUX_CBFC_CTL_BCK_EN |
                        CGXX_SMUX_CBFC_CTL_DRP_EN);
        } else {
                cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
                        CGXX_SMUX_CBFC_CTL_BCK_EN |
                        CGXX_SMUX_CBFC_CTL_DRP_EN);
        }

        if (tx_pause) {
                cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
                cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
        } else {
                cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
                cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
        }

        cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);

        /* Write source MAC address which will be filled into PFC packet */
        cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
        cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);

        return 0;
}

int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
                             u8 *rx_pause)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!is_lmac_valid(cgx, lmac_id))
                return -ENODEV;

        cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);

        *rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
        *tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);

        return 0;
}

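/* Enable/disable inbound PTP timestamping on both the GMP (SGMII) and
 * SMU (higher-speed) receive paths, since either may carry the traffic.
 */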
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
        struct cgx *cgx = cgxd;
        u64 cfg;

        if (!cgx)
                return;

        if (enable) {
                /* Enable inbound PTP timestamping */
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        } else {
                /* Disable inbound PTP stamping */
                cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
                cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

                cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
                cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
                cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
        }
}

/* CGX Firmware interface low level support */
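/* Command/response handshake with firmware: the request is written to
 * CGX_COMMAND_REG with the OWN field handed to firmware, and completion is
 * signalled via the FWI interrupt, which copies the event register into
 * lmac->resp and wakes the waiter below.
 */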
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
        struct cgx *cgx = lmac->cgx;
        struct device *dev;
        int err = 0;
        u64 cmd;

        /* Ensure no other command is in progress */
        err = mutex_lock_interruptible(&lmac->cmd_lock);
        if (err)
                return err;

        /* Ensure command register is free */
        cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
        if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
                err = -EBUSY;
                goto unlock;
        }

        /* Update ownership in command request */
        req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

        /* Mark this lmac as pending, before we start */
        lmac->cmd_pend = true;

        /* Start command in hardware */
        cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

        /* Ensure command is completed without errors */
        if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
                                msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
                dev = &cgx->pdev->dev;
                dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
                        cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
                err = LMAC_AF_ERR_CMD_TIMEOUT;
                goto unlock;
        }

        /* We have a valid command response */
        smp_rmb(); /* Ensure the latest updates are visible */
        *resp = lmac->resp;

unlock:
        mutex_unlock(&lmac->cmd_lock);

        return err;
}

int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
        struct lmac *lmac;
        int err;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        err = cgx_fwi_cmd_send(req, resp, lmac);

        /* Check for valid response */
        if (!err) {
                if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
                        return -EIO;
                else
                        return 0;
        }

        return err;
}

static int cgx_link_usertable_index_map(int speed)
{
        switch (speed) {
        case SPEED_10:
                return CGX_LINK_10M;
        case SPEED_100:
                return CGX_LINK_100M;
        case SPEED_1000:
                return CGX_LINK_1G;
        case SPEED_2500:
                return CGX_LINK_2HG;
        case SPEED_5000:
                return CGX_LINK_5G;
        case SPEED_10000:
                return CGX_LINK_10G;
        case SPEED_20000:
                return CGX_LINK_20G;
        case SPEED_25000:
                return CGX_LINK_25G;
        case SPEED_40000:
                return CGX_LINK_40G;
        case SPEED_50000:
                return CGX_LINK_50G;
        case 80000:
                return CGX_LINK_80G;
        case SPEED_100000:
                return CGX_LINK_100G;
        case SPEED_UNKNOWN:
                return CGX_LINK_NONE;
        }
        return CGX_LINK_NONE;
}

static void set_mod_args(struct cgx_set_link_mode_args *args,
                         u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
        /* Fill default values in case the user did not pass
         * valid parameters
         */
        if (args->duplex == DUPLEX_UNKNOWN)
                args->duplex = duplex;
        if (args->speed == SPEED_UNKNOWN)
                args->speed = speed;
        if (args->an == AUTONEG_UNKNOWN)
                args->an = autoneg;
        args->mode = mode;
        args->ports = 0;
}

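/* Translate a single ethtool link-mode bit into the firmware's
 * (speed, duplex, autoneg, mode-mask) tuple.
 */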
static void otx2_map_ethtool_link_modes(u64 bitmask,
                                        struct cgx_set_link_mode_args *args)
{
        switch (bitmask) {
        case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
                set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
                set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
                set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
                set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
                set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
                set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
                break;
        case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
                set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
                break;
        case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
                set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
                break;
        case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
                set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
                break;
        case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
                set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
                break;
        case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
                set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
                break;
        case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
                set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
                break;
        case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
                set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
                break;
        case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
                set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
                break;
        case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
                set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
                break;
        case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
                set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
                break;
        case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
                set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
                break;
        case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
                set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
                break;
        case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
                set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
                break;
        case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
                set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
                break;
        case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
                set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
                break;
        case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
                set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
                break;
        case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
                set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
                break;
        case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
                set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
                break;
        case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
                set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
                break;
        case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
                set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
                break;
        default:
                set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
                break;
        }
}

static inline void link_status_user_format(u64 lstat,
                                           struct cgx_link_user_info *linfo,
                                           struct cgx *cgx, u8 lmac_id)
{
        const char *lmac_string;

        linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
        linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
        linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
        linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
        linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
        linfo->lmac_type_id = FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, lstat);

        if (linfo->lmac_type_id >= LMAC_MODE_MAX) {
                dev_err(&cgx->pdev->dev, "Unknown lmac_type_id %d reported by firmware on cgx port%d:%d",
                        linfo->lmac_type_id, cgx->cgx_id, lmac_id);
                strncpy(linfo->lmac_type, "Unknown", LMACTYPE_STR_LEN - 1);
                return;
        }

        lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
        strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
                                           struct lmac *lmac)
{
        struct cgx_link_user_info *linfo;
        struct cgx *cgx = lmac->cgx;
        struct cgx_link_event event;
        struct device *dev;
        int err_type;

        dev = &cgx->pdev->dev;

        link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
        err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

        event.cgx_id = cgx->cgx_id;
        event.lmac_id = lmac->lmac_id;

        /* update the local copy of link status */
        lmac->link_info = event.link_uinfo;
        linfo = &lmac->link_info;

        if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
                return;

        /* Ensure callback doesn't get unregistered until we finish it */
        spin_lock(&lmac->event_cb_lock);

        if (!lmac->event_cb.notify_link_chg) {
                dev_dbg(dev, "cgx port %d:%d Link change handler null",
                        cgx->cgx_id, lmac->lmac_id);
                if (err_type != CGX_ERR_NONE) {
                        dev_err(dev, "cgx port %d:%d Link error %d\n",
                                cgx->cgx_id, lmac->lmac_id, err_type);
                }
                dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
                         cgx->cgx_id, lmac->lmac_id,
                         linfo->link_up ? "UP" : "DOWN", linfo->speed);
                goto err;
        }

        if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
                dev_err(dev, "event notification failure\n");
err:
        spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
        u8 id;

        id = FIELD_GET(EVTREG_ID, event);

        return id == CGX_CMD_LINK_BRING_UP ||
               id == CGX_CMD_LINK_BRING_DOWN ||
               id == CGX_CMD_MODE_CHANGE;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
        return FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE;
}

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
        u64 event, offset, clear_bit;
        struct lmac *lmac = data;
        struct cgx *cgx;

        cgx = lmac->cgx;

        /* Clear SW_INT for RPM and CMR_INT for CGX */
        offset     = cgx->mac_ops->int_register;
        clear_bit  = cgx->mac_ops->int_ena_bit;

        event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

        if (!FIELD_GET(EVTREG_ACK, event))
                return IRQ_NONE;

        switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
        case CGX_EVT_CMD_RESP:
                /* Copy the response. Since only one command is active at a
                 * time, there is no way a response can get overwritten
                 */
                lmac->resp = event;
                /* Ensure response is updated before thread context starts */
                smp_wmb();

                /* There won't be separate events for link change initiated
                 * from software; hence report the command responses as events
                 */
                if (cgx_cmdresp_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);

                /* Release thread waiting for completion */
                lmac->cmd_pend = false;
                wake_up_interruptible(&lmac->wq_cmd_cmplt);
                break;
        case CGX_EVT_ASYNC:
                if (cgx_event_is_linkevent(event))
                        cgx_link_change_handler(event, lmac);
                break;
        }

        /* Any new event or command response will be posted by firmware
         * only after the current status is acked.
         * Ack the interrupt register as well.
         */
        cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
        cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);

        return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        struct lmac *lmac;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        lmac->event_cb = *cb;

        return 0;
}

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
        struct lmac *lmac;
        unsigned long flags;
        struct cgx *cgx = cgxd;

        lmac = lmac_pdata(lmac_id, cgx);
        if (!lmac)
                return -ENODEV;

        spin_lock_irqsave(&lmac->event_cb_lock, flags);
        lmac->event_cb.notify_link_chg = NULL;
        lmac->event_cb.data = NULL;
        spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

        return 0;
}

int cgx_get_fwdata_base(u64 *base)
{
        u64 req = 0, resp;
        struct cgx *cgx;
        int first_lmac;
        int err;

        cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
        if (!cgx)
                return -ENXIO;

        first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
        if (!err)
                *base = FIELD_GET(RESP_FWD_BASE, resp);

        return err;
}

int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
                      int cgx_id, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 req = 0, resp;

        if (!cgx)
                return -ENODEV;

        if (args.mode)
                otx2_map_ethtool_link_modes(args.mode, &args);
        if (!args.speed && args.duplex && !args.an)
                return -EINVAL;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
        req = FIELD_SET(CMDMODECHANGE_SPEED,
                        cgx_link_usertable_index_map(args.speed), req);
        req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
        req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
        req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
        req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);

        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
{
        u64 req = 0, resp;
        struct cgx *cgx;
        int err = 0;

        cgx = cgx_get_pdata(cgx_id);
        if (!cgx)
                return -ENXIO;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
        req = FIELD_SET(CMDSETFEC, fec, req);
        err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
        if (err)
                return err;

        cgx->lmac_idmap[lmac_id]->link_info.fec =
                        FIELD_GET(RESP_LINKSTAT_FEC, resp);
        return cgx->lmac_idmap[lmac_id]->link_info.fec;
}

int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
{
        struct cgx *cgx = cgxd;
        u64 req = 0, resp;

        if (!cgx)
                return -ENODEV;

        req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
        u64 req = 0;
        u64 resp;

        if (enable) {
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
                /* On CN10K, firmware offloads link bring up/down operations
                 * to ECP. On OcteonTx2, link operations are handled by
                 * firmware itself, which can cause mbox errors, so configure
                 * the maximum time firmware polls for link as 1000 ms.
                 */
                if (!is_dev_rpm(cgx))
                        req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);

        } else {
                req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
        }
        return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}
1506
1507 static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
1508 {
1509         int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
1510         u64 req = 0;
1511
1512         req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
1513         return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
1514 }
1515
1516 static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
1517 {
1518         struct device *dev = &cgx->pdev->dev;
1519         int major_ver, minor_ver;
1520         u64 resp;
1521         int err;
1522
1523         if (!cgx->lmac_count)
1524                 return 0;
1525
1526         err = cgx_fwi_read_version(&resp, cgx);
1527         if (err)
1528                 return err;
1529
1530         major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
1531         minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
1532         dev_dbg(dev, "Firmware command interface version = %d.%d\n",
1533                 major_ver, minor_ver);
1534         if (major_ver != CGX_FIRMWARE_MAJOR_VER)
1535                 return -EIO;
1536         else
1537                 return 0;
1538 }
1539
1540 static void cgx_lmac_linkup_work(struct work_struct *work)
1541 {
1542         struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
1543         struct device *dev = &cgx->pdev->dev;
1544         int i, err;
1545
1546         /* Do Link up for all the enabled lmacs */
1547         for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
1548                 err = cgx_fwi_link_change(cgx, i, true);
1549                 if (err)
1550                         dev_info(dev, "cgx port %d:%d Link up command failed\n",
1551                                  cgx->cgx_id, i);
1552         }
1553 }
1554
1555 int cgx_lmac_linkup_start(void *cgxd)
1556 {
1557         struct cgx *cgx = cgxd;
1558
1559         if (!cgx)
1560                 return -ENODEV;
1561
1562         queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);
1563
1564         return 0;
1565 }
1566
1567 int cgx_lmac_reset(void *cgxd, int lmac_id, u8 pf_req_flr)
1568 {
1569         struct cgx *cgx = cgxd;
1570         u64 cfg;
1571
1572         if (!is_lmac_valid(cgx, lmac_id))
1573                 return -ENODEV;
1574
1575         /* Resetting PFC related CSRs */
1576         cfg = 0xff;
1577         cgx_write(cgxd, lmac_id, CGXX_CMRX_RX_LOGL_XON, cfg);
1578
1579         if (pf_req_flr)
1580                 cgx_lmac_internal_loopback(cgxd, lmac_id, false);
1581         return 0;
1582 }
1583
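/* Request, or free if @req_free is set, the firmware event IRQ for an
 * LMAC. @cnt is the LMAC's position used to compute the MSI-X vector;
 * on request the matching interrupt enable bit is also set.
 */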
1584 static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
1585                                    int cnt, bool req_free)
1586 {
1587         struct mac_ops *mac_ops = cgx->mac_ops;
1588         u64 offset, ena_bit;
1589         unsigned int irq;
1590         int err;
1591
1592         irq      = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
1593                                   cnt * mac_ops->irq_offset);
1594         offset   = mac_ops->int_set_reg;
1595         ena_bit  = mac_ops->int_ena_bit;
1596
1597         if (req_free) {
1598                 free_irq(irq, lmac);
1599                 return 0;
1600         }
1601
1602         err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
1603         if (err)
1604                 return err;
1605
1606         /* Enable interrupt */
1607         cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
1608         return 0;
1609 }
1610
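/* Number of LMACs present on this MAC block, as reported by hardware */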
1611 int cgx_get_nr_lmacs(void *cgxd)
1612 {
1613         struct cgx *cgx = cgxd;
1614
1615         return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
1616 }
1617
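/* Translate a sequential LMAC index into the hardware LMAC id */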
1618 u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
1619 {
1620         struct cgx *cgx = cgxd;
1621
1622         return cgx->lmac_idmap[lmac_index]->lmac_id;
1623 }
1624
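/* Bitmap of the enabled LMACs on this CGX */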
1625 unsigned long cgx_get_lmac_bmap(void *cgxd)
1626 {
1627         struct cgx *cgx = cgxd;
1628
1629         return cgx->lmac_bmap;
1630 }
1631
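/* Discover and initialize every LMAC on this CGX: allocate per-LMAC
 * state, the DMAC filter and flow control bitmaps, register the
 * firmware event interrupt and enable pause frames. On failure the
 * partially initialized LMAC is unwound here; fully initialized ones
 * are left to cgx_lmac_exit(), which the probe error path calls.
 */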
1632 static int cgx_lmac_init(struct cgx *cgx)
1633 {
1634         struct lmac *lmac;
1635         u64 lmac_list;
1636         int i, err;
1637
1638         /* lmac_list specifies which LMACs are enabled:
1639          * when bit n is set to 1, LMAC[n] is enabled.
1640          */
1641         if (cgx->mac_ops->non_contiguous_serdes_lane) {
1642                 if (is_dev_rpm2(cgx))
1643                         lmac_list =
1644                                 cgx_read(cgx, 0, RPM2_CMRX_RX_LMACS) & 0xFFULL;
1645                 else
1646                         lmac_list =
1647                                 cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
1648         }
1649
1650         if (cgx->lmac_count > cgx->max_lmac_per_mac)
1651                 cgx->lmac_count = cgx->max_lmac_per_mac;
1652
1653         for (i = 0; i < cgx->lmac_count; i++) {
1654                 lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
1655                 if (!lmac)
1656                         return -ENOMEM;
1657                 lmac->name = kasprintf(GFP_KERNEL, "cgx_fwi_%d_%d",
                                        cgx->cgx_id, i);
1658                 if (!lmac->name) {
1659                         err = -ENOMEM;
1660                         goto err_lmac_free;
1661                 }
1663                 if (cgx->mac_ops->non_contiguous_serdes_lane) {
1664                         lmac->lmac_id = __ffs64(lmac_list);
1665                         lmac_list   &= ~BIT_ULL(lmac->lmac_id);
1666                 } else {
1667                         lmac->lmac_id = i;
1668                 }
1669
1670                 lmac->cgx = cgx;
1671                 lmac->mac_to_index_bmap.max =
1672                                 cgx->mac_ops->dmac_filter_count /
1673                                 cgx->lmac_count;
1674
1675                 err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
1676                 if (err)
1677                         goto err_name_free;
1678
1679                 /* Reserve first entry for default MAC address */
1680                 set_bit(0, lmac->mac_to_index_bmap.bmap);
1681
1682                 lmac->rx_fc_pfvf_bmap.max = 128;
1683                 err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
1684                 if (err)
1685                         goto err_dmac_bmap_free;
1686
1687                 lmac->tx_fc_pfvf_bmap.max = 128;
1688                 err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
1689                 if (err)
1690                         goto err_rx_fc_bmap_free;
1691
1692                 init_waitqueue_head(&lmac->wq_cmd_cmplt);
1693                 mutex_init(&lmac->cmd_lock);
1694                 spin_lock_init(&lmac->event_cb_lock);
1695                 err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
1696                 if (err)
1697                         goto err_bitmap_free;
1698
1699                 /* Publish this LMAC in the id map and mark it enabled */
1700                 cgx->lmac_idmap[lmac->lmac_id] = lmac;
1701                 set_bit(lmac->lmac_id, &cgx->lmac_bmap);
1702                 cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
1703                 lmac->lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac->lmac_id);
1704         }
1705
1706         return cgx_lmac_verify_fwi_version(cgx);
1707
1708 err_bitmap_free:
1709         rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
1710 err_rx_fc_bmap_free:
1711         rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
1712 err_dmac_bmap_free:
1713         rvu_free_bitmap(&lmac->mac_to_index_bmap);
1714 err_name_free:
1715         kfree(lmac->name);
1716 err_lmac_free:
1717         kfree(lmac);
1718         return err;
1719 }
1720
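/* Tear down the command workqueue and release every enabled LMAC's
 * resources, mirroring cgx_lmac_init().
 */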
1721 static int cgx_lmac_exit(struct cgx *cgx)
1722 {
1723         struct lmac *lmac;
1724         int i;
1725
1726         if (cgx->cgx_cmd_workq) {
1727                 destroy_workqueue(cgx->cgx_cmd_workq);
1728                 cgx->cgx_cmd_workq = NULL;
1729         }
1730
1731         /* Free all lmac related resources */
1732         for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
1733                 lmac = cgx->lmac_idmap[i];
1734                 if (!lmac)
1735                         continue;
1736                 cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
1737                 cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
1738                 kfree(lmac->mac_to_index_bmap.bmap);
                rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
                rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
1739                 kfree(lmac->name);
1740                 kfree(lmac);
1741         }
1742
1743         return 0;
1744 }
1745
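/* Read hardware constants (Rx FIFO size, max LMACs per MAC) and record
 * the feature set this MAC type supports.
 */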
1746 static void cgx_populate_features(struct cgx *cgx)
1747 {
1748         u64 cfg;
1749
1750         cfg = cgx_read(cgx, 0, CGX_CONST);
1751         cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
1752         cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
1753
1754         if (is_dev_rpm(cgx))
1755                 cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
1756                                     RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
1757         else
1758                 cgx->hw_features = (RVU_LMAC_FEAT_FC  | RVU_LMAC_FEAT_HIGIG2 |
1759                                     RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
1760 }
1761
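/* The Rx id map offset differs between silicon variants */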
1762 static u8 cgx_get_rxid_mapoffset(struct cgx *cgx)
1763 {
1764         if (cgx->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10KB_RPM ||
1765             is_dev_rpm2(cgx))
1766                 return 0x80;
1767         else
1768                 return 0x60;
1769 }
1770
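/* MAC operations table for CGX blocks. RPM blocks install their own
 * table via rpm_get_mac_ops(); common code always dispatches through
 * cgx->mac_ops, e.g., for some valid lmac_id:
 *
 *	struct mac_ops *ops = get_mac_ops(cgxd);
 *
 *	if (ops)
 *		ops->mac_pause_frm_config(cgxd, lmac_id, true);
 */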
1771 static struct mac_ops   cgx_mac_ops    = {
1772         .name           =       "cgx",
1773         .csr_offset     =       0,
1774         .lmac_offset    =       18,
1775         .int_register   =       CGXX_CMRX_INT,
1776         .int_set_reg    =       CGXX_CMRX_INT_ENA_W1S,
1777         .irq_offset     =       9,
1778         .int_ena_bit    =       FW_CGX_INT,
1779         .lmac_fwi       =       CGX_LMAC_FWI,
1780         .non_contiguous_serdes_lane = false,
1781         .rx_stats_cnt   =       9,
1782         .tx_stats_cnt   =       18,
1783         .dmac_filter_count =    32,
1784         .get_nr_lmacs   =       cgx_get_nr_lmacs,
1785         .get_lmac_type  =       cgx_get_lmac_type,
1786         .lmac_fifo_len  =       cgx_get_lmac_fifo_len,
1787         .mac_lmac_intl_lbk =    cgx_lmac_internal_loopback,
1788         .mac_get_rx_stats  =    cgx_get_rx_stats,
1789         .mac_get_tx_stats  =    cgx_get_tx_stats,
1790         .get_fec_stats     =    cgx_get_fec_stats,
1791         .mac_enadis_rx_pause_fwding =   cgx_lmac_enadis_rx_pause_fwding,
1792         .mac_get_pause_frm_status =     cgx_lmac_get_pause_frm_status,
1793         .mac_enadis_pause_frm =         cgx_lmac_enadis_pause_frm,
1794         .mac_pause_frm_config =         cgx_lmac_pause_frm_config,
1795         .mac_enadis_ptp_config =        cgx_lmac_ptp_config,
1796         .mac_rx_tx_enable =             cgx_lmac_rx_tx_enable,
1797         .mac_tx_enable =                cgx_lmac_tx_enable,
1798         .pfc_config =                   cgx_lmac_pfc_config,
1799         .mac_get_pfc_frm_cfg   =        cgx_lmac_get_pfc_frm_cfg,
1800         .mac_reset   =                  cgx_lmac_reset,
1801 };
1802
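/* PCI probe: map the CSR region, pick the MAC operations table,
 * discover LMACs, allocate MSI-X vectors and set up the link-up
 * workqueue before initializing each LMAC.
 */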
1803 static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1804 {
1805         struct device *dev = &pdev->dev;
1806         struct cgx *cgx;
1807         int err, nvec;
1808
1809         cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
1810         if (!cgx)
1811                 return -ENOMEM;
1812         cgx->pdev = pdev;
1813
1814         pci_set_drvdata(pdev, cgx);
1815
1816         /* Select the MAC (CGX or RPM) specific operations table */
1817         if (is_dev_rpm(cgx))
1818                 cgx->mac_ops = rpm_get_mac_ops(cgx);
1819         else
1820                 cgx->mac_ops = &cgx_mac_ops;
1821
1822         cgx->mac_ops->rxid_map_offset = cgx_get_rxid_mapoffset(cgx);
1823
1824         err = pci_enable_device(pdev);
1825         if (err) {
1826                 dev_err(dev, "Failed to enable PCI device\n");
1827                 pci_set_drvdata(pdev, NULL);
1828                 return err;
1829         }
1830
1831         err = pci_request_regions(pdev, DRV_NAME);
1832         if (err) {
1833                 dev_err(dev, "PCI request regions failed 0x%x\n", err);
1834                 goto err_disable_device;
1835         }
1836
1837         /* Map configuration registers */
1838         cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
1839         if (!cgx->reg_base) {
1840                 dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
1841                 err = -ENOMEM;
1842                 goto err_release_regions;
1843         }
1844
        /* Derive this CGX's id from its CSR BAR address; it is needed
         * for the log messages below.
         */
        cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
                & CGX_ID_MASK;

1845         cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
1846         if (!cgx->lmac_count) {
1847                 dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
1848                 err = -EOPNOTSUPP;
1849                 goto err_release_regions;
1850         }
1851
1852         nvec = pci_msix_vec_count(cgx->pdev);
1853         err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1854         if (err < 0 || err != nvec) {
1855                 dev_err(dev, "Request for %d msix vectors failed, err %d\n",
1856                         nvec, err);
1857                 goto err_release_regions;
1858         }
1859
1863         /* init wq for processing linkup requests */
1864         INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
1865         cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
1866         if (!cgx->cgx_cmd_workq) {
1867                 dev_err(dev, "alloc workqueue failed for cgx cmd");
1868                 err = -ENOMEM;
1869                 goto err_free_irq_vectors;
1870         }
1871
1872         list_add(&cgx->cgx_list, &cgx_list);
1873
1875         cgx_populate_features(cgx);
1876
1877         mutex_init(&cgx->lock);
1878
1879         err = cgx_lmac_init(cgx);
1880         if (err)
1881                 goto err_release_lmac;
1882
1883         return 0;
1884
1885 err_release_lmac:
1886         cgx_lmac_exit(cgx);
1887         list_del(&cgx->cgx_list);
1888 err_free_irq_vectors:
1889         pci_free_irq_vectors(pdev);
1890 err_release_regions:
1891         pci_release_regions(pdev);
1892 err_disable_device:
1893         pci_disable_device(pdev);
1894         pci_set_drvdata(pdev, NULL);
1895         return err;
1896 }
1897
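/* PCI remove: undo everything done by cgx_probe() */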
1898 static void cgx_remove(struct pci_dev *pdev)
1899 {
1900         struct cgx *cgx = pci_get_drvdata(pdev);
1901
1902         if (cgx) {
1903                 cgx_lmac_exit(cgx);
1904                 list_del(&cgx->cgx_list);
1905         }
1906         pci_free_irq_vectors(pdev);
1907         pci_release_regions(pdev);
1908         pci_disable_device(pdev);
1909         pci_set_drvdata(pdev, NULL);
1910 }
1911
1912 struct pci_driver cgx_driver = {
1913         .name = DRV_NAME,
1914         .id_table = cgx_id_table,
1915         .probe = cgx_probe,
1916         .remove = cgx_remove,
1917 };