drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include <asm/unaligned.h>      /* put_unaligned_le32() */
#include <net/pkt_cls.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_tc.h"

#ifdef CONFIG_BNXT_SRIOV

#define CFA_HANDLE_INVALID              0xffff
#define VF_IDX_INVALID                  0xffff

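/* Allocate the two CFA handles the FW associates with a VF-rep:
 * tx_cfa_action muxes a TX packet to the VF, and rx_cfa_code identifies
 * the VF-rep in RX completions on the parent PF's rings.
 */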
static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
                              u16 *tx_cfa_action, u16 *rx_cfa_code)
{
        struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_vfr_alloc_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1);
        req.vf_id = cpu_to_le16(vf_idx);
        sprintf(req.vfr_name, "vfr%d", vf_idx);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
                *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
                netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
                           *tx_cfa_action, *rx_cfa_code);
        } else {
                netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
        }

        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

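/* Return the VF-rep's CFA handles to the FW */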
static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
{
        struct hwrm_cfa_vfr_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1);
        sprintf(req.vfr_name, "vfr%d", vf_idx);

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
        return rc;
}

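/* Query the VF's configured max MTU from the FW; fall back to
 * BNXT_MAX_MTU if the FW does not report one.
 */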
static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
                              u16 *max_mtu)
{
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_qcfg_input req = {0};
        u16 mtu;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
        req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);

        mutex_lock(&bp->hwrm_cmd_lock);

        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                mtu = le16_to_cpu(resp->max_mtu_configured);
                if (!mtu)
                        *max_mtu = BNXT_MAX_MTU;
                else
                        *max_mtu = mtu;
        }
        mutex_unlock(&bp->hwrm_cmd_lock);
        return rc;
}

static int bnxt_vf_rep_open(struct net_device *dev)
{
        struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
        struct bnxt *bp = vf_rep->bp;

        /* Enable link and TX only if the parent PF is open. */
        if (netif_running(bp->dev)) {
                netif_carrier_on(dev);
                netif_tx_start_all_queues(dev);
        }
        return 0;
}

static int bnxt_vf_rep_close(struct net_device *dev)
{
        netif_carrier_off(dev);
        netif_tx_disable(dev);

        return 0;
}

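/* VF-rep TX: attach the metadata dst carrying tx_cfa_action as the mux
 * port_id and requeue the skb on the parent PF netdev, which performs
 * the actual transmit.
 */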
static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
        int rc, len = skb->len;

        skb_dst_drop(skb);
        dst_hold((struct dst_entry *)vf_rep->dst);
        skb_dst_set(skb, (struct dst_entry *)vf_rep->dst);
        skb->dev = vf_rep->dst->u.port_info.lower_dev;

        rc = dev_queue_xmit(skb);
        if (!rc) {
                vf_rep->tx_stats.packets++;
                vf_rep->tx_stats.bytes += len;
        }
        return rc;
}

static void
bnxt_vf_rep_get_stats64(struct net_device *dev,
                        struct rtnl_link_stats64 *stats)
{
        struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

        stats->rx_packets = vf_rep->rx_stats.packets;
        stats->rx_bytes = vf_rep->rx_stats.bytes;
        stats->tx_packets = vf_rep->tx_stats.packets;
        stats->tx_bytes = vf_rep->tx_stats.bytes;
}

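/* TC classifier offloads on a VF-rep are programmed through the parent
 * PF, with the VF's FW fid identifying the source of the flow.
 */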
static int bnxt_vf_rep_setup_tc_block_cb(enum tc_setup_type type,
                                         void *type_data,
                                         void *cb_priv)
{
        struct bnxt_vf_rep *vf_rep = cb_priv;
        struct bnxt *bp = vf_rep->bp;
        int vf_fid = bp->pf.vf[vf_rep->vf_idx].fw_fid;

        if (!bnxt_tc_flower_enabled(vf_rep->bp) ||
            !tc_cls_can_offload_and_chain0(bp->dev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return bnxt_tc_setup_flower(bp, vf_fid, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static int bnxt_vf_rep_setup_tc_block(struct net_device *dev,
                                      struct tc_block_offload *f)
{
        struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
                                             bnxt_vf_rep_setup_tc_block_cb,
                                             vf_rep, vf_rep, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block,
                                        bnxt_vf_rep_setup_tc_block_cb, vf_rep);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int bnxt_vf_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                                void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return bnxt_vf_rep_setup_tc_block(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

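/* RX demux: map the cfa_code reported in an RX completion to the VF-rep
 * netdev that should receive the packet; returns NULL if no VF-rep is
 * registered for this cfa_code.
 */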
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
{
        u16 vf_idx;

        if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) {
                vf_idx = bp->cfa_code_map[cfa_code];
                if (vf_idx != VF_IDX_INVALID)
                        return bp->vf_reps[vf_idx]->dev;
        }
        return NULL;
}

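/* Called from the PF's RX path with skb->dev already set to the VF-rep:
 * account the packet and pass it up the stack.
 */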
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
{
        struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev);
        struct bnxt_vf_rep_stats *rx_stats;

        rx_stats = &vf_rep->rx_stats;
        rx_stats->bytes += skb->len;
        rx_stats->packets++;

        netif_receive_skb(skb);
}

static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
                                          size_t len)
{
        struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
        struct pci_dev *pf_pdev = vf_rep->bp->pdev;
        int rc;

        rc = snprintf(buf, len, "pf%dvf%d", PCI_FUNC(pf_pdev->devfn),
                      vf_rep->vf_idx);
        if (rc >= len)
                return -EOPNOTSUPP;
        return 0;
}

static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
                                    struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}

static int bnxt_vf_rep_port_attr_get(struct net_device *dev,
                                     struct switchdev_attr *attr)
{
        struct bnxt_vf_rep *vf_rep = netdev_priv(dev);

        /* As only PORT_PARENT_ID is currently supported, use common code
         * between the PF and VF-rep for now.
         */
        return bnxt_port_attr_get(vf_rep->bp, attr);
}

static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = {
        .switchdev_port_attr_get        = bnxt_vf_rep_port_attr_get
};

static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = {
        .get_drvinfo            = bnxt_vf_rep_get_drvinfo
};

static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
        .ndo_open               = bnxt_vf_rep_open,
        .ndo_stop               = bnxt_vf_rep_close,
        .ndo_start_xmit         = bnxt_vf_rep_xmit,
        .ndo_get_stats64        = bnxt_vf_rep_get_stats64,
        .ndo_setup_tc           = bnxt_vf_rep_setup_tc,
        .ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};

bool bnxt_dev_is_vf_rep(struct net_device *dev)
{
        return dev->netdev_ops == &bnxt_vf_rep_netdev_ops;
}

/* Called when the parent PF interface is closed:
 * As the mode transition from SWITCHDEV to LEGACY
 * happens under the rtnl_lock(), this routine is safe
 * under the rtnl_lock().
 */
void bnxt_vf_reps_close(struct bnxt *bp)
{
        struct bnxt_vf_rep *vf_rep;
        u16 num_vfs, i;

        if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return;

        num_vfs = pci_num_vf(bp->pdev);
        for (i = 0; i < num_vfs; i++) {
                vf_rep = bp->vf_reps[i];
                if (netif_running(vf_rep->dev))
                        bnxt_vf_rep_close(vf_rep->dev);
        }
}

/* Called when the parent PF interface is opened (re-opened):
 * As the mode transition from SWITCHDEV to LEGACY
 * happens under the rtnl_lock(), this routine is safe
 * under the rtnl_lock().
 */
void bnxt_vf_reps_open(struct bnxt *bp)
{
        int i;

        if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return;

        for (i = 0; i < pci_num_vf(bp->pdev); i++)
                bnxt_vf_rep_open(bp->vf_reps[i]->dev);
}

static void __bnxt_vf_reps_destroy(struct bnxt *bp)
{
        u16 num_vfs = pci_num_vf(bp->pdev);
        struct bnxt_vf_rep *vf_rep;
        int i;

        for (i = 0; i < num_vfs; i++) {
                vf_rep = bp->vf_reps[i];
                if (vf_rep) {
                        dst_release((struct dst_entry *)vf_rep->dst);

                        if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID)
                                hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);

                        if (vf_rep->dev) {
                                /* if register_netdev failed, then netdev_ops
                                 * would have been set to NULL
                                 */
                                if (vf_rep->dev->netdev_ops)
                                        unregister_netdev(vf_rep->dev);
                                free_netdev(vf_rep->dev);
                        }
                }
        }

        kfree(bp->vf_reps);
        bp->vf_reps = NULL;
}

void bnxt_vf_reps_destroy(struct bnxt *bp)
{
        bool closed = false;

        if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return;

        if (!bp->vf_reps)
                return;

        /* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
         * before proceeding with VF-rep cleanup.
         */
        rtnl_lock();
        if (netif_running(bp->dev)) {
                bnxt_close_nic(bp, false, false);
                closed = true;
        }
        /* un-publish cfa_code_map so that RX path can't see it anymore */
        kfree(bp->cfa_code_map);
        bp->cfa_code_map = NULL;
        bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;

        if (closed)
                bnxt_open_nic(bp, false, false);
        rtnl_unlock();

        /* Need to call vf_reps_destroy() outside of rtnl_lock
         * as unregister_netdev takes rtnl_lock
         */
        __bnxt_vf_reps_destroy(bp);
}

/* Use the OUI of the PF's perm addr and report the same mac addr
 * for the same VF-rep each time
 */
static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac)
{
        u32 addr;

        ether_addr_copy(mac, src_mac);

        addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx;
        mac[3] = (u8)(addr & 0xFF);
        mac[4] = (u8)((addr >> 8) & 0xFF);
        mac[5] = (u8)((addr >> 16) & 0xFF);
}

static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
                                    struct net_device *dev)
{
        struct net_device *pf_dev = bp->dev;
        u16 max_mtu;

        dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
        dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
        SWITCHDEV_SET_OPS(dev, &bnxt_vf_rep_switchdev_ops);
        /* Just inherit all the features of the parent PF as the VF-rep
         * uses the RX/TX rings of the parent PF
         */
        dev->hw_features = pf_dev->hw_features;
        dev->gso_partial_features = pf_dev->gso_partial_features;
        dev->vlan_features = pf_dev->vlan_features;
        dev->hw_enc_features = pf_dev->hw_enc_features;
        dev->features |= pf_dev->features;
        bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
                                 dev->perm_addr);
        ether_addr_copy(dev->dev_addr, dev->perm_addr);
        /* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
        if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
                dev->max_mtu = max_mtu;
        dev->min_mtu = ETH_ZLEN;
}

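/* Read the adapter's 8-byte PCIe Device Serial Number from the DSN
 * extended capability in config space.
 */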
static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
{
        struct pci_dev *pdev = bp->pdev;
        int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
        u32 dw;

        if (!pos) {
                netdev_info(bp->dev, "Unable to read adapter's DSN");
                return -EOPNOTSUPP;
        }

        /* DSN (two dw) is at an offset of 4 from the cap pos */
        pos += 4;
        pci_read_config_dword(pdev, pos, &dw);
        put_unaligned_le32(dw, &dsn[0]);
        pci_read_config_dword(pdev, pos + 4, &dw);
        put_unaligned_le32(dw, &dsn[4]);
        return 0;
}

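/* Create and register a VF-rep netdev for each VF: get the CFA handles
 * from the FW and build the cfa_code -> vf_idx map used by the RX demux
 * path. The map is published only after every VF-rep is initialized so
 * the RX path never sees a partially built map.
 */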
static int bnxt_vf_reps_create(struct bnxt *bp)
{
        u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
        struct bnxt_vf_rep *vf_rep;
        struct net_device *dev;
        int rc, i;

        bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
        if (!bp->vf_reps)
                return -ENOMEM;

        /* storage for cfa_code to vf-idx mapping */
        cfa_code_map = kmalloc_array(MAX_CFA_CODE, sizeof(*bp->cfa_code_map),
                                     GFP_KERNEL);
        if (!cfa_code_map) {
                rc = -ENOMEM;
                goto err;
        }
        for (i = 0; i < MAX_CFA_CODE; i++)
                cfa_code_map[i] = VF_IDX_INVALID;

        for (i = 0; i < num_vfs; i++) {
                dev = alloc_etherdev(sizeof(*vf_rep));
                if (!dev) {
                        rc = -ENOMEM;
                        goto err;
                }

                vf_rep = netdev_priv(dev);
                bp->vf_reps[i] = vf_rep;
                vf_rep->dev = dev;
                vf_rep->bp = bp;
                vf_rep->vf_idx = i;
                vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;

                /* get cfa handles from FW */
                rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx,
                                        &vf_rep->tx_cfa_action,
                                        &vf_rep->rx_cfa_code);
                if (rc) {
                        rc = -ENOLINK;
                        goto err;
                }
                cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;

                vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
                                                 GFP_KERNEL);
                if (!vf_rep->dst) {
                        rc = -ENOMEM;
                        goto err;
                }
                /* only cfa_action is needed to mux a packet while TXing */
                vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
                vf_rep->dst->u.port_info.lower_dev = bp->dev;

                bnxt_vf_rep_netdev_init(bp, vf_rep, dev);
                rc = register_netdev(dev);
                if (rc) {
                        /* no need for unregister_netdev in cleanup */
                        dev->netdev_ops = NULL;
                        goto err;
                }
        }

        /* Read the adapter's DSN to use as the eswitch switch_id */
        rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
        if (rc)
                goto err;

        /* publish cfa_code_map only after all VF-reps have been initialized */
        bp->cfa_code_map = cfa_code_map;
        bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
        netif_keep_dst(bp->dev);
        return 0;

err:
        netdev_info(bp->dev, "%s error=%d", __func__, rc);
        kfree(cfa_code_map);
        __bnxt_vf_reps_destroy(bp);
        return rc;
}

/* Devlink related routines */
int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
        struct bnxt *bp = bnxt_get_bp_from_dl(devlink);

        *mode = bp->eswitch_mode;
        return 0;
}

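/* Switch the eswitch mode: LEGACY tears down the VF-reps; SWITCHDEV
 * creates them, and requires FW 1.8.3+ and at least one enabled VF.
 */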
int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
        struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
        int rc = 0;

        mutex_lock(&bp->sriov_lock);
        if (bp->eswitch_mode == mode) {
                netdev_info(bp->dev, "already in %s eswitch mode",
                            mode == DEVLINK_ESWITCH_MODE_LEGACY ?
                            "legacy" : "switchdev");
                rc = -EINVAL;
                goto done;
        }

        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
                bnxt_vf_reps_destroy(bp);
                break;

        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
                if (bp->hwrm_spec_code < 0x10803) {
                        netdev_warn(bp->dev, "FW does not support SRIOV E-Switch SWITCHDEV mode\n");
                        rc = -ENOTSUPP;
                        goto done;
                }

                if (pci_num_vf(bp->pdev) == 0) {
                        netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
                        rc = -EPERM;
                        goto done;
                }
                rc = bnxt_vf_reps_create(bp);
                break;

        default:
                rc = -EINVAL;
                goto done;
        }
done:
        mutex_unlock(&bp->sriov_lock);
        return rc;
}

#endif