1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3
4 #include <linux/dma-mapping.h>
5 #include <linux/etherdevice.h>
6 #include <linux/interrupt.h>
7 #include <linux/if_vlan.h>
8 #include <linux/ip.h>
9 #include <linux/ipv6.h>
10 #include <linux/module.h>
11 #include <linux/pci.h>
12 #include <linux/aer.h>
13 #include <linux/skbuff.h>
14 #include <linux/sctp.h>
15 #include <linux/vermagic.h>
16 #include <net/gre.h>
17 #include <net/pkt_cls.h>
18 #include <net/vxlan.h>
19
20 #include "hnae3.h"
21 #include "hns3_enet.h"
22
23 static void hns3_clear_all_ring(struct hnae3_handle *h);
24 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
25 static void hns3_remove_hw_addr(struct net_device *netdev);
26
27 static const char hns3_driver_name[] = "hns3";
28 const char hns3_driver_version[] = VERMAGIC_STRING;
29 static const char hns3_driver_string[] =
30                         "Hisilicon Ethernet Network Driver for Hip08 Family";
31 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
32 static struct hnae3_client client;
33
34 /* hns3_pci_tbl - PCI Device ID Table
35  *
36  * Last entry must be all 0s
37  *
38  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
39  *   Class, Class Mask, private data (not used) }
40  */
41 static const struct pci_device_id hns3_pci_tbl[] = {
42         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
45          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
46         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
47          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
48         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
49          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
50         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
51          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
52         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
53          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
54         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
55         {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
56          HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
57         /* required last entry */
58         {0, }
59 };
60 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
61
62 static irqreturn_t hns3_irq_handle(int irq, void *vector)
63 {
64         struct hns3_enet_tqp_vector *tqp_vector = vector;
65
66         napi_schedule(&tqp_vector->napi);
67
68         return IRQ_HANDLED;
69 }
70
71 /* This callback function records irq affinity changes in the vector's
72  * affinity mask when irq_set_affinity_notifier() is in use.
73  */
74 static void hns3_nic_irq_affinity_notify(struct irq_affinity_notify *notify,
75                                          const cpumask_t *mask)
76 {
77         struct hns3_enet_tqp_vector *tqp_vectors =
78                 container_of(notify, struct hns3_enet_tqp_vector,
79                              affinity_notify);
80
81         tqp_vectors->affinity_mask = *mask;
82 }
83
84 static void hns3_nic_irq_affinity_release(struct kref *ref)
85 {
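        /* Nothing to release here; an empty callback is still required because
         * the affinity notifier's kref release hook must be non-NULL.
         */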
86 }
87
88 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
89 {
90         struct hns3_enet_tqp_vector *tqp_vectors;
91         unsigned int i;
92
93         for (i = 0; i < priv->vector_num; i++) {
94                 tqp_vectors = &priv->tqp_vector[i];
95
96                 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
97                         continue;
98
99                 /* clear the affinity notifier and affinity mask */
100                 irq_set_affinity_notifier(tqp_vectors->vector_irq, NULL);
101                 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
102
103                 /* release the irq resource */
104                 free_irq(tqp_vectors->vector_irq, tqp_vectors);
105                 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
106         }
107 }
108
109 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
110 {
111         struct hns3_enet_tqp_vector *tqp_vectors;
112         int txrx_int_idx = 0;
113         int rx_int_idx = 0;
114         int tx_int_idx = 0;
115         unsigned int i;
116         int ret;
117
118         for (i = 0; i < priv->vector_num; i++) {
119                 tqp_vectors = &priv->tqp_vector[i];
120
121                 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
122                         continue;
123
124                 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
125                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
126                                  "%s-%s-%d", priv->netdev->name, "TxRx",
127                                  txrx_int_idx++);
128                         txrx_int_idx++;
129                 } else if (tqp_vectors->rx_group.ring) {
130                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
131                                  "%s-%s-%d", priv->netdev->name, "Rx",
132                                  rx_int_idx++);
133                 } else if (tqp_vectors->tx_group.ring) {
134                         snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
135                                  "%s-%s-%d", priv->netdev->name, "Tx",
136                                  tx_int_idx++);
137                 } else {
138                         /* Skip this unused q_vector */
139                         continue;
140                 }
141
142                 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
143
144                 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
145                                   tqp_vectors->name,
146                                   tqp_vectors);
147                 if (ret) {
148                         netdev_err(priv->netdev, "request irq(%d) fail\n",
149                                    tqp_vectors->vector_irq);
150                         return ret;
151                 }
152
153                 tqp_vectors->affinity_notify.notify =
154                                         hns3_nic_irq_affinity_notify;
155                 tqp_vectors->affinity_notify.release =
156                                         hns3_nic_irq_affinity_release;
157                 irq_set_affinity_notifier(tqp_vectors->vector_irq,
158                                           &tqp_vectors->affinity_notify);
159                 irq_set_affinity_hint(tqp_vectors->vector_irq,
160                                       &tqp_vectors->affinity_mask);
161
162                 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
163         }
164
165         return 0;
166 }
167
168 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
169                                  u32 mask_en)
170 {
171         writel(mask_en, tqp_vector->mask_addr);
172 }
173
174 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
175 {
176         napi_enable(&tqp_vector->napi);
177
178         /* enable vector */
179         hns3_mask_vector_irq(tqp_vector, 1);
180 }
181
182 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
183 {
184         /* disable vector */
185         hns3_mask_vector_irq(tqp_vector, 0);
186
187         disable_irq(tqp_vector->vector_irq);
188         napi_disable(&tqp_vector->napi);
189 }
190
191 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
192                                  u32 rl_value)
193 {
194         u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
195
196         /* This defines the configuration for RL (Interrupt Rate Limiter).
197          * RL limits the rate of interrupts, i.e. the number of interrupts
198          * per second. GL and RL are two ways to achieve interrupt coalescing.
199          */
200
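        /* Note: RL is only enabled while GL self-adaptation is off for both the
         * TX and RX groups, presumably because adaptive GL and a fixed rate
         * limit would conflict.
         */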
201         if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
202             !tqp_vector->rx_group.coal.gl_adapt_enable)
203                 /* According to the hardware, the range of rl_reg is
204                  * 0-59 and the unit is 4.
205                  */
206                 rl_reg |=  HNS3_INT_RL_ENABLE_MASK;
207
208         writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
209 }
210
211 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
212                                     u32 gl_value)
213 {
214         u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
215
216         writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
217 }
218
219 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
220                                     u32 gl_value)
221 {
222         u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
223
224         writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
225 }
226
227 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
228                                    struct hns3_nic_priv *priv)
229 {
230         /* initialize the configuration for interrupt coalescing.
231          * 1. GL (Interrupt Gap Limiter)
232          * 2. RL (Interrupt Rate Limiter)
233          */
234
235         /* Default: enable interrupt coalescing self-adaptive and GL */
236         tqp_vector->tx_group.coal.gl_adapt_enable = 1;
237         tqp_vector->rx_group.coal.gl_adapt_enable = 1;
238
239         tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
240         tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
241
242         tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
243         tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
244         tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
245 }
246
247 static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
248                                       struct hns3_nic_priv *priv)
249 {
250         struct hnae3_handle *h = priv->ae_handle;
251
252         hns3_set_vector_coalesce_tx_gl(tqp_vector,
253                                        tqp_vector->tx_group.coal.int_gl);
254         hns3_set_vector_coalesce_rx_gl(tqp_vector,
255                                        tqp_vector->rx_group.coal.int_gl);
256         hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
257 }
258
259 static int hns3_nic_set_real_num_queue(struct net_device *netdev)
260 {
261         struct hnae3_handle *h = hns3_get_handle(netdev);
262         struct hnae3_knic_private_info *kinfo = &h->kinfo;
263         unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
264         int i, ret;
265
266         if (kinfo->num_tc <= 1) {
267                 netdev_reset_tc(netdev);
268         } else {
269                 ret = netdev_set_num_tc(netdev, kinfo->num_tc);
270                 if (ret) {
271                         netdev_err(netdev,
272                                    "netdev_set_num_tc fail, ret=%d!\n", ret);
273                         return ret;
274                 }
275
276                 for (i = 0; i < HNAE3_MAX_TC; i++) {
277                         if (!kinfo->tc_info[i].enable)
278                                 continue;
279
280                         netdev_set_tc_queue(netdev,
281                                             kinfo->tc_info[i].tc,
282                                             kinfo->tc_info[i].tqp_count,
283                                             kinfo->tc_info[i].tqp_offset);
284                 }
285         }
286
287         ret = netif_set_real_num_tx_queues(netdev, queue_size);
288         if (ret) {
289                 netdev_err(netdev,
290                            "netif_set_real_num_tx_queues fail, ret=%d!\n",
291                            ret);
292                 return ret;
293         }
294
295         ret = netif_set_real_num_rx_queues(netdev, queue_size);
296         if (ret) {
297                 netdev_err(netdev,
298                            "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
299                 return ret;
300         }
301
302         return 0;
303 }
304
305 static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
306 {
307         u16 alloc_tqps, max_rss_size, rss_size;
308
309         h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
310         rss_size = alloc_tqps / h->kinfo.num_tc;
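        /* rss_size is the number of TQPs available per TC; it is further
         * capped by the hardware's reported maximum RSS size below.
         */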
311
312         return min_t(u16, rss_size, max_rss_size);
313 }
314
315 static int hns3_nic_net_up(struct net_device *netdev)
316 {
317         struct hns3_nic_priv *priv = netdev_priv(netdev);
318         struct hnae3_handle *h = priv->ae_handle;
319         int i, j;
320         int ret;
321
322         ret = hns3_nic_reset_all_ring(h);
323         if (ret)
324                 return ret;
325
326         /* get irq resource for all vectors */
327         ret = hns3_nic_init_irq(priv);
328         if (ret) {
329                 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
330                 return ret;
331         }
332
333         /* enable the vectors */
334         for (i = 0; i < priv->vector_num; i++)
335                 hns3_vector_enable(&priv->tqp_vector[i]);
336
337         /* start the ae_dev */
338         ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
339         if (ret)
340                 goto out_start_err;
341
342         clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
343
344         return 0;
345
346 out_start_err:
347         for (j = i - 1; j >= 0; j--)
348                 hns3_vector_disable(&priv->tqp_vector[j]);
349
350         hns3_nic_uninit_irq(priv);
351
352         return ret;
353 }
354
355 static int hns3_nic_net_open(struct net_device *netdev)
356 {
357         struct hns3_nic_priv *priv = netdev_priv(netdev);
358         struct hnae3_handle *h = hns3_get_handle(netdev);
359         struct hnae3_knic_private_info *kinfo;
360         int i, ret;
361
362         netif_carrier_off(netdev);
363
364         ret = hns3_nic_set_real_num_queue(netdev);
365         if (ret)
366                 return ret;
367
368         ret = hns3_nic_net_up(netdev);
369         if (ret) {
370                 netdev_err(netdev,
371                            "hns net up fail, ret=%d!\n", ret);
372                 return ret;
373         }
374
375         kinfo = &h->kinfo;
376         for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
377                 netdev_set_prio_tc_map(netdev, i,
378                                        kinfo->prio_tc[i]);
379         }
380
381         priv->ae_handle->last_reset_time = jiffies;
382         return 0;
383 }
384
385 static void hns3_nic_net_down(struct net_device *netdev)
386 {
387         struct hns3_nic_priv *priv = netdev_priv(netdev);
388         const struct hnae3_ae_ops *ops;
389         int i;
390
391         if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
392                 return;
393
394         /* disable vectors */
395         for (i = 0; i < priv->vector_num; i++)
396                 hns3_vector_disable(&priv->tqp_vector[i]);
397
398         /* stop ae_dev */
399         ops = priv->ae_handle->ae_algo->ops;
400         if (ops->stop)
401                 ops->stop(priv->ae_handle);
402
403         /* free irq resources */
404         hns3_nic_uninit_irq(priv);
405
406         hns3_clear_all_ring(priv->ae_handle);
407 }
408
409 static int hns3_nic_net_stop(struct net_device *netdev)
410 {
411         netif_tx_stop_all_queues(netdev);
412         netif_carrier_off(netdev);
413
414         hns3_nic_net_down(netdev);
415
416         return 0;
417 }
418
419 static int hns3_nic_uc_sync(struct net_device *netdev,
420                             const unsigned char *addr)
421 {
422         struct hnae3_handle *h = hns3_get_handle(netdev);
423
424         if (h->ae_algo->ops->add_uc_addr)
425                 return h->ae_algo->ops->add_uc_addr(h, addr);
426
427         return 0;
428 }
429
430 static int hns3_nic_uc_unsync(struct net_device *netdev,
431                               const unsigned char *addr)
432 {
433         struct hnae3_handle *h = hns3_get_handle(netdev);
434
435         if (h->ae_algo->ops->rm_uc_addr)
436                 return h->ae_algo->ops->rm_uc_addr(h, addr);
437
438         return 0;
439 }
440
441 static int hns3_nic_mc_sync(struct net_device *netdev,
442                             const unsigned char *addr)
443 {
444         struct hnae3_handle *h = hns3_get_handle(netdev);
445
446         if (h->ae_algo->ops->add_mc_addr)
447                 return h->ae_algo->ops->add_mc_addr(h, addr);
448
449         return 0;
450 }
451
452 static int hns3_nic_mc_unsync(struct net_device *netdev,
453                               const unsigned char *addr)
454 {
455         struct hnae3_handle *h = hns3_get_handle(netdev);
456
457         if (h->ae_algo->ops->rm_mc_addr)
458                 return h->ae_algo->ops->rm_mc_addr(h, addr);
459
460         return 0;
461 }
462
463 static u8 hns3_get_netdev_flags(struct net_device *netdev)
464 {
465         u8 flags = 0;
466
467         if (netdev->flags & IFF_PROMISC) {
468                 flags = HNAE3_USER_UPE | HNAE3_USER_MPE;
469         } else {
470                 flags |= HNAE3_VLAN_FLTR;
471                 if (netdev->flags & IFF_ALLMULTI)
472                         flags |= HNAE3_USER_MPE;
473         }
474
475         return flags;
476 }
477
478 static void hns3_nic_set_rx_mode(struct net_device *netdev)
479 {
480         struct hnae3_handle *h = hns3_get_handle(netdev);
481         u8 new_flags;
482         int ret;
483
484         new_flags = hns3_get_netdev_flags(netdev);
485
486         ret = __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
487         if (ret) {
488                 netdev_err(netdev, "sync uc address fail\n");
489                 if (ret == -ENOSPC)
490                         new_flags |= HNAE3_OVERFLOW_UPE;
491         }
492
493         if (netdev->flags & IFF_MULTICAST) {
494                 ret = __dev_mc_sync(netdev, hns3_nic_mc_sync,
495                                     hns3_nic_mc_unsync);
496                 if (ret) {
497                         netdev_err(netdev, "sync mc address fail\n");
498                         if (ret == -ENOSPC)
499                                 new_flags |= HNAE3_OVERFLOW_MPE;
500                 }
501         }
502
503         hns3_update_promisc_mode(netdev, new_flags);
504         /* When user-requested promiscuous mode is enabled, vlan filtering is
505          * disabled to let all packets in. When promiscuous mode is enabled
506          * due to MAC-VLAN table overflow, vlan filtering stays enabled.
507          */
508         hns3_enable_vlan_filter(netdev, new_flags & HNAE3_VLAN_FLTR);
509         h->netdev_flags = new_flags;
510 }
511
512 int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
513 {
514         struct hns3_nic_priv *priv = netdev_priv(netdev);
515         struct hnae3_handle *h = priv->ae_handle;
516
517         if (h->ae_algo->ops->set_promisc_mode) {
518                 return h->ae_algo->ops->set_promisc_mode(h,
519                                                 promisc_flags & HNAE3_UPE,
520                                                 promisc_flags & HNAE3_MPE);
521         }
522
523         return 0;
524 }
525
526 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
527 {
528         struct hns3_nic_priv *priv = netdev_priv(netdev);
529         struct hnae3_handle *h = priv->ae_handle;
530         bool last_state;
531
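        /* Runtime toggling of the VLAN filter is only supported on revision
         * 0x21 (and later) hardware that provides enable_vlan_filter.
         */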
532         if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
533                 last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
534                 if (enable != last_state) {
535                         netdev_info(netdev,
536                                     "%s vlan filter\n",
537                                     enable ? "enable" : "disable");
538                         h->ae_algo->ops->enable_vlan_filter(h, enable);
539                 }
540         }
541 }
542
543 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
544                         u16 *mss, u32 *type_cs_vlan_tso)
545 {
546         u32 l4_offset, hdr_len;
547         union l3_hdr_info l3;
548         union l4_hdr_info l4;
549         u32 l4_paylen;
550         int ret;
551
552         if (!skb_is_gso(skb))
553                 return 0;
554
555         ret = skb_cow_head(skb, 0);
556         if (ret)
557                 return ret;
558
559         l3.hdr = skb_network_header(skb);
560         l4.hdr = skb_transport_header(skb);
561
562         /* Software should clear the IPv4's checksum field when tso is
563          * needed.
564          */
565         if (l3.v4->version == 4)
566                 l3.v4->check = 0;
567
568         /* tunnel packet.*/
569         if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
570                                          SKB_GSO_GRE_CSUM |
571                                          SKB_GSO_UDP_TUNNEL |
572                                          SKB_GSO_UDP_TUNNEL_CSUM)) {
573                 if ((!(skb_shinfo(skb)->gso_type &
574                     SKB_GSO_PARTIAL)) &&
575                     (skb_shinfo(skb)->gso_type &
576                     SKB_GSO_UDP_TUNNEL_CSUM)) {
577                         /* Software should clear the udp's checksum
578                          * field when tso is needed.
579                          */
580                         l4.udp->check = 0;
581                 }
582                 /* reset l3&l4 pointers from outer to inner headers */
583                 l3.hdr = skb_inner_network_header(skb);
584                 l4.hdr = skb_inner_transport_header(skb);
585
586                 /* Software should clear the IPv4's checksum field when
587                  * tso is needed.
588                  */
589                 if (l3.v4->version == 4)
590                         l3.v4->check = 0;
591         }
592
593         /* normal or tunnel packet*/
594         l4_offset = l4.hdr - skb->data;
595         hdr_len = (l4.tcp->doff * 4) + l4_offset;
596
597         /* remove payload length from inner pseudo checksum when tso*/
598         l4_paylen = skb->len - l4_offset;
599         csum_replace_by_diff(&l4.tcp->check,
600                              (__force __wsum)htonl(l4_paylen));
601
602         /* find the txbd field values */
603         *paylen = skb->len - hdr_len;
604         hnae3_set_bit(*type_cs_vlan_tso,
605                       HNS3_TXD_TSO_B, 1);
606
607         /* get MSS for TSO */
608         *mss = skb_shinfo(skb)->gso_size;
609
610         return 0;
611 }
612
613 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
614                                 u8 *il4_proto)
615 {
616         union {
617                 struct iphdr *v4;
618                 struct ipv6hdr *v6;
619                 unsigned char *hdr;
620         } l3;
621         unsigned char *l4_hdr;
622         unsigned char *exthdr;
623         u8 l4_proto_tmp;
624         __be16 frag_off;
625
626         /* find outer header pointers */
627         l3.hdr = skb_network_header(skb);
628         l4_hdr = skb_transport_header(skb);
629
630         if (skb->protocol == htons(ETH_P_IPV6)) {
631                 exthdr = l3.hdr + sizeof(*l3.v6);
632                 l4_proto_tmp = l3.v6->nexthdr;
633                 if (l4_hdr != exthdr)
634                         ipv6_skip_exthdr(skb, exthdr - skb->data,
635                                          &l4_proto_tmp, &frag_off);
636         } else if (skb->protocol == htons(ETH_P_IP)) {
637                 l4_proto_tmp = l3.v4->protocol;
638         } else {
639                 return -EINVAL;
640         }
641
642         *ol4_proto = l4_proto_tmp;
643
644         /* tunnel packet */
645         if (!skb->encapsulation) {
646                 *il4_proto = 0;
647                 return 0;
648         }
649
650         /* find inner header pointers */
651         l3.hdr = skb_inner_network_header(skb);
652         l4_hdr = skb_inner_transport_header(skb);
653
654         if (l3.v6->version == 6) {
655                 exthdr = l3.hdr + sizeof(*l3.v6);
656                 l4_proto_tmp = l3.v6->nexthdr;
657                 if (l4_hdr != exthdr)
658                         ipv6_skip_exthdr(skb, exthdr - skb->data,
659                                          &l4_proto_tmp, &frag_off);
660         } else if (l3.v4->version == 4) {
661                 l4_proto_tmp = l3.v4->protocol;
662         }
663
664         *il4_proto = l4_proto_tmp;
665
666         return 0;
667 }
668
669 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
670                                 u8 il4_proto, u32 *type_cs_vlan_tso,
671                                 u32 *ol_type_vlan_len_msec)
672 {
673         union {
674                 struct iphdr *v4;
675                 struct ipv6hdr *v6;
676                 unsigned char *hdr;
677         } l3;
678         union {
679                 struct tcphdr *tcp;
680                 struct udphdr *udp;
681                 struct gre_base_hdr *gre;
682                 unsigned char *hdr;
683         } l4;
684         unsigned char *l2_hdr;
685         u8 l4_proto = ol4_proto;
686         u32 ol2_len;
687         u32 ol3_len;
688         u32 ol4_len;
689         u32 l2_len;
690         u32 l3_len;
691
692         l3.hdr = skb_network_header(skb);
693         l4.hdr = skb_transport_header(skb);
694
695         /* compute L2 header size for normal packet, defined in 2 Bytes */
696         l2_len = l3.hdr - skb->data;
697         hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
698                         HNS3_TXD_L2LEN_S, l2_len >> 1);
699
700         /* tunnel packet*/
701         if (skb->encapsulation) {
702                 /* compute OL2 header size, defined in 2 Bytes */
703                 ol2_len = l2_len;
704                 hnae3_set_field(*ol_type_vlan_len_msec,
705                                 HNS3_TXD_L2LEN_M,
706                                 HNS3_TXD_L2LEN_S, ol2_len >> 1);
707
708                 /* compute OL3 header size, defined in 4 Bytes */
709                 ol3_len = l4.hdr - l3.hdr;
710                 hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
711                                 HNS3_TXD_L3LEN_S, ol3_len >> 2);
712
713                 /* MAC in UDP, MAC in GRE (0x6558)*/
714                 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
715                         /* switch MAC header ptr from outer to inner header.*/
716                         l2_hdr = skb_inner_mac_header(skb);
717
718                         /* compute OL4 header size, defined in 4 Bytes. */
719                         ol4_len = l2_hdr - l4.hdr;
720                         hnae3_set_field(*ol_type_vlan_len_msec,
721                                         HNS3_TXD_L4LEN_M, HNS3_TXD_L4LEN_S,
722                                         ol4_len >> 2);
723
724                         /* switch IP header ptr from outer to inner header */
725                         l3.hdr = skb_inner_network_header(skb);
726
727                         /* compute inner l2 header size, defined in 2 Bytes. */
728                         l2_len = l3.hdr - l2_hdr;
729                         hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
730                                         HNS3_TXD_L2LEN_S, l2_len >> 1);
731                 } else {
732                         /* skb packet types not supported by hardware,
733                          * so the txbd len field is not filled.
734                          */
735                         return;
736                 }
737
738                 /* switch L4 header pointer from outer to inner */
739                 l4.hdr = skb_inner_transport_header(skb);
740
741                 l4_proto = il4_proto;
742         }
743
744         /* compute inner(/normal) L3 header size, defined in 4 Bytes */
745         l3_len = l4.hdr - l3.hdr;
746         hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
747                         HNS3_TXD_L3LEN_S, l3_len >> 2);
748
749         /* compute inner(/normal) L4 header size, defined in 4 Bytes */
750         switch (l4_proto) {
751         case IPPROTO_TCP:
752                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
753                                 HNS3_TXD_L4LEN_S, l4.tcp->doff);
754                 break;
755         case IPPROTO_SCTP:
756                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
757                                 HNS3_TXD_L4LEN_S,
758                                 (sizeof(struct sctphdr) >> 2));
759                 break;
760         case IPPROTO_UDP:
761                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
762                                 HNS3_TXD_L4LEN_S,
763                                 (sizeof(struct udphdr) >> 2));
764                 break;
765         default:
766                 /* skb packet types not supported by hardware,
767                  * so the txbd len field is not filled.
768                  */
769                 return;
770         }
771 }
772
773 /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and the
774  * packet is a UDP packet whose dest port is the IANA-assigned VXLAN port,
775  * the hardware is expected to do the checksum offload, but it does not do
776  * the checksum offload when the udp dest port is 4789, so fall back to
777  * software checksumming in that case.
778  */
779 static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
780 {
781 #define IANA_VXLAN_PORT 4789
782         union {
783                 struct tcphdr *tcp;
784                 struct udphdr *udp;
785                 struct gre_base_hdr *gre;
786                 unsigned char *hdr;
787         } l4;
788
789         l4.hdr = skb_transport_header(skb);
790
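        /* Only non-encapsulated UDP packets sent to the VXLAN port need the
         * software checksum workaround below.
         */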
791         if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
792                 return false;
793
794         skb_checksum_help(skb);
795
796         return true;
797 }
798
799 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
800                                    u8 il4_proto, u32 *type_cs_vlan_tso,
801                                    u32 *ol_type_vlan_len_msec)
802 {
803         union {
804                 struct iphdr *v4;
805                 struct ipv6hdr *v6;
806                 unsigned char *hdr;
807         } l3;
808         u32 l4_proto = ol4_proto;
809
810         l3.hdr = skb_network_header(skb);
811
812         /* define OL3 type and tunnel type(OL4).*/
813         if (skb->encapsulation) {
814                 /* define outer network header type.*/
815                 if (skb->protocol == htons(ETH_P_IP)) {
816                         if (skb_is_gso(skb))
817                                 hnae3_set_field(*ol_type_vlan_len_msec,
818                                                 HNS3_TXD_OL3T_M,
819                                                 HNS3_TXD_OL3T_S,
820                                                 HNS3_OL3T_IPV4_CSUM);
821                         else
822                                 hnae3_set_field(*ol_type_vlan_len_msec,
823                                                 HNS3_TXD_OL3T_M,
824                                                 HNS3_TXD_OL3T_S,
825                                                 HNS3_OL3T_IPV4_NO_CSUM);
826
827                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
828                         hnae3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
829                                         HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
830                 }
831
832                 /* define tunnel type(OL4).*/
833                 switch (l4_proto) {
834                 case IPPROTO_UDP:
835                         hnae3_set_field(*ol_type_vlan_len_msec,
836                                         HNS3_TXD_TUNTYPE_M,
837                                         HNS3_TXD_TUNTYPE_S,
838                                         HNS3_TUN_MAC_IN_UDP);
839                         break;
840                 case IPPROTO_GRE:
841                         hnae3_set_field(*ol_type_vlan_len_msec,
842                                         HNS3_TXD_TUNTYPE_M,
843                                         HNS3_TXD_TUNTYPE_S,
844                                         HNS3_TUN_NVGRE);
845                         break;
846                 default:
847                         /* drop the skb tunnel packet if the hardware doesn't
848                          * support it, because it can't compute the csum for TSO.
849                          */
850                         if (skb_is_gso(skb))
851                                 return -EDOM;
852
853                         /* the stack has already computed the IP header, so the
854                          * driver calculates the l4 checksum when not doing TSO.
855                          */
856                         skb_checksum_help(skb);
857                         return 0;
858                 }
859
860                 l3.hdr = skb_inner_network_header(skb);
861                 l4_proto = il4_proto;
862         }
863
864         if (l3.v4->version == 4) {
865                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
866                                 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
867
868                 /* the stack computes the IP header already, the only time we
869                  * need the hardware to recompute it is in the case of TSO.
870                  */
871                 if (skb_is_gso(skb))
872                         hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
873         } else if (l3.v6->version == 6) {
874                 hnae3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
875                                 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
876         }
877
878         switch (l4_proto) {
879         case IPPROTO_TCP:
880                 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
881                 hnae3_set_field(*type_cs_vlan_tso,
882                                 HNS3_TXD_L4T_M,
883                                 HNS3_TXD_L4T_S,
884                                 HNS3_L4T_TCP);
885                 break;
886         case IPPROTO_UDP:
887                 if (hns3_tunnel_csum_bug(skb))
888                         break;
889
890                 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
891                 hnae3_set_field(*type_cs_vlan_tso,
892                                 HNS3_TXD_L4T_M,
893                                 HNS3_TXD_L4T_S,
894                                 HNS3_L4T_UDP);
895                 break;
896         case IPPROTO_SCTP:
897                 hnae3_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
898                 hnae3_set_field(*type_cs_vlan_tso,
899                                 HNS3_TXD_L4T_M,
900                                 HNS3_TXD_L4T_S,
901                                 HNS3_L4T_SCTP);
902                 break;
903         default:
904                 /* drop the skb tunnel packet if the hardware doesn't support
905                  * it, because it can't compute the csum for TSO.
906                  */
907                 if (skb_is_gso(skb))
908                         return -EDOM;
909
910                 /* the stack has already computed the IP header, so the driver
911                  * calculates the l4 checksum when not doing TSO.
912                  */
913                 skb_checksum_help(skb);
914                 return 0;
915         }
916
917         return 0;
918 }
919
920 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
921 {
922         /* Config bd buffer end */
923         hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
924                         HNS3_TXD_BDTYPE_S, 0);
925         hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
926         hnae3_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
927         hnae3_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
928 }
929
930 static int hns3_fill_desc_vtags(struct sk_buff *skb,
931                                 struct hns3_enet_ring *tx_ring,
932                                 u32 *inner_vlan_flag,
933                                 u32 *out_vlan_flag,
934                                 u16 *inner_vtag,
935                                 u16 *out_vtag)
936 {
937 #define HNS3_TX_VLAN_PRIO_SHIFT 13
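/* In the 802.1Q TCI the 3-bit PCP (priority) field occupies bits 15:13, hence
 * the shift of 13 when merging skb->priority into the VLAN tag.
 */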
938
939         if (skb->protocol == htons(ETH_P_8021Q) &&
940             !(tx_ring->tqp->handle->kinfo.netdev->features &
941             NETIF_F_HW_VLAN_CTAG_TX)) {
942                 /* When HW VLAN acceleration is turned off, and the stack
943                  * sets the protocol to 802.1q, the driver just needs to
944                  * set the protocol to the encapsulated ethertype.
945                  */
946                 skb->protocol = vlan_get_protocol(skb);
947                 return 0;
948         }
949
950         if (skb_vlan_tag_present(skb)) {
951                 u16 vlan_tag;
952
953                 vlan_tag = skb_vlan_tag_get(skb);
954                 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
955
956                 /* Based on the hw strategy, use out_vtag in the double tag
957                  * case and inner_vtag in the single tag case.
958                  */
959                 if (skb->protocol == htons(ETH_P_8021Q)) {
960                         hnae3_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
961                         *out_vtag = vlan_tag;
962                 } else {
963                         hnae3_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
964                         *inner_vtag = vlan_tag;
965                 }
966         } else if (skb->protocol == htons(ETH_P_8021Q)) {
967                 struct vlan_ethhdr *vhdr;
968                 int rc;
969
970                 rc = skb_cow_head(skb, 0);
971                 if (rc < 0)
972                         return rc;
973                 vhdr = (struct vlan_ethhdr *)skb->data;
974                 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
975                                         << HNS3_TX_VLAN_PRIO_SHIFT);
976         }
977
978         skb->protocol = vlan_get_protocol(skb);
979         return 0;
980 }
981
982 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
983                           int size, int frag_end, enum hns_desc_type type)
984 {
985         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
986         struct hns3_desc *desc = &ring->desc[ring->next_to_use];
987         struct device *dev = ring_to_dev(ring);
988         u32 ol_type_vlan_len_msec = 0;
989         u16 bdtp_fe_sc_vld_ra_ri = 0;
990         struct skb_frag_struct *frag;
991         unsigned int frag_buf_num;
992         u32 type_cs_vlan_tso = 0;
993         struct sk_buff *skb;
994         u16 inner_vtag = 0;
995         u16 out_vtag = 0;
996         unsigned int k;
997         int sizeoflast;
998         u32 paylen = 0;
999         dma_addr_t dma;
1000         u16 mss = 0;
1001         u8 ol4_proto;
1002         u8 il4_proto;
1003         int ret;
1004
1005         if (type == DESC_TYPE_SKB) {
1006                 skb = (struct sk_buff *)priv;
1007                 paylen = skb->len;
1008
1009                 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
1010                                            &ol_type_vlan_len_msec,
1011                                            &inner_vtag, &out_vtag);
1012                 if (unlikely(ret))
1013                         return ret;
1014
1015                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1016                         skb_reset_mac_len(skb);
1017
1018                         ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
1019                         if (ret)
1020                                 return ret;
1021                         hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
1022                                             &type_cs_vlan_tso,
1023                                             &ol_type_vlan_len_msec);
1024                         ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
1025                                                       &type_cs_vlan_tso,
1026                                                       &ol_type_vlan_len_msec);
1027                         if (ret)
1028                                 return ret;
1029
1030                         ret = hns3_set_tso(skb, &paylen, &mss,
1031                                            &type_cs_vlan_tso);
1032                         if (ret)
1033                                 return ret;
1034                 }
1035
1036                 /* Set txbd */
1037                 desc->tx.ol_type_vlan_len_msec =
1038                         cpu_to_le32(ol_type_vlan_len_msec);
1039                 desc->tx.type_cs_vlan_tso_len =
1040                         cpu_to_le32(type_cs_vlan_tso);
1041                 desc->tx.paylen = cpu_to_le32(paylen);
1042                 desc->tx.mss = cpu_to_le16(mss);
1043                 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
1044                 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
1045
1046                 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1047         } else {
1048                 frag = (struct skb_frag_struct *)priv;
1049                 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1050         }
1051
1052         if (dma_mapping_error(ring->dev, dma)) {
1053                 ring->stats.sw_err_cnt++;
1054                 return -ENOMEM;
1055         }
1056
1057         desc_cb->length = size;
1058
1059         frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1060         sizeoflast = size % HNS3_MAX_BD_SIZE;
1061         sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
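        /* The buffer is split into frag_buf_num BDs of at most HNS3_MAX_BD_SIZE
         * bytes each; sizeoflast is the length carried by the final BD.
         */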
1062
1063         /* When frag size is bigger than hardware limit, split this frag */
1064         for (k = 0; k < frag_buf_num; k++) {
1065                 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
1066                 desc_cb->priv = priv;
1067                 desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
1068                 desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
1069                                         DESC_TYPE_SKB : DESC_TYPE_PAGE;
1070
1071                 /* now, fill the descriptor */
1072                 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
1073                 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
1074                                 (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
1075                 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
1076                                        frag_end && (k == frag_buf_num - 1) ?
1077                                                 1 : 0);
1078                 desc->tx.bdtp_fe_sc_vld_ra_ri =
1079                                 cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
1080
1081                 /* move ring pointer to next.*/
1082                 ring_ptr_move_fw(ring, next_to_use);
1083
1084                 desc_cb = &ring->desc_cb[ring->next_to_use];
1085                 desc = &ring->desc[ring->next_to_use];
1086         }
1087
1088         return 0;
1089 }
1090
1091 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1092                                    struct hns3_enet_ring *ring)
1093 {
1094         struct sk_buff *skb = *out_skb;
1095         struct skb_frag_struct *frag;
1096         int bdnum_for_frag;
1097         int frag_num;
1098         int buf_num;
1099         int size;
1100         int i;
1101
1102         size = skb_headlen(skb);
1103         buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
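        /* BDs needed for the linear part, rounded up in HNS3_MAX_BD_SIZE chunks */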
1104
1105         frag_num = skb_shinfo(skb)->nr_frags;
1106         for (i = 0; i < frag_num; i++) {
1107                 frag = &skb_shinfo(skb)->frags[i];
1108                 size = skb_frag_size(frag);
1109                 bdnum_for_frag =
1110                         (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
1111                 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
1112                         return -ENOMEM;
1113
1114                 buf_num += bdnum_for_frag;
1115         }
1116
1117         if (buf_num > ring_space(ring))
1118                 return -EBUSY;
1119
1120         *bnum = buf_num;
1121         return 0;
1122 }
1123
1124 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1125                                   struct hns3_enet_ring *ring)
1126 {
1127         struct sk_buff *skb = *out_skb;
1128         int buf_num;
1129
1130         /* No. of segments (plus a header) */
1131         buf_num = skb_shinfo(skb)->nr_frags + 1;
1132
1133         if (unlikely(ring_space(ring) < buf_num))
1134                 return -EBUSY;
1135
1136         *bnum = buf_num;
1137
1138         return 0;
1139 }
1140
1141 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
1142 {
1143         struct device *dev = ring_to_dev(ring);
1144         unsigned int i;
1145
1146         for (i = 0; i < ring->desc_num; i++) {
1147                 /* check if this is where we started */
1148                 if (ring->next_to_use == next_to_use_orig)
1149                         break;
1150
1151                 /* unmap the descriptor dma address */
1152                 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
1153                         dma_unmap_single(dev,
1154                                          ring->desc_cb[ring->next_to_use].dma,
1155                                         ring->desc_cb[ring->next_to_use].length,
1156                                         DMA_TO_DEVICE);
1157                 else if (ring->desc_cb[ring->next_to_use].length)
1158                         dma_unmap_page(dev,
1159                                        ring->desc_cb[ring->next_to_use].dma,
1160                                        ring->desc_cb[ring->next_to_use].length,
1161                                        DMA_TO_DEVICE);
1162
1163                 ring->desc_cb[ring->next_to_use].length = 0;
1164
1165                 /* rollback one */
1166                 ring_ptr_move_bw(ring, next_to_use);
1167         }
1168 }
1169
1170 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
1171 {
1172         struct hns3_nic_priv *priv = netdev_priv(netdev);
1173         struct hns3_nic_ring_data *ring_data =
1174                 &tx_ring_data(priv, skb->queue_mapping);
1175         struct hns3_enet_ring *ring = ring_data->ring;
1176         struct netdev_queue *dev_queue;
1177         struct skb_frag_struct *frag;
1178         int next_to_use_head;
1179         int next_to_use_frag;
1180         int buf_num;
1181         int seg_num;
1182         int size;
1183         int ret;
1184         int i;
1185
1186         /* Prefetch the data used later */
1187         prefetch(skb->data);
1188
1189         switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
1190         case -EBUSY:
1191                 u64_stats_update_begin(&ring->syncp);
1192                 ring->stats.tx_busy++;
1193                 u64_stats_update_end(&ring->syncp);
1194
1195                 goto out_net_tx_busy;
1196         case -ENOMEM:
1197                 u64_stats_update_begin(&ring->syncp);
1198                 ring->stats.sw_err_cnt++;
1199                 u64_stats_update_end(&ring->syncp);
1200                 netdev_err(netdev, "no memory to xmit!\n");
1201
1202                 goto out_err_tx_ok;
1203         default:
1204                 break;
1205         }
1206
1207         /* No. of segments (plus a header) */
1208         seg_num = skb_shinfo(skb)->nr_frags + 1;
1209         /* Fill the first part */
1210         size = skb_headlen(skb);
1211
1212         next_to_use_head = ring->next_to_use;
1213
1214         ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
1215                                   DESC_TYPE_SKB);
1216         if (ret)
1217                 goto head_fill_err;
1218
1219         next_to_use_frag = ring->next_to_use;
1220         /* Fill the fragments */
1221         for (i = 1; i < seg_num; i++) {
1222                 frag = &skb_shinfo(skb)->frags[i - 1];
1223                 size = skb_frag_size(frag);
1224
1225                 ret = priv->ops.fill_desc(ring, frag, size,
1226                                           seg_num - 1 == i ? 1 : 0,
1227                                           DESC_TYPE_PAGE);
1228
1229                 if (ret)
1230                         goto frag_fill_err;
1231         }
1232
1233         /* All BDs for this skb have been filled */
1234         dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1235         netdev_tx_sent_queue(dev_queue, skb->len);
1236
1237         wmb(); /* Commit all data before submit */
1238
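        /* Ring the TX doorbell: hnae3_queue_xmit() writes buf_num to the ring
         * tail register so the hardware starts processing the new descriptors.
         */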
1239         hnae3_queue_xmit(ring->tqp, buf_num);
1240
1241         return NETDEV_TX_OK;
1242
1243 frag_fill_err:
1244         hns3_clear_desc(ring, next_to_use_frag);
1245
1246 head_fill_err:
1247         hns3_clear_desc(ring, next_to_use_head);
1248
1249 out_err_tx_ok:
1250         dev_kfree_skb_any(skb);
1251         return NETDEV_TX_OK;
1252
1253 out_net_tx_busy:
1254         netif_stop_subqueue(netdev, ring_data->queue_index);
1255         smp_mb(); /* Commit all data before submit */
1256
1257         return NETDEV_TX_BUSY;
1258 }
1259
1260 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1261 {
1262         struct hnae3_handle *h = hns3_get_handle(netdev);
1263         struct sockaddr *mac_addr = p;
1264         int ret;
1265
1266         if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1267                 return -EADDRNOTAVAIL;
1268
1269         if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
1270                 netdev_info(netdev, "already using mac address %pM\n",
1271                             mac_addr->sa_data);
1272                 return 0;
1273         }
1274
1275         ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
1276         if (ret) {
1277                 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1278                 return ret;
1279         }
1280
1281         ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1282
1283         return 0;
1284 }
1285
1286 static int hns3_nic_do_ioctl(struct net_device *netdev,
1287                              struct ifreq *ifr, int cmd)
1288 {
1289         struct hnae3_handle *h = hns3_get_handle(netdev);
1290
1291         if (!netif_running(netdev))
1292                 return -EINVAL;
1293
1294         if (!h->ae_algo->ops->do_ioctl)
1295                 return -EOPNOTSUPP;
1296
1297         return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
1298 }
1299
1300 static int hns3_nic_set_features(struct net_device *netdev,
1301                                  netdev_features_t features)
1302 {
1303         netdev_features_t changed = netdev->features ^ features;
1304         struct hns3_nic_priv *priv = netdev_priv(netdev);
1305         struct hnae3_handle *h = priv->ae_handle;
1306         int ret;
1307
1308         if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1309                 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
1310                         priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1311                 else
1312                         priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1313         }
1314
1315         if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
1316             h->ae_algo->ops->enable_vlan_filter) {
1317                 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1318                         h->ae_algo->ops->enable_vlan_filter(h, true);
1319                 else
1320                         h->ae_algo->ops->enable_vlan_filter(h, false);
1321         }
1322
1323         if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
1324             h->ae_algo->ops->enable_hw_strip_rxvtag) {
1325                 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1326                         ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1327                 else
1328                         ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1329
1330                 if (ret)
1331                         return ret;
1332         }
1333
1334         if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
1335                 if (features & NETIF_F_NTUPLE)
1336                         h->ae_algo->ops->enable_fd(h, true);
1337                 else
1338                         h->ae_algo->ops->enable_fd(h, false);
1339         }
1340
1341         netdev->features = features;
1342         return 0;
1343 }
1344
1345 static void hns3_nic_get_stats64(struct net_device *netdev,
1346                                  struct rtnl_link_stats64 *stats)
1347 {
1348         struct hns3_nic_priv *priv = netdev_priv(netdev);
1349         int queue_num = priv->ae_handle->kinfo.num_tqps;
1350         struct hnae3_handle *handle = priv->ae_handle;
1351         struct hns3_enet_ring *ring;
1352         unsigned int start;
1353         unsigned int idx;
1354         u64 tx_bytes = 0;
1355         u64 rx_bytes = 0;
1356         u64 tx_pkts = 0;
1357         u64 rx_pkts = 0;
1358         u64 tx_drop = 0;
1359         u64 rx_drop = 0;
1360
1361         if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
1362                 return;
1363
1364         handle->ae_algo->ops->update_stats(handle, &netdev->stats);
1365
1366         for (idx = 0; idx < queue_num; idx++) {
1367                 /* fetch the tx stats */
1368                 ring = priv->ring_data[idx].ring;
1369                 do {
1370                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1371                         tx_bytes += ring->stats.tx_bytes;
1372                         tx_pkts += ring->stats.tx_pkts;
1373                         tx_drop += ring->stats.tx_busy;
1374                         tx_drop += ring->stats.sw_err_cnt;
1375                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1376
1377                 /* fetch the rx stats */
1378                 ring = priv->ring_data[idx + queue_num].ring;
1379                 do {
1380                         start = u64_stats_fetch_begin_irq(&ring->syncp);
1381                         rx_bytes += ring->stats.rx_bytes;
1382                         rx_pkts += ring->stats.rx_pkts;
1383                         rx_drop += ring->stats.non_vld_descs;
1384                         rx_drop += ring->stats.err_pkt_len;
1385                         rx_drop += ring->stats.l2_err;
1386                 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1387         }
1388
1389         stats->tx_bytes = tx_bytes;
1390         stats->tx_packets = tx_pkts;
1391         stats->rx_bytes = rx_bytes;
1392         stats->rx_packets = rx_pkts;
1393
1394         stats->rx_errors = netdev->stats.rx_errors;
1395         stats->multicast = netdev->stats.multicast;
1396         stats->rx_length_errors = netdev->stats.rx_length_errors;
1397         stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1398         stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1399
1400         stats->tx_errors = netdev->stats.tx_errors;
1401         stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1402         stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1403         stats->collisions = netdev->stats.collisions;
1404         stats->rx_over_errors = netdev->stats.rx_over_errors;
1405         stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1406         stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1407         stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1408         stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1409         stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1410         stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1411         stats->tx_window_errors = netdev->stats.tx_window_errors;
1412         stats->rx_compressed = netdev->stats.rx_compressed;
1413         stats->tx_compressed = netdev->stats.tx_compressed;
1414 }
1415
1416 static int hns3_setup_tc(struct net_device *netdev, void *type_data)
1417 {
1418         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
1419         struct hnae3_handle *h = hns3_get_handle(netdev);
1420         struct hnae3_knic_private_info *kinfo = &h->kinfo;
1421         u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1422         u8 tc = mqprio_qopt->qopt.num_tc;
1423         u16 mode = mqprio_qopt->mode;
1424         u8 hw = mqprio_qopt->qopt.hw;
1425         bool if_running;
1426         int ret;
1427
1428         if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1429                mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1430                 return -EOPNOTSUPP;
1431
1432         if (tc > HNAE3_MAX_TC)
1433                 return -EINVAL;
1434
1435         if (!netdev)
1436                 return -EINVAL;
1437
1438         if_running = netif_running(netdev);
1439         if (if_running) {
1440                 hns3_nic_net_stop(netdev);
1441                 msleep(100);
1442         }
1443
1444         ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1445                 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
1446         if (ret)
1447                 goto out;
1448
1449         ret = hns3_nic_set_real_num_queue(netdev);
1450
1451 out:
1452         if (if_running)
1453                 hns3_nic_net_open(netdev);
1454
1455         return ret;
1456 }
1457
1458 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1459                              void *type_data)
1460 {
1461         if (type != TC_SETUP_QDISC_MQPRIO)
1462                 return -EOPNOTSUPP;
1463
1464         return hns3_setup_tc(dev, type_data);
1465 }
1466
1467 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1468                                 __be16 proto, u16 vid)
1469 {
1470         struct hnae3_handle *h = hns3_get_handle(netdev);
1471         struct hns3_nic_priv *priv = netdev_priv(netdev);
1472         int ret = -EIO;
1473
1474         if (h->ae_algo->ops->set_vlan_filter)
1475                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1476
1477         if (!ret)
1478                 set_bit(vid, priv->active_vlans);
1479
1480         return ret;
1481 }
1482
1483 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1484                                  __be16 proto, u16 vid)
1485 {
1486         struct hnae3_handle *h = hns3_get_handle(netdev);
1487         struct hns3_nic_priv *priv = netdev_priv(netdev);
1488         int ret = -EIO;
1489
1490         if (h->ae_algo->ops->set_vlan_filter)
1491                 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1492
1493         if (!ret)
1494                 clear_bit(vid, priv->active_vlans);
1495
1496         return ret;
1497 }
1498
1499 static int hns3_restore_vlan(struct net_device *netdev)
1500 {
1501         struct hns3_nic_priv *priv = netdev_priv(netdev);
1502         int ret = 0;
1503         u16 vid;
1504
1505         for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
1506                 ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
1507                 if (ret) {
1508                         netdev_err(netdev, "Failed to restore vlan %d filter, ret = %d\n",
1509                                    vid, ret);
1510                         return ret;
1511                 }
1512         }
1513
1514         return ret;
1515 }
1516
1517 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1518                                 u8 qos, __be16 vlan_proto)
1519 {
1520         struct hnae3_handle *h = hns3_get_handle(netdev);
1521         int ret = -EIO;
1522
1523         if (h->ae_algo->ops->set_vf_vlan_filter)
1524                 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1525                                                    qos, vlan_proto);
1526
1527         return ret;
1528 }
1529
1530 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1531 {
1532         struct hnae3_handle *h = hns3_get_handle(netdev);
1533         bool if_running = netif_running(netdev);
1534         int ret;
1535
1536         if (!h->ae_algo->ops->set_mtu)
1537                 return -EOPNOTSUPP;
1538
1539         /* if this was called with netdev up then bring netdevice down */
1540         if (if_running) {
1541                 (void)hns3_nic_net_stop(netdev);
1542                 msleep(100);
1543         }
1544
1545         ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1546         if (ret)
1547                 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1548                            ret);
1549         else
1550                 netdev->mtu = new_mtu;
1551
1552         /* if the netdev was running earlier, bring it up again */
1553         if (if_running && hns3_nic_net_open(netdev))
1554                 ret = -EINVAL;
1555
1556         return ret;
1557 }
1558
1559 static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1560 {
1561         struct hns3_nic_priv *priv = netdev_priv(ndev);
1562         struct hns3_enet_ring *tx_ring = NULL;
1563         int timeout_queue = 0;
1564         int hw_head, hw_tail;
1565         int i;
1566
1567         /* Find the stopped queue the same way the stack does */
1568         for (i = 0; i < ndev->real_num_tx_queues; i++) {
1569                 struct netdev_queue *q;
1570                 unsigned long trans_start;
1571
1572                 q = netdev_get_tx_queue(ndev, i);
1573                 trans_start = q->trans_start;
1574                 if (netif_xmit_stopped(q) &&
1575                     time_after(jiffies,
1576                                (trans_start + ndev->watchdog_timeo))) {
1577                         timeout_queue = i;
1578                         break;
1579                 }
1580         }
1581
1582         if (i == ndev->real_num_tx_queues) {
1583                 netdev_info(ndev,
1584                             "no netdev TX timeout queue found, timeout count: %llu\n",
1585                             priv->tx_timeout_count);
1586                 return false;
1587         }
1588
1589         tx_ring = priv->ring_data[timeout_queue].ring;
1590
1591         hw_head = readl_relaxed(tx_ring->tqp->io_base +
1592                                 HNS3_RING_TX_RING_HEAD_REG);
1593         hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1594                                 HNS3_RING_TX_RING_TAIL_REG);
1595         netdev_info(ndev,
1596                     "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1597                     priv->tx_timeout_count,
1598                     timeout_queue,
1599                     tx_ring->next_to_use,
1600                     tx_ring->next_to_clean,
1601                     hw_head,
1602                     hw_tail,
1603                     readl(tx_ring->tqp_vector->mask_addr));
1604
1605         return true;
1606 }
1607
1608 static void hns3_nic_net_timeout(struct net_device *ndev)
1609 {
1610         struct hns3_nic_priv *priv = netdev_priv(ndev);
1611         struct hnae3_handle *h = priv->ae_handle;
1612
1613         if (!hns3_get_tx_timeo_queue_info(ndev))
1614                 return;
1615
1616         priv->tx_timeout_count++;
1617
1618         if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
1619                 return;
1620
1621         /* request the reset */
1622         if (h->ae_algo->ops->reset_event)
1623                 h->ae_algo->ops->reset_event(h->pdev, h);
1624 }
1625
1626 static const struct net_device_ops hns3_nic_netdev_ops = {
1627         .ndo_open               = hns3_nic_net_open,
1628         .ndo_stop               = hns3_nic_net_stop,
1629         .ndo_start_xmit         = hns3_nic_net_xmit,
1630         .ndo_tx_timeout         = hns3_nic_net_timeout,
1631         .ndo_set_mac_address    = hns3_nic_net_set_mac_address,
1632         .ndo_do_ioctl           = hns3_nic_do_ioctl,
1633         .ndo_change_mtu         = hns3_nic_change_mtu,
1634         .ndo_set_features       = hns3_nic_set_features,
1635         .ndo_get_stats64        = hns3_nic_get_stats64,
1636         .ndo_setup_tc           = hns3_nic_setup_tc,
1637         .ndo_set_rx_mode        = hns3_nic_set_rx_mode,
1638         .ndo_vlan_rx_add_vid    = hns3_vlan_rx_add_vid,
1639         .ndo_vlan_rx_kill_vid   = hns3_vlan_rx_kill_vid,
1640         .ndo_set_vf_vlan        = hns3_ndo_set_vf_vlan,
1641 };
1642
1643 static bool hns3_is_phys_func(struct pci_dev *pdev)
1644 {
1645         u32 dev_id = pdev->device;
1646
1647         switch (dev_id) {
1648         case HNAE3_DEV_ID_GE:
1649         case HNAE3_DEV_ID_25GE:
1650         case HNAE3_DEV_ID_25GE_RDMA:
1651         case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
1652         case HNAE3_DEV_ID_50GE_RDMA:
1653         case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
1654         case HNAE3_DEV_ID_100G_RDMA_MACSEC:
1655                 return true;
1656         case HNAE3_DEV_ID_100G_VF:
1657         case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
1658                 return false;
1659         default:
1660                 dev_warn(&pdev->dev, "unrecognized pci device-id %d\n",
1661                          dev_id);
1662         }
1663
1664         return false;
1665 }
1666
1667 static void hns3_disable_sriov(struct pci_dev *pdev)
1668 {
1669         /* If our VFs are assigned we cannot shut down SR-IOV
1670          * without causing issues, so just leave the hardware
1671          * available but disabled
1672          */
1673         if (pci_vfs_assigned(pdev)) {
1674                 dev_warn(&pdev->dev,
1675                          "disabling driver while VFs are assigned\n");
1676                 return;
1677         }
1678
1679         pci_disable_sriov(pdev);
1680 }
1681
1682 static void hns3_get_dev_capability(struct pci_dev *pdev,
1683                                     struct hnae3_ae_dev *ae_dev)
1684 {
1685         if (pdev->revision >= 0x21)
1686                 hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
1687 }
1688
1689 /* hns3_probe - Device initialization routine
1690  * @pdev: PCI device information struct
1691  * @ent: entry in hns3_pci_tbl
1692  *
1693  * hns3_probe initializes a PF identified by a pci_dev structure.
1694  * The OS initialization, configuring of the PF private structure,
1695  * and a hardware reset occur.
1696  *
1697  * Returns 0 on success, negative on failure
1698  */
1699 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1700 {
1701         struct hnae3_ae_dev *ae_dev;
1702         int ret;
1703
1704         ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1705                               GFP_KERNEL);
1706         if (!ae_dev) {
1707                 ret = -ENOMEM;
1708                 return ret;
1709         }
1710
1711         ae_dev->pdev = pdev;
1712         ae_dev->flag = ent->driver_data;
1713         ae_dev->dev_type = HNAE3_DEV_KNIC;
1714         ae_dev->reset_type = HNAE3_NONE_RESET;
1715         hns3_get_dev_capability(pdev, ae_dev);
1716         pci_set_drvdata(pdev, ae_dev);
1717
1718         hnae3_register_ae_dev(ae_dev);
1719
1720         return 0;
1721 }
1722
1723 /* hns3_remove - Device removal routine
1724  * @pdev: PCI device information struct
1725  */
1726 static void hns3_remove(struct pci_dev *pdev)
1727 {
1728         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1729
1730         if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
1731                 hns3_disable_sriov(pdev);
1732
1733         hnae3_unregister_ae_dev(ae_dev);
1734 }
1735
1736 /**
1737  * hns3_pci_sriov_configure
1738  * @pdev: pointer to a pci_dev structure
1739  * @num_vfs: number of VFs to allocate
1740  *
1741  * Enable or change the number of VFs. Called when the user updates the number
1742  * of VFs in sysfs.
1743  **/
1744 static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
1745 {
1746         int ret;
1747
1748         if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
1749                 dev_warn(&pdev->dev, "Cannot configure SR-IOV\n");
1750                 return -EINVAL;
1751         }
1752
1753         if (num_vfs) {
1754                 ret = pci_enable_sriov(pdev, num_vfs);
1755                 if (ret)
1756                         dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
1757                 else
1758                         return num_vfs;
1759         } else if (!pci_vfs_assigned(pdev)) {
1760                 pci_disable_sriov(pdev);
1761         } else {
1762                 dev_warn(&pdev->dev,
1763                          "Unable to free VFs because some are assigned to VMs.\n");
1764         }
1765
1766         return 0;
1767 }
1768
1769 static void hns3_shutdown(struct pci_dev *pdev)
1770 {
1771         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1772
1773         hnae3_unregister_ae_dev(ae_dev);
1774         devm_kfree(&pdev->dev, ae_dev);
1775         pci_set_drvdata(pdev, NULL);
1776
1777         if (system_state == SYSTEM_POWER_OFF)
1778                 pci_set_power_state(pdev, PCI_D3hot);
1779 }
1780
1781 static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
1782                                             pci_channel_state_t state)
1783 {
1784         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1785         pci_ers_result_t ret;
1786
1787         dev_info(&pdev->dev, "PCI error detected, state = %d\n", state);
1788
1789         if (state == pci_channel_io_perm_failure)
1790                 return PCI_ERS_RESULT_DISCONNECT;
1791
1792         if (!ae_dev) {
1793                 dev_err(&pdev->dev,
1794                         "Can't recover - error happened during device init\n");
1795                 return PCI_ERS_RESULT_NONE;
1796         }
1797
1798         if (ae_dev->ops->process_hw_error)
1799                 ret = ae_dev->ops->process_hw_error(ae_dev);
1800         else
1801                 return PCI_ERS_RESULT_NONE;
1802
1803         return ret;
1804 }
1805
1806 static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
1807 {
1808         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1809         struct device *dev = &pdev->dev;
1810
1811         dev_info(dev, "requesting reset due to PCI error\n");
1812
1813         /* request the reset */
1814         if (ae_dev->ops->reset_event) {
1815                 ae_dev->ops->reset_event(pdev, NULL);
1816                 return PCI_ERS_RESULT_RECOVERED;
1817         }
1818
1819         return PCI_ERS_RESULT_DISCONNECT;
1820 }
1821
1822 static const struct pci_error_handlers hns3_err_handler = {
1823         .error_detected = hns3_error_detected,
1824         .slot_reset     = hns3_slot_reset,
1825 };
1826
1827 static struct pci_driver hns3_driver = {
1828         .name     = hns3_driver_name,
1829         .id_table = hns3_pci_tbl,
1830         .probe    = hns3_probe,
1831         .remove   = hns3_remove,
1832         .shutdown = hns3_shutdown,
1833         .sriov_configure = hns3_pci_sriov_configure,
1834         .err_handler    = &hns3_err_handler,
1835 };
1836
1837 /* set the default features for the hns3 netdev */
1838 static void hns3_set_default_feature(struct net_device *netdev)
1839 {
1840         struct hnae3_handle *h = hns3_get_handle(netdev);
1841         struct pci_dev *pdev = h->pdev;
1842
1843         netdev->priv_flags |= IFF_UNICAST_FLT;
1844
1845         netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1846                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1847                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1848                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1849                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1850
1851         netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1852
1853         netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1854
1855         netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1856                 NETIF_F_HW_VLAN_CTAG_FILTER |
1857                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1858                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1859                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1860                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1861                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1862
1863         netdev->vlan_features |=
1864                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1865                 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1866                 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1867                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1868                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1869
1870         netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1871                 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
1872                 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1873                 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1874                 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1875                 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
1876
1877         if (pdev->revision >= 0x21) {
1878                 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1879
1880                 if (!(h->flags & HNAE3_SUPPORT_VF)) {
1881                         netdev->hw_features |= NETIF_F_NTUPLE;
1882                         netdev->features |= NETIF_F_NTUPLE;
1883                 }
1884         }
1885 }
1886
1887 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1888                              struct hns3_desc_cb *cb)
1889 {
1890         unsigned int order = hnae3_page_order(ring);
1891         struct page *p;
1892
1893         p = dev_alloc_pages(order);
1894         if (!p)
1895                 return -ENOMEM;
1896
1897         cb->priv = p;
1898         cb->page_offset = 0;
1899         cb->reuse_flag = 0;
1900         cb->buf  = page_address(p);
1901         cb->length = hnae3_page_size(ring);
1902         cb->type = DESC_TYPE_PAGE;
1903
1904         return 0;
1905 }
1906
1907 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1908                              struct hns3_desc_cb *cb)
1909 {
1910         if (cb->type == DESC_TYPE_SKB)
1911                 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1912         else if (!HNAE3_IS_TX_RING(ring))
1913                 put_page((struct page *)cb->priv);
1914         memset(cb, 0, sizeof(*cb));
1915 }
1916
1917 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1918 {
1919         cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1920                                cb->length, ring_to_dma_dir(ring));
1921
1922         if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
1923                 return -EIO;
1924
1925         return 0;
1926 }
1927
1928 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1929                               struct hns3_desc_cb *cb)
1930 {
1931         if (cb->type == DESC_TYPE_SKB)
1932                 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1933                                  ring_to_dma_dir(ring));
1934         else if (cb->length)
1935                 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1936                                ring_to_dma_dir(ring));
1937 }
1938
1939 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1940 {
1941         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1942         ring->desc[i].addr = 0;
1943 }
1944
1945 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1946 {
1947         struct hns3_desc_cb *cb = &ring->desc_cb[i];
1948
1949         if (!ring->desc_cb[i].dma)
1950                 return;
1951
1952         hns3_buffer_detach(ring, i);
1953         hns3_free_buffer(ring, cb);
1954 }
1955
1956 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1957 {
1958         int i;
1959
1960         for (i = 0; i < ring->desc_num; i++)
1961                 hns3_free_buffer_detach(ring, i);
1962 }
1963
1964 /* free desc along with its attached buffer */
1965 static void hns3_free_desc(struct hns3_enet_ring *ring)
1966 {
1967         int size = ring->desc_num * sizeof(ring->desc[0]);
1968
1969         hns3_free_buffers(ring);
1970
1971         if (ring->desc) {
1972                 dma_free_coherent(ring_to_dev(ring), size,
1973                                   ring->desc, ring->desc_dma_addr);
1974                 ring->desc = NULL;
1975         }
1976 }
1977
1978 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1979 {
1980         int size = ring->desc_num * sizeof(ring->desc[0]);
1981
1982         ring->desc = dma_zalloc_coherent(ring_to_dev(ring), size,
1983                                          &ring->desc_dma_addr,
1984                                          GFP_KERNEL);
1985         if (!ring->desc)
1986                 return -ENOMEM;
1987
1988         return 0;
1989 }
1990
1991 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1992                                    struct hns3_desc_cb *cb)
1993 {
1994         int ret;
1995
1996         ret = hns3_alloc_buffer(ring, cb);
1997         if (ret)
1998                 goto out;
1999
2000         ret = hns3_map_buffer(ring, cb);
2001         if (ret)
2002                 goto out_with_buf;
2003
2004         return 0;
2005
2006 out_with_buf:
2007         hns3_free_buffer(ring, cb);
2008 out:
2009         return ret;
2010 }
2011
2012 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
2013 {
2014         int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
2015
2016         if (ret)
2017                 return ret;
2018
2019         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2020
2021         return 0;
2022 }
2023
2024 /* Allocate memory for the raw packet buffers and map them for DMA */
2025 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
2026 {
2027         int i, j, ret;
2028
2029         for (i = 0; i < ring->desc_num; i++) {
2030                 ret = hns3_alloc_buffer_attach(ring, i);
2031                 if (ret)
2032                         goto out_buffer_fail;
2033         }
2034
2035         return 0;
2036
2037 out_buffer_fail:
2038         for (j = i - 1; j >= 0; j--)
2039                 hns3_free_buffer_detach(ring, j);
2040         return ret;
2041 }
2042
2043 /* detach an in-use buffer and replace it with a reserved one */
2044 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
2045                                 struct hns3_desc_cb *res_cb)
2046 {
2047         hns3_unmap_buffer(ring, &ring->desc_cb[i]);
2048         ring->desc_cb[i] = *res_cb;
2049         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
2050         ring->desc[i].rx.bd_base_info = 0;
2051 }
2052
2053 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
2054 {
2055         ring->desc_cb[i].reuse_flag = 0;
2056         ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
2057                 + ring->desc_cb[i].page_offset);
2058         ring->desc[i].rx.bd_base_info = 0;
2059 }
2060
2061 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
2062                                       int *pkts)
2063 {
2064         struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
2065
2066         (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
2067         (*bytes) += desc_cb->length;
2068         /* desc_cb will be cleaned after hns3_free_buffer_detach() */
2069         hns3_free_buffer_detach(ring, ring->next_to_clean);
2070
2071         ring_ptr_move_fw(ring, next_to_clean);
2072 }
2073
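/* The hardware head pointer is only sane if it lies within the in-flight
 * region between next_to_clean (exclusive) and next_to_use (inclusive),
 * taking ring wrap-around into account.  For example, with desc_num = 8,
 * next_to_clean = 6 and next_to_use = 2, the valid head values are
 * 7, 0, 1 and 2.
 */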
2074 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
2075 {
2076         int u = ring->next_to_use;
2077         int c = ring->next_to_clean;
2078
2079         if (unlikely(h > ring->desc_num))
2080                 return 0;
2081
2082         return u > c ? (h > c && h <= u) : (h > c || h <= u);
2083 }
2084
2085 void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
2086 {
2087         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2088         struct hns3_nic_priv *priv = netdev_priv(netdev);
2089         struct netdev_queue *dev_queue;
2090         int bytes, pkts;
2091         int head;
2092
2093         head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
2094         rmb(); /* Make sure head is ready before touching any data */
2095
2096         if (is_ring_empty(ring) || head == ring->next_to_clean)
2097                 return; /* no data to poll */
2098
2099         if (unlikely(!is_valid_clean_head(ring, head))) {
2100                 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
2101                            ring->next_to_use, ring->next_to_clean);
2102
2103                 u64_stats_update_begin(&ring->syncp);
2104                 ring->stats.io_err_cnt++;
2105                 u64_stats_update_end(&ring->syncp);
2106                 return;
2107         }
2108
2109         bytes = 0;
2110         pkts = 0;
2111         while (head != ring->next_to_clean) {
2112                 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
2113                 /* Issue prefetch for next Tx descriptor */
2114                 prefetch(&ring->desc_cb[ring->next_to_clean]);
2115         }
2116
2117         ring->tqp_vector->tx_group.total_bytes += bytes;
2118         ring->tqp_vector->tx_group.total_packets += pkts;
2119
2120         u64_stats_update_begin(&ring->syncp);
2121         ring->stats.tx_bytes += bytes;
2122         ring->stats.tx_pkts += pkts;
2123         u64_stats_update_end(&ring->syncp);
2124
2125         dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
2126         netdev_tx_completed_queue(dev_queue, pkts, bytes);
2127
2128         if (unlikely(pkts && netif_carrier_ok(netdev) &&
2129                      (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
2130                 /* Make sure that anybody stopping the queue after this
2131                  * sees the new next_to_clean.
2132                  */
2133                 smp_mb();
2134                 if (netif_tx_queue_stopped(dev_queue) &&
2135                     !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
2136                         netif_tx_wake_queue(dev_queue);
2137                         ring->stats.restart_queue++;
2138                 }
2139         }
2140 }
2141
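/* Number of descriptors that have been cleaned but not yet refilled, i.e.
 * the forward distance from next_to_use to next_to_clean modulo the ring
 * size.  For example, with desc_num = 8, next_to_use = 6 and
 * next_to_clean = 2, four descriptors are unused.
 */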
2142 static int hns3_desc_unused(struct hns3_enet_ring *ring)
2143 {
2144         int ntc = ring->next_to_clean;
2145         int ntu = ring->next_to_use;
2146
2147         return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
2148 }
2149
2150 static void
2151 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
2152 {
2153         struct hns3_desc_cb *desc_cb;
2154         struct hns3_desc_cb res_cbs;
2155         int i, ret;
2156
2157         for (i = 0; i < cleaned_count; i++) {
2158                 desc_cb = &ring->desc_cb[ring->next_to_use];
2159                 if (desc_cb->reuse_flag) {
2160                         u64_stats_update_begin(&ring->syncp);
2161                         ring->stats.reuse_pg_cnt++;
2162                         u64_stats_update_end(&ring->syncp);
2163
2164                         hns3_reuse_buffer(ring, ring->next_to_use);
2165                 } else {
2166                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
2167                         if (ret) {
2168                                 u64_stats_update_begin(&ring->syncp);
2169                                 ring->stats.sw_err_cnt++;
2170                                 u64_stats_update_end(&ring->syncp);
2171
2172                                 netdev_err(ring->tqp->handle->kinfo.netdev,
2173                                            "hnae reserve buffer map failed.\n");
2174                                 break;
2175                         }
2176                         hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
2177                 }
2178
2179                 ring_ptr_move_fw(ring, next_to_use);
2180         }
2181
2182         wmb(); /* Make sure all data has been written before submitting */
2183         writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
2184 }
2185
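/* Attach the current RX buffer to the skb as a page fragment and decide
 * whether the backing page can be recycled.  With 4K pages and 2K buffers
 * each page holds two buffers; if the driver is the sole owner of the page
 * the offset simply flips between the two halves.  Otherwise the offset
 * advances by one buffer size until the page is exhausted.  Pages that
 * were allocated on a remote NUMA node are never reused.
 */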
2186 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
2187                                 struct hns3_enet_ring *ring, int pull_len,
2188                                 struct hns3_desc_cb *desc_cb)
2189 {
2190         struct hns3_desc *desc;
2191         u32 truesize;
2192         int size;
2193         int last_offset;
2194         bool twobufs;
2195
2196         twobufs = ((PAGE_SIZE < 8192) &&
2197                 hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2198
2199         desc = &ring->desc[ring->next_to_clean];
2200         size = le16_to_cpu(desc->rx.size);
2201
2202         truesize = hnae3_buf_size(ring);
2203
2204         if (!twobufs)
2205                 last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
2206
2207         skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2208                         size - pull_len, truesize);
2209
2210         /* Avoid re-using remote pages; the reuse flag defaults to no reuse */
2211         if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2212                 return;
2213
2214         if (twobufs) {
2215                 /* If we are the only owner of the page we can reuse it */
2216                 if (likely(page_count(desc_cb->priv) == 1)) {
2217                         /* Flip page offset to other buffer */
2218                         desc_cb->page_offset ^= truesize;
2219
2220                         desc_cb->reuse_flag = 1;
2221                         /* bump ref count on page before it is given */
2222                         get_page(desc_cb->priv);
2223                 }
2224                 return;
2225         }
2226
2227         /* Move the offset up to the next buffer */
2228         desc_cb->page_offset += truesize;
2229
2230         if (desc_cb->page_offset <= last_offset) {
2231                 desc_cb->reuse_flag = 1;
2232                 /* Bump ref count on page before it is given */
2233                 get_page(desc_cb->priv);
2234         }
2235 }
2236
2237 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2238                              struct hns3_desc *desc)
2239 {
2240         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2241         int l3_type, l4_type;
2242         u32 bd_base_info;
2243         int ol4_type;
2244         u32 l234info;
2245
2246         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2247         l234info = le32_to_cpu(desc->rx.l234_info);
2248
2249         skb->ip_summed = CHECKSUM_NONE;
2250
2251         skb_checksum_none_assert(skb);
2252
2253         if (!(netdev->features & NETIF_F_RXCSUM))
2254                 return;
2255
2256         /* check if hardware has done checksum */
2257         if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2258                 return;
2259
2260         if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L3E_B) ||
2261                      hnae3_get_bit(l234info, HNS3_RXD_L4E_B) ||
2262                      hnae3_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2263                      hnae3_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2264                 u64_stats_update_begin(&ring->syncp);
2265                 ring->stats.l3l4_csum_err++;
2266                 u64_stats_update_end(&ring->syncp);
2267
2268                 return;
2269         }
2270
2271         l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
2272                                   HNS3_RXD_L3ID_S);
2273         l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
2274                                   HNS3_RXD_L4ID_S);
2275
2276         ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
2277                                    HNS3_RXD_OL4ID_S);
2278         switch (ol4_type) {
2279         case HNS3_OL4_TYPE_MAC_IN_UDP:
2280         case HNS3_OL4_TYPE_NVGRE:
2281                 skb->csum_level = 1;
2282                 /* fall through */
2283         case HNS3_OL4_TYPE_NO_TUN:
2284                 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2285                 if ((l3_type == HNS3_L3_TYPE_IPV4 ||
2286                      l3_type == HNS3_L3_TYPE_IPV6) &&
2287                     (l4_type == HNS3_L4_TYPE_UDP ||
2288                      l4_type == HNS3_L4_TYPE_TCP ||
2289                      l4_type == HNS3_L4_TYPE_SCTP))
2290                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2291                 break;
2292         default:
2293                 break;
2294         }
2295 }
2296
2297 static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2298 {
2299         napi_gro_receive(&ring->tqp_vector->napi, skb);
2300 }
2301
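/* Extract the VLAN tag that the hardware stripped from the packet.
 * Revision 0x20 silicon does not report which tag was stripped, so the
 * outer tag is tried first and the inner tag is used as a fallback;
 * later revisions report the location of the stripped tag in the
 * STRP_TAGP field of the descriptor.
 */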
2302 static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
2303                                 struct hns3_desc *desc, u32 l234info,
2304                                 u16 *vlan_tag)
2305 {
2306         struct pci_dev *pdev = ring->tqp->handle->pdev;
2307
2308         if (pdev->revision == 0x20) {
2309                 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2310                 if (!(*vlan_tag & VLAN_VID_MASK))
2311                         *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2312
2313                 return (*vlan_tag != 0);
2314         }
2315
2316 #define HNS3_STRP_OUTER_VLAN    0x1
2317 #define HNS3_STRP_INNER_VLAN    0x2
2318
2319         switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
2320                                 HNS3_RXD_STRP_TAGP_S)) {
2321         case HNS3_STRP_OUTER_VLAN:
2322                 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2323                 return true;
2324         case HNS3_STRP_INNER_VLAN:
2325                 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2326                 return true;
2327         default:
2328                 return false;
2329         }
2330 }
2331
2332 static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
2333                                      struct sk_buff *skb)
2334 {
2335         struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
2336         struct hnae3_handle *handle = ring->tqp->handle;
2337         enum pkt_hash_types rss_type;
2338
2339         if (le32_to_cpu(desc->rx.rss_hash))
2340                 rss_type = handle->kinfo.rss_type;
2341         else
2342                 rss_type = PKT_HASH_TYPE_NONE;
2343
2344         skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
2345 }
2346
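/* Process one received packet starting at the current descriptor.  Short
 * packets (up to HNS3_RX_HEAD_SIZE bytes) are copied into the skb linear
 * area so the buffer can be reused immediately; for larger packets only
 * the header is copied and the remaining buffer descriptors are attached
 * as page fragments until the FE (frame end) bit is seen.  *out_bnum
 * returns the number of buffer descriptors consumed.
 */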
2347 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2348                              struct sk_buff **out_skb, int *out_bnum)
2349 {
2350         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2351         struct hns3_desc_cb *desc_cb;
2352         struct hns3_desc *desc;
2353         struct sk_buff *skb;
2354         unsigned char *va;
2355         u32 bd_base_info;
2356         int pull_len;
2357         u32 l234info;
2358         int length;
2359         int bnum;
2360
2361         desc = &ring->desc[ring->next_to_clean];
2362         desc_cb = &ring->desc_cb[ring->next_to_clean];
2363
2364         prefetch(desc);
2365
2366         length = le16_to_cpu(desc->rx.size);
2367         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2368
2369         /* Check valid BD */
2370         if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
2371                 return -EFAULT;
2372
2373         va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2374
2375         /* Prefetch the first cache line of the first page.
2376          * The idea is to cache a few bytes of the packet header. Our L1
2377          * cache line size is 64B, so we need to prefetch twice to cover
2378          * 128B.  However, some CPUs have larger L1 caches with 128B cache
2379          * lines; in that case a single prefetch is enough to bring in the
2380          * relevant part of the header.
2381          */
2382         prefetch(va);
2383 #if L1_CACHE_BYTES < 128
2384         prefetch(va + L1_CACHE_BYTES);
2385 #endif
2386
2387         skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2388                                         HNS3_RX_HEAD_SIZE);
2389         if (unlikely(!skb)) {
2390                 netdev_err(netdev, "alloc rx skb fail\n");
2391
2392                 u64_stats_update_begin(&ring->syncp);
2393                 ring->stats.sw_err_cnt++;
2394                 u64_stats_update_end(&ring->syncp);
2395
2396                 return -ENOMEM;
2397         }
2398
2399         prefetchw(skb->data);
2400
2401         bnum = 1;
2402         if (length <= HNS3_RX_HEAD_SIZE) {
2403                 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2404
2405                 /* We can reuse buffer as-is, just make sure it is local */
2406                 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2407                         desc_cb->reuse_flag = 1;
2408                 else /* This page cannot be reused so discard it */
2409                         put_page(desc_cb->priv);
2410
2411                 ring_ptr_move_fw(ring, next_to_clean);
2412         } else {
2413                 u64_stats_update_begin(&ring->syncp);
2414                 ring->stats.seg_pkt_cnt++;
2415                 u64_stats_update_end(&ring->syncp);
2416
2417                 pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
2418
2419                 memcpy(__skb_put(skb, pull_len), va,
2420                        ALIGN(pull_len, sizeof(long)));
2421
2422                 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2423                 ring_ptr_move_fw(ring, next_to_clean);
2424
2425                 while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2426                         desc = &ring->desc[ring->next_to_clean];
2427                         desc_cb = &ring->desc_cb[ring->next_to_clean];
2428                         bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2429                         hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2430                         ring_ptr_move_fw(ring, next_to_clean);
2431                         bnum++;
2432                 }
2433         }
2434
2435         *out_bnum = bnum;
2436
2437         l234info = le32_to_cpu(desc->rx.l234_info);
2438
2439         /* Based on the hardware strategy, the offloaded tag is stored in
2440          * ot_vlan_tag when the packet carries two VLAN tags, and in
2441          * vlan_tag when it carries a single tag.
2442          */
2443         if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2444                 u16 vlan_tag;
2445
2446                 if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
2447                         __vlan_hwaccel_put_tag(skb,
2448                                                htons(ETH_P_8021Q),
2449                                                vlan_tag);
2450         }
2451
2452         if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2453                 u64_stats_update_begin(&ring->syncp);
2454                 ring->stats.non_vld_descs++;
2455                 u64_stats_update_end(&ring->syncp);
2456
2457                 dev_kfree_skb_any(skb);
2458                 return -EINVAL;
2459         }
2460
2461         if (unlikely((!desc->rx.pkt_len) ||
2462                      hnae3_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2463                 u64_stats_update_begin(&ring->syncp);
2464                 ring->stats.err_pkt_len++;
2465                 u64_stats_update_end(&ring->syncp);
2466
2467                 dev_kfree_skb_any(skb);
2468                 return -EFAULT;
2469         }
2470
2471         if (unlikely(hnae3_get_bit(l234info, HNS3_RXD_L2E_B))) {
2472                 u64_stats_update_begin(&ring->syncp);
2473                 ring->stats.l2_err++;
2474                 u64_stats_update_end(&ring->syncp);
2475
2476                 dev_kfree_skb_any(skb);
2477                 return -EFAULT;
2478         }
2479
2480         u64_stats_update_begin(&ring->syncp);
2481         ring->stats.rx_pkts++;
2482         ring->stats.rx_bytes += skb->len;
2483         u64_stats_update_end(&ring->syncp);
2484
2485         ring->tqp_vector->rx_group.total_bytes += skb->len;
2486
2487         hns3_rx_checksum(ring, skb, desc);
2488         hns3_set_rx_skb_rss_type(ring, skb);
2489
2490         return 0;
2491 }
2492
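/* Receive-side NAPI worker: pulls completed buffer descriptors from the
 * ring, hands the resulting skbs to the rx_fn callback and refills RX
 * buffers in batches of RCB_NOF_ALLOC_RX_BUFF_ONCE.  Returns the number
 * of packets processed, which the caller compares against the budget.
 */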
2493 int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
2494                        void (*rx_fn)(struct hns3_enet_ring *,
2495                                      struct sk_buff *))
2496 {
2497 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2498         struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2499         int recv_pkts, recv_bds, clean_count, err;
2500         int unused_count = hns3_desc_unused(ring);
2501         struct sk_buff *skb = NULL;
2502         int num, bnum = 0;
2503
2504         num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2505         rmb(); /* Make sure num is read before any other data is touched */
2506
2507         recv_pkts = 0, recv_bds = 0, clean_count = 0;
2508         num -= unused_count;
2509
2510         while (recv_pkts < budget && recv_bds < num) {
2511                 /* Reuse or realloc buffers */
2512                 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2513                         hns3_nic_alloc_rx_buffers(ring,
2514                                                   clean_count + unused_count);
2515                         clean_count = 0;
2516                         unused_count = hns3_desc_unused(ring);
2517                 }
2518
2519                 /* Poll one pkt */
2520                 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2521                 if (unlikely(!skb)) /* This fault cannot be repaired */
2522                         goto out;
2523
2524                 recv_bds += bnum;
2525                 clean_count += bnum;
2526                 if (unlikely(err)) {  /* Skip over the erroneous packet */
2527                         recv_pkts++;
2528                         continue;
2529                 }
2530
2531                 /* Pass the skb up to the IP stack */
2532                 skb->protocol = eth_type_trans(skb, netdev);
2533                 rx_fn(ring, skb);
2534
2535                 recv_pkts++;
2536         }
2537
2538 out:
2539         /* Make sure all data has been written before submitting */
2540         if (clean_count + unused_count > 0)
2541                 hns3_nic_alloc_rx_buffers(ring,
2542                                           clean_count + unused_count);
2543
2544         return recv_pkts;
2545 }
2546
2547 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2548 {
2549         struct hns3_enet_tqp_vector *tqp_vector =
2550                                         ring_group->ring->tqp_vector;
2551         enum hns3_flow_level_range new_flow_level;
2552         int packets_per_msecs;
2553         int bytes_per_msecs;
2554         u32 time_passed_ms;
2555         u16 new_int_gl;
2556
2557         if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
2558                 return false;
2559
2560         if (ring_group->total_packets == 0) {
2561                 ring_group->coal.int_gl = HNS3_INT_GL_50K;
2562                 ring_group->coal.flow_level = HNS3_FLOW_LOW;
2563                 return true;
2564         }
2565
2566         /* Simple throttle rate management
2567          * 0-10 MB/s    low    (50000 ints/s)
2568          * 10-20 MB/s   middle (20000 ints/s)
2569          * 20-1249 MB/s high   (18000 ints/s)
2570          * > 40000 pps  ultra  (8000 ints/s)
2571          */
2572         new_flow_level = ring_group->coal.flow_level;
2573         new_int_gl = ring_group->coal.int_gl;
2574         time_passed_ms =
2575                 jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
2576
2577         if (!time_passed_ms)
2578                 return false;
2579
2580         do_div(ring_group->total_packets, time_passed_ms);
2581         packets_per_msecs = ring_group->total_packets;
2582
2583         do_div(ring_group->total_bytes, time_passed_ms);
2584         bytes_per_msecs = ring_group->total_bytes;
2585
2586 #define HNS3_RX_LOW_BYTE_RATE 10000
2587 #define HNS3_RX_MID_BYTE_RATE 20000
2588
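        /* The byte-rate thresholds above are in bytes per millisecond:
         * 10000 bytes/ms is roughly 10 MB/s and 20000 bytes/ms roughly
         * 20 MB/s, matching the throttle table in the comment above.
         */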
2589         switch (new_flow_level) {
2590         case HNS3_FLOW_LOW:
2591                 if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
2592                         new_flow_level = HNS3_FLOW_MID;
2593                 break;
2594         case HNS3_FLOW_MID:
2595                 if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
2596                         new_flow_level = HNS3_FLOW_HIGH;
2597                 else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
2598                         new_flow_level = HNS3_FLOW_LOW;
2599                 break;
2600         case HNS3_FLOW_HIGH:
2601         case HNS3_FLOW_ULTRA:
2602         default:
2603                 if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
2604                         new_flow_level = HNS3_FLOW_MID;
2605                 break;
2606         }
2607
2608 #define HNS3_RX_ULTRA_PACKET_RATE 40
2609
2610         if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
2611             &tqp_vector->rx_group == ring_group)
2612                 new_flow_level = HNS3_FLOW_ULTRA;
2613
2614         switch (new_flow_level) {
2615         case HNS3_FLOW_LOW:
2616                 new_int_gl = HNS3_INT_GL_50K;
2617                 break;
2618         case HNS3_FLOW_MID:
2619                 new_int_gl = HNS3_INT_GL_20K;
2620                 break;
2621         case HNS3_FLOW_HIGH:
2622                 new_int_gl = HNS3_INT_GL_18K;
2623                 break;
2624         case HNS3_FLOW_ULTRA:
2625                 new_int_gl = HNS3_INT_GL_8K;
2626                 break;
2627         default:
2628                 break;
2629         }
2630
2631         ring_group->total_bytes = 0;
2632         ring_group->total_packets = 0;
2633         ring_group->coal.flow_level = new_flow_level;
2634         if (new_int_gl != ring_group->coal.int_gl) {
2635                 ring_group->coal.int_gl = new_int_gl;
2636                 return true;
2637         }
2638         return false;
2639 }
2640
2641 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2642 {
2643         struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
2644         struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
2645         bool rx_update, tx_update;
2646
2647         if (tqp_vector->int_adapt_down > 0) {
2648                 tqp_vector->int_adapt_down--;
2649                 return;
2650         }
2651
2652         if (rx_group->coal.gl_adapt_enable) {
2653                 rx_update = hns3_get_new_int_gl(rx_group);
2654                 if (rx_update)
2655                         hns3_set_vector_coalesce_rx_gl(tqp_vector,
2656                                                        rx_group->coal.int_gl);
2657         }
2658
2659         if (tx_group->coal.gl_adapt_enable) {
2660                 tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
2661                 if (tx_update)
2662                         hns3_set_vector_coalesce_tx_gl(tqp_vector,
2663                                                        tx_group->coal.int_gl);
2664         }
2665
2666         tqp_vector->last_jiffies = jiffies;
2667         tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
2668 }
2669
2670 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2671 {
2672         struct hns3_enet_ring *ring;
2673         int rx_pkt_total = 0;
2674
2675         struct hns3_enet_tqp_vector *tqp_vector =
2676                 container_of(napi, struct hns3_enet_tqp_vector, napi);
2677         bool clean_complete = true;
2678         int rx_budget;
2679
2680         /* Since the actual Tx work is minimal, we can give the Tx a larger
2681          * budget and be more aggressive about cleaning up the Tx descriptors.
2682          */
2683         hns3_for_each_ring(ring, tqp_vector->tx_group)
2684                 hns3_clean_tx_ring(ring);
2685
2686         /* make sure the rx ring budget is not smaller than 1 */
2687         rx_budget = max(budget / tqp_vector->num_tqps, 1);
2688
2689         hns3_for_each_ring(ring, tqp_vector->rx_group) {
2690                 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2691                                                     hns3_rx_skb);
2692
2693                 if (rx_cleaned >= rx_budget)
2694                         clean_complete = false;
2695
2696                 rx_pkt_total += rx_cleaned;
2697         }
2698
2699         tqp_vector->rx_group.total_packets += rx_pkt_total;
2700
2701         if (!clean_complete)
2702                 return budget;
2703
2704         napi_complete(napi);
2705         hns3_update_new_int_gl(tqp_vector);
2706         hns3_mask_vector_irq(tqp_vector, 1);
2707
2708         return rx_pkt_total;
2709 }
2710
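/* Build the chain of TX/RX rings serviced by this TQP vector so that it
 * can be handed to the hardware layer for ring-to-vector interrupt
 * mapping.  The first node is the caller-supplied head; additional nodes
 * are allocated here and released again by hns3_free_vector_ring_chain()
 * once the mapping has been programmed.
 */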
2711 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2712                                       struct hnae3_ring_chain_node *head)
2713 {
2714         struct pci_dev *pdev = tqp_vector->handle->pdev;
2715         struct hnae3_ring_chain_node *cur_chain = head;
2716         struct hnae3_ring_chain_node *chain;
2717         struct hns3_enet_ring *tx_ring;
2718         struct hns3_enet_ring *rx_ring;
2719
2720         tx_ring = tqp_vector->tx_group.ring;
2721         if (tx_ring) {
2722                 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2723                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2724                               HNAE3_RING_TYPE_TX);
2725                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2726                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
2727
2728                 cur_chain->next = NULL;
2729
2730                 while (tx_ring->next) {
2731                         tx_ring = tx_ring->next;
2732
2733                         chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2734                                              GFP_KERNEL);
2735                         if (!chain)
2736                                 goto err_free_chain;
2737
2738                         cur_chain->next = chain;
2739                         chain->tqp_index = tx_ring->tqp->tqp_index;
2740                         hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2741                                       HNAE3_RING_TYPE_TX);
2742                         hnae3_set_field(chain->int_gl_idx,
2743                                         HNAE3_RING_GL_IDX_M,
2744                                         HNAE3_RING_GL_IDX_S,
2745                                         HNAE3_RING_GL_TX);
2746
2747                         cur_chain = chain;
2748                 }
2749         }
2750
2751         rx_ring = tqp_vector->rx_group.ring;
2752         if (!tx_ring && rx_ring) {
2753                 cur_chain->next = NULL;
2754                 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2755                 hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2756                               HNAE3_RING_TYPE_RX);
2757                 hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2758                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2759
2760                 rx_ring = rx_ring->next;
2761         }
2762
2763         while (rx_ring) {
2764                 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2765                 if (!chain)
2766                         goto err_free_chain;
2767
2768                 cur_chain->next = chain;
2769                 chain->tqp_index = rx_ring->tqp->tqp_index;
2770                 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2771                               HNAE3_RING_TYPE_RX);
2772                 hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
2773                                 HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
2774
2775                 cur_chain = chain;
2776
2777                 rx_ring = rx_ring->next;
2778         }
2779
2780         return 0;
2781
2782 err_free_chain:
2783         cur_chain = head->next;
2784         while (cur_chain) {
2785                 chain = cur_chain->next;
2786                 devm_kfree(&pdev->dev, chain);
2787                 cur_chain = chain;
2788         }
2789
2790         return -ENOMEM;
2791 }
2792
2793 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2794                                         struct hnae3_ring_chain_node *head)
2795 {
2796         struct pci_dev *pdev = tqp_vector->handle->pdev;
2797         struct hnae3_ring_chain_node *chain_tmp, *chain;
2798
2799         chain = head->next;
2800
2801         while (chain) {
2802                 chain_tmp = chain->next;
2803                 devm_kfree(&pdev->dev, chain);
2804                 chain = chain_tmp;
2805         }
2806 }
2807
2808 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2809                                    struct hns3_enet_ring *ring)
2810 {
2811         ring->next = group->ring;
2812         group->ring = ring;
2813
2814         group->count++;
2815 }
2816
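/* Give every TQP vector a preferred CPU for its interrupt affinity,
 * spreading the vectors over the CPUs of the device's NUMA node first.
 */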
2817 static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
2818 {
2819         struct pci_dev *pdev = priv->ae_handle->pdev;
2820         struct hns3_enet_tqp_vector *tqp_vector;
2821         int num_vectors = priv->vector_num;
2822         int numa_node;
2823         int vector_i;
2824
2825         numa_node = dev_to_node(&pdev->dev);
2826
2827         for (vector_i = 0; vector_i < num_vectors; vector_i++) {
2828                 tqp_vector = &priv->tqp_vector[vector_i];
2829                 cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
2830                                 &tqp_vector->affinity_mask);
2831         }
2832 }
2833
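/* Distribute the TX/RX ring pairs over the available TQP vectors in a
 * round-robin fashion (ring i is assigned to vector i % vector_num),
 * program the ring-to-vector mapping in hardware and register one NAPI
 * context per vector.
 */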
2834 static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2835 {
2836         struct hnae3_ring_chain_node vector_ring_chain;
2837         struct hnae3_handle *h = priv->ae_handle;
2838         struct hns3_enet_tqp_vector *tqp_vector;
2839         int ret = 0;
2840         int i;
2841
2842         hns3_nic_set_cpumask(priv);
2843
2844         for (i = 0; i < priv->vector_num; i++) {
2845                 tqp_vector = &priv->tqp_vector[i];
2846                 hns3_vector_gl_rl_init_hw(tqp_vector, priv);
2847                 tqp_vector->num_tqps = 0;
2848         }
2849
2850         for (i = 0; i < h->kinfo.num_tqps; i++) {
2851                 u16 vector_i = i % priv->vector_num;
2852                 u16 tqp_num = h->kinfo.num_tqps;
2853
2854                 tqp_vector = &priv->tqp_vector[vector_i];
2855
2856                 hns3_add_ring_to_group(&tqp_vector->tx_group,
2857                                        priv->ring_data[i].ring);
2858
2859                 hns3_add_ring_to_group(&tqp_vector->rx_group,
2860                                        priv->ring_data[i + tqp_num].ring);
2861
2862                 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2863                 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2864                 tqp_vector->num_tqps++;
2865         }
2866
2867         for (i = 0; i < priv->vector_num; i++) {
2868                 tqp_vector = &priv->tqp_vector[i];
2869
2870                 tqp_vector->rx_group.total_bytes = 0;
2871                 tqp_vector->rx_group.total_packets = 0;
2872                 tqp_vector->tx_group.total_bytes = 0;
2873                 tqp_vector->tx_group.total_packets = 0;
2874                 tqp_vector->handle = h;
2875
2876                 ret = hns3_get_vector_ring_chain(tqp_vector,
2877                                                  &vector_ring_chain);
2878                 if (ret)
2879                         return ret;
2880
2881                 ret = h->ae_algo->ops->map_ring_to_vector(h,
2882                         tqp_vector->vector_irq, &vector_ring_chain);
2883
2884                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2885
2886                 if (ret)
2887                         goto map_ring_fail;
2888
2889                 netif_napi_add(priv->netdev, &tqp_vector->napi,
2890                                hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2891         }
2892
2893         return 0;
2894
2895 map_ring_fail:
2896         while (i--)
2897                 netif_napi_del(&priv->tqp_vector[i].napi);
2898
2899         return ret;
2900 }
2901
2902 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
2903 {
2904         struct hnae3_handle *h = priv->ae_handle;
2905         struct hns3_enet_tqp_vector *tqp_vector;
2906         struct hnae3_vector_info *vector;
2907         struct pci_dev *pdev = h->pdev;
2908         u16 tqp_num = h->kinfo.num_tqps;
2909         u16 vector_num;
2910         int ret = 0;
2911         u16 i;
2912
2913         /* RSS size, the number of online CPUs and vector_num should match */
2914         /* 2P/4P systems should be considered later */
2915         vector_num = min_t(u16, num_online_cpus(), tqp_num);
2916         vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2917                               GFP_KERNEL);
2918         if (!vector)
2919                 return -ENOMEM;
2920
2921         vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2922
2923         priv->vector_num = vector_num;
2924         priv->tqp_vector = devm_kcalloc(&pdev->dev, vector_num,
2925                                         sizeof(*priv->tqp_vector),
2926                                         GFP_KERNEL);
2927         if (!priv->tqp_vector) {
2928                 ret = -ENOMEM;
2929                 goto out;
2930         }
2931
2932         for (i = 0; i < priv->vector_num; i++) {
2933                 tqp_vector = &priv->tqp_vector[i];
2934                 tqp_vector->idx = i;
2935                 tqp_vector->mask_addr = vector[i].io_addr;
2936                 tqp_vector->vector_irq = vector[i].vector;
2937                 hns3_vector_gl_rl_init(tqp_vector, priv);
2938         }
2939
2940 out:
2941         devm_kfree(&pdev->dev, vector);
2942         return ret;
2943 }
2944
2945 static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
2946 {
2947         group->ring = NULL;
2948         group->count = 0;
2949 }
2950
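/* hns3_nic_uninit_vector_data - undo hns3_nic_init_vector_data
 * Unmap each ring chain from its vector, release the vector IRQ if it was
 * requested and delete the NAPI context.
 */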
2951 static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2952 {
2953         struct hnae3_ring_chain_node vector_ring_chain;
2954         struct hnae3_handle *h = priv->ae_handle;
2955         struct hns3_enet_tqp_vector *tqp_vector;
2956         int i, ret;
2957
2958         for (i = 0; i < priv->vector_num; i++) {
2959                 tqp_vector = &priv->tqp_vector[i];
2960
2961                 ret = hns3_get_vector_ring_chain(tqp_vector,
2962                                                  &vector_ring_chain);
2963                 if (ret)
2964                         return ret;
2965
2966                 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2967                         tqp_vector->vector_irq, &vector_ring_chain);
2968                 if (ret)
2969                         return ret;
2970
2971                 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2972
2973                 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2974                         (void)irq_set_affinity_hint(
2975                                 priv->tqp_vector[i].vector_irq,
2976                                 NULL);
2977                         free_irq(priv->tqp_vector[i].vector_irq,
2978                                  &priv->tqp_vector[i]);
2979                 }
2980
2981                 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2982                 hns3_clear_ring_group(&tqp_vector->rx_group);
2983                 hns3_clear_ring_group(&tqp_vector->tx_group);
2984                 netif_napi_del(&priv->tqp_vector[i].napi);
2985         }
2986
2987         return 0;
2988 }
2989
2990 static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
2991 {
2992         struct hnae3_handle *h = priv->ae_handle;
2993         struct pci_dev *pdev = h->pdev;
2994         int i, ret;
2995
2996         for (i = 0; i < priv->vector_num; i++) {
2997                 struct hns3_enet_tqp_vector *tqp_vector;
2998
2999                 tqp_vector = &priv->tqp_vector[i];
3000                 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
3001                 if (ret)
3002                         return ret;
3003         }
3004
3005         devm_kfree(&pdev->dev, priv->tqp_vector);
3006         return 0;
3007 }
3008
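/* hns3_ring_get_cfg - allocate and initialize one TX or RX ring for a queue
 * TX rings occupy the first num_tqps slots of ring_data and RX rings the
 * following num_tqps slots.
 */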
3009 static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
3010                              int ring_type)
3011 {
3012         struct hns3_nic_ring_data *ring_data = priv->ring_data;
3013         int queue_num = priv->ae_handle->kinfo.num_tqps;
3014         struct pci_dev *pdev = priv->ae_handle->pdev;
3015         struct hns3_enet_ring *ring;
3016
3017         ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
3018         if (!ring)
3019                 return -ENOMEM;
3020
3021         if (ring_type == HNAE3_RING_TYPE_TX) {
3022                 ring_data[q->tqp_index].ring = ring;
3023                 ring_data[q->tqp_index].queue_index = q->tqp_index;
3024                 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
3025         } else {
3026                 ring_data[q->tqp_index + queue_num].ring = ring;
3027                 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
3028                 ring->io_base = q->io_base;
3029         }
3030
3031         hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
3032
3033         ring->tqp = q;
3034         ring->desc = NULL;
3035         ring->desc_cb = NULL;
3036         ring->dev = priv->dev;
3037         ring->desc_dma_addr = 0;
3038         ring->buf_size = q->buf_size;
3039         ring->desc_num = q->desc_num;
3040         ring->next_to_use = 0;
3041         ring->next_to_clean = 0;
3042
3043         return 0;
3044 }
3045
3046 static int hns3_queue_to_ring(struct hnae3_queue *tqp,
3047                               struct hns3_nic_priv *priv)
3048 {
3049         int ret;
3050
3051         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
3052         if (ret)
3053                 return ret;
3054
3055         ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
3056         if (ret) {
3057                 devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
3058                 return ret;
3059         }
3060
3061         return 0;
3062 }
3063
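/* hns3_get_ring_config - allocate ring_data (one TX and one RX entry per
 * TQP) and create the rings for every queue.
 */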
3064 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
3065 {
3066         struct hnae3_handle *h = priv->ae_handle;
3067         struct pci_dev *pdev = h->pdev;
3068         int i, ret;
3069
3070         priv->ring_data = devm_kzalloc(&pdev->dev,
3071                                        array3_size(h->kinfo.num_tqps,
3072                                                    sizeof(*priv->ring_data),
3073                                                    2),
3074                                        GFP_KERNEL);
3075         if (!priv->ring_data)
3076                 return -ENOMEM;
3077
3078         for (i = 0; i < h->kinfo.num_tqps; i++) {
3079                 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
3080                 if (ret)
3081                         goto err;
3082         }
3083
3084         return 0;
3085 err:
3086         while (i--) {
3087                 devm_kfree(priv->dev, priv->ring_data[i].ring);
3088                 devm_kfree(priv->dev,
3089                            priv->ring_data[i + h->kinfo.num_tqps].ring);
3090         }
3091
3092         devm_kfree(&pdev->dev, priv->ring_data);
3093         return ret;
3094 }
3095
3096 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
3097 {
3098         struct hnae3_handle *h = priv->ae_handle;
3099         int i;
3100
3101         for (i = 0; i < h->kinfo.num_tqps; i++) {
3102                 devm_kfree(priv->dev, priv->ring_data[i].ring);
3103                 devm_kfree(priv->dev,
3104                            priv->ring_data[i + h->kinfo.num_tqps].ring);
3105         }
3106         devm_kfree(priv->dev, priv->ring_data);
3107 }
3108
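/* hns3_alloc_ring_memory - allocate the desc_cb array and the descriptor
 * ring; RX rings additionally get their receive buffers allocated here.
 */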
3109 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
3110 {
3111         int ret;
3112
3113         if (ring->desc_num <= 0 || ring->buf_size <= 0)
3114                 return -EINVAL;
3115
3116         ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
3117                                 GFP_KERNEL);
3118         if (!ring->desc_cb) {
3119                 ret = -ENOMEM;
3120                 goto out;
3121         }
3122
3123         ret = hns3_alloc_desc(ring);
3124         if (ret)
3125                 goto out_with_desc_cb;
3126
3127         if (!HNAE3_IS_TX_RING(ring)) {
3128                 ret = hns3_alloc_ring_buffers(ring);
3129                 if (ret)
3130                         goto out_with_desc;
3131         }
3132
3133         return 0;
3134
3135 out_with_desc:
3136         hns3_free_desc(ring);
3137 out_with_desc_cb:
3138         kfree(ring->desc_cb);
3139         ring->desc_cb = NULL;
3140 out:
3141         return ret;
3142 }
3143
3144 static void hns3_fini_ring(struct hns3_enet_ring *ring)
3145 {
3146         hns3_free_desc(ring);
3147         kfree(ring->desc_cb);
3148         ring->desc_cb = NULL;
3149         ring->next_to_clean = 0;
3150         ring->next_to_use = 0;
3151 }
3152
3153 static int hns3_buf_size2type(u32 buf_size)
3154 {
3155         int bd_size_type;
3156
3157         switch (buf_size) {
3158         case 512:
3159                 bd_size_type = HNS3_BD_SIZE_512_TYPE;
3160                 break;
3161         case 1024:
3162                 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
3163                 break;
3164         case 2048:
3165                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3166                 break;
3167         case 4096:
3168                 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
3169                 break;
3170         default:
3171                 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
3172         }
3173
3174         return bd_size_type;
3175 }
3176
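/* hns3_init_ring_hw - program the descriptor base address and BD number
 * (and, for RX rings, the buffer size) into the queue's hardware registers.
 */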
3177 static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
3178 {
3179         dma_addr_t dma = ring->desc_dma_addr;
3180         struct hnae3_queue *q = ring->tqp;
3181
3182         if (!HNAE3_IS_TX_RING(ring)) {
3183                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
3184                                (u32)dma);
3185                 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
3186                                (u32)((dma >> 31) >> 1));
3187
3188                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
3189                                hns3_buf_size2type(ring->buf_size));
3190                 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
3191                                ring->desc_num / 8 - 1);
3192
3193         } else {
3194                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
3195                                (u32)dma);
3196                 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
3197                                (u32)((dma >> 31) >> 1));
3198
3199                 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
3200                                ring->desc_num / 8 - 1);
3201         }
3202 }
3203
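/* hns3_init_tx_ring_tc - write the TC value of each enabled traffic class
 * into the TX rings that belong to it.
 */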
3204 static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
3205 {
3206         struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
3207         int i;
3208
3209         for (i = 0; i < HNAE3_MAX_TC; i++) {
3210                 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3211                 int j;
3212
3213                 if (!tc_info->enable)
3214                         continue;
3215
3216                 for (j = 0; j < tc_info->tqp_count; j++) {
3217                         struct hnae3_queue *q;
3218
3219                         q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
3220                         hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
3221                                        tc_info->tc);
3222                 }
3223         }
3224 }
3225
3226 int hns3_init_all_ring(struct hns3_nic_priv *priv)
3227 {
3228         struct hnae3_handle *h = priv->ae_handle;
3229         int ring_num = h->kinfo.num_tqps * 2;
3230         int i, j;
3231         int ret;
3232
3233         for (i = 0; i < ring_num; i++) {
3234                 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
3235                 if (ret) {
3236                         dev_err(priv->dev,
3237                                 "Alloc ring memory fail! ret=%d\n", ret);
3238                         goto out_when_alloc_ring_memory;
3239                 }
3240
3241                 u64_stats_init(&priv->ring_data[i].ring->syncp);
3242         }
3243
3244         return 0;
3245
3246 out_when_alloc_ring_memory:
3247         for (j = i - 1; j >= 0; j--)
3248                 hns3_fini_ring(priv->ring_data[j].ring);
3249
3250         return -ENOMEM;
3251 }
3252
3253 int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
3254 {
3255         struct hnae3_handle *h = priv->ae_handle;
3256         int i;
3257
3258         for (i = 0; i < h->kinfo.num_tqps; i++) {
3259                 hns3_fini_ring(priv->ring_data[i].ring);
3260                 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
3261         }
3262         return 0;
3263 }
3264
3265 /* Set the MAC address if it is configured, or leave it to the AE driver */
3266 static int hns3_init_mac_addr(struct net_device *netdev, bool init)
3267 {
3268         struct hns3_nic_priv *priv = netdev_priv(netdev);
3269         struct hnae3_handle *h = priv->ae_handle;
3270         u8 mac_addr_temp[ETH_ALEN];
3271         int ret = 0;
3272
3273         if (h->ae_algo->ops->get_mac_addr && init) {
3274                 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
3275                 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
3276         }
3277
3278         /* Check if the MAC address is valid; if not, get a random one */
3279         if (!is_valid_ether_addr(netdev->dev_addr)) {
3280                 eth_hw_addr_random(netdev);
3281                 dev_warn(priv->dev, "using random MAC address %pM\n",
3282                          netdev->dev_addr);
3283         }
3284
3285         if (h->ae_algo->ops->set_mac_addr)
3286                 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
3287
3288         return ret;
3289 }
3290
3291 static int hns3_restore_fd_rules(struct net_device *netdev)
3292 {
3293         struct hnae3_handle *h = hns3_get_handle(netdev);
3294         int ret = 0;
3295
3296         if (h->ae_algo->ops->restore_fd_rules)
3297                 ret = h->ae_algo->ops->restore_fd_rules(h);
3298
3299         return ret;
3300 }
3301
3302 static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
3303 {
3304         struct hnae3_handle *h = hns3_get_handle(netdev);
3305
3306         if (h->ae_algo->ops->del_all_fd_entries)
3307                 h->ae_algo->ops->del_all_fd_entries(h, clear_list);
3308 }
3309
3310 static void hns3_nic_set_priv_ops(struct net_device *netdev)
3311 {
3312         struct hns3_nic_priv *priv = netdev_priv(netdev);
3313
3314         priv->ops.fill_desc = hns3_fill_desc;
3315         if ((netdev->features & NETIF_F_TSO) ||
3316             (netdev->features & NETIF_F_TSO6))
3317                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
3318         else
3319                 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
3320 }
3321
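/* hns3_client_init - called by the hnae3 framework to instantiate the KNIC
 * client: allocate the netdev, set up rings and vectors and register the
 * net device.
 */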
3322 static int hns3_client_init(struct hnae3_handle *handle)
3323 {
3324         struct pci_dev *pdev = handle->pdev;
3325         u16 alloc_tqps, max_rss_size;
3326         struct hns3_nic_priv *priv;
3327         struct net_device *netdev;
3328         int ret;
3329
3330         handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
3331                                                     &max_rss_size);
3332         netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
3333         if (!netdev)
3334                 return -ENOMEM;
3335
3336         priv = netdev_priv(netdev);
3337         priv->dev = &pdev->dev;
3338         priv->netdev = netdev;
3339         priv->ae_handle = handle;
3340         priv->ae_handle->last_reset_time = jiffies;
3341         priv->tx_timeout_count = 0;
3342
3343         handle->kinfo.netdev = netdev;
3344         handle->priv = (void *)priv;
3345
3346         hns3_init_mac_addr(netdev, true);
3347
3348         hns3_set_default_feature(netdev);
3349
3350         netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
3351         netdev->priv_flags |= IFF_UNICAST_FLT;
3352         netdev->netdev_ops = &hns3_nic_netdev_ops;
3353         SET_NETDEV_DEV(netdev, &pdev->dev);
3354         hns3_ethtool_set_ops(netdev);
3355         hns3_nic_set_priv_ops(netdev);
3356
3357         /* Carrier off reporting is important to ethtool even BEFORE open */
3358         netif_carrier_off(netdev);
3359
3360         if (handle->flags & HNAE3_SUPPORT_VF)
3361                 handle->reset_level = HNAE3_VF_RESET;
3362         else
3363                 handle->reset_level = HNAE3_FUNC_RESET;
3364
3365         ret = hns3_get_ring_config(priv);
3366         if (ret) {
3367                 ret = -ENOMEM;
3368                 goto out_get_ring_cfg;
3369         }
3370
3371         ret = hns3_nic_alloc_vector_data(priv);
3372         if (ret) {
3373                 ret = -ENOMEM;
3374                 goto out_alloc_vector_data;
3375         }
3376
3377         ret = hns3_nic_init_vector_data(priv);
3378         if (ret) {
3379                 ret = -ENOMEM;
3380                 goto out_init_vector_data;
3381         }
3382
3383         ret = hns3_init_all_ring(priv);
3384         if (ret) {
3385                 ret = -ENOMEM;
3386                 goto out_init_ring_data;
3387         }
3388
3389         ret = register_netdev(netdev);
3390         if (ret) {
3391                 dev_err(priv->dev, "probe register netdev fail!\n");
3392                 goto out_reg_netdev_fail;
3393         }
3394
3395         hns3_dcbnl_setup(handle);
3396
3397         /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3398         netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3399
3400         return ret;
3401
3402 out_reg_netdev_fail:
3403 out_init_ring_data:
3404         (void)hns3_nic_uninit_vector_data(priv);
3405 out_init_vector_data:
3406         hns3_nic_dealloc_vector_data(priv);
3407 out_alloc_vector_data:
3408         priv->ring_data = NULL;
3409 out_get_ring_cfg:
3410         priv->ae_handle = NULL;
3411         free_netdev(netdev);
3412         return ret;
3413 }
3414
3415 static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3416 {
3417         struct net_device *netdev = handle->kinfo.netdev;
3418         struct hns3_nic_priv *priv = netdev_priv(netdev);
3419         int ret;
3420
3421         hns3_remove_hw_addr(netdev);
3422
3423         if (netdev->reg_state != NETREG_UNINITIALIZED)
3424                 unregister_netdev(netdev);
3425
3426         hns3_del_all_fd_rules(netdev, true);
3427
3428         hns3_force_clear_all_rx_ring(handle);
3429
3430         ret = hns3_nic_uninit_vector_data(priv);
3431         if (ret)
3432                 netdev_err(netdev, "uninit vector error\n");
3433
3434         ret = hns3_nic_dealloc_vector_data(priv);
3435         if (ret)
3436                 netdev_err(netdev, "dealloc vector error\n");
3437
3438         ret = hns3_uninit_all_ring(priv);
3439         if (ret)
3440                 netdev_err(netdev, "uninit ring error\n");
3441
3442         hns3_put_ring_config(priv);
3443
3444         priv->ring_data = NULL;
3445
3446         free_netdev(netdev);
3447 }
3448
3449 static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3450 {
3451         struct net_device *netdev = handle->kinfo.netdev;
3452
3453         if (!netdev)
3454                 return;
3455
3456         if (linkup) {
3457                 netif_carrier_on(netdev);
3458                 netif_tx_wake_all_queues(netdev);
3459                 netdev_info(netdev, "link up\n");
3460         } else {
3461                 netif_carrier_off(netdev);
3462                 netif_tx_stop_all_queues(netdev);
3463                 netdev_info(netdev, "link down\n");
3464         }
3465 }
3466
3467 static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3468 {
3469         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3470         struct net_device *ndev = kinfo->netdev;
3471         bool if_running;
3472         int ret;
3473
3474         if (tc > HNAE3_MAX_TC)
3475                 return -EINVAL;
3476
3477         if (!ndev)
3478                 return -ENODEV;
3479
3480         if_running = netif_running(ndev);
3481
3482         if (if_running) {
3483                 (void)hns3_nic_net_stop(ndev);
3484                 msleep(100);
3485         }
3486
3487         ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3488                 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3489         if (ret)
3490                 goto err_out;
3491
3492         ret = hns3_nic_set_real_num_queue(ndev);
3493
3494 err_out:
3495         if (if_running)
3496                 (void)hns3_nic_net_open(ndev);
3497
3498         return ret;
3499 }
3500
3501 static int hns3_recover_hw_addr(struct net_device *ndev)
3502 {
3503         struct netdev_hw_addr_list *list;
3504         struct netdev_hw_addr *ha, *tmp;
3505         int ret = 0;
3506
3507         /* go through and sync uc_addr entries to the device */
3508         list = &ndev->uc;
3509         list_for_each_entry_safe(ha, tmp, &list->list, list) {
3510                 ret = hns3_nic_uc_sync(ndev, ha->addr);
3511                 if (ret)
3512                         return ret;
3513         }
3514
3515         /* go through and sync mc_addr entries to the device */
3516         list = &ndev->mc;
3517         list_for_each_entry_safe(ha, tmp, &list->list, list) {
3518                 ret = hns3_nic_mc_sync(ndev, ha->addr);
3519                 if (ret)
3520                         return ret;
3521         }
3522
3523         return ret;
3524 }
3525
3526 static void hns3_remove_hw_addr(struct net_device *netdev)
3527 {
3528         struct netdev_hw_addr_list *list;
3529         struct netdev_hw_addr *ha, *tmp;
3530
3531         hns3_nic_uc_unsync(netdev, netdev->dev_addr);
3532
3533         /* go through and unsync uc_addr entries from the device */
3534         list = &netdev->uc;
3535         list_for_each_entry_safe(ha, tmp, &list->list, list)
3536                 hns3_nic_uc_unsync(netdev, ha->addr);
3537
3538         /* go through and unsync mc_addr entries from the device */
3539         list = &netdev->mc;
3540         list_for_each_entry_safe(ha, tmp, &list->list, list)
3541                 if (ha->refcount > 1)
3542                         hns3_nic_mc_unsync(netdev, ha->addr);
3543 }
3544
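/* hns3_clear_tx_ring - free every pending TX buffer between next_to_clean
 * and next_to_use.
 */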
3545 static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
3546 {
3547         while (ring->next_to_clean != ring->next_to_use) {
3548                 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
3549                 hns3_free_buffer_detach(ring, ring->next_to_clean);
3550                 ring_ptr_move_fw(ring, next_to_clean);
3551         }
3552 }
3553
3554 static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
3555 {
3556         struct hns3_desc_cb res_cbs;
3557         int ret;
3558
3559         while (ring->next_to_use != ring->next_to_clean) {
3560                 /* When a buffer is not reused, its memory has been
3561                  * freed in hns3_handle_rx_bd or will be freed by the
3562                  * stack, so we need to replace the buffer here.
3563                  */
3564                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3565                         ret = hns3_reserve_buffer_map(ring, &res_cbs);
3566                         if (ret) {
3567                                 u64_stats_update_begin(&ring->syncp);
3568                                 ring->stats.sw_err_cnt++;
3569                                 u64_stats_update_end(&ring->syncp);
3570                                 /* if allocating a new buffer fails, exit
3571                                  * directly and clear again in the up flow.
3572                                  */
3573                                 netdev_warn(ring->tqp->handle->kinfo.netdev,
3574                                             "reserve buffer map failed, ret = %d\n",
3575                                             ret);
3576                                 return ret;
3577                         }
3578                         hns3_replace_buffer(ring, ring->next_to_use,
3579                                             &res_cbs);
3580                 }
3581                 ring_ptr_move_fw(ring, next_to_use);
3582         }
3583
3584         return 0;
3585 }
3586
3587 static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
3588 {
3589         while (ring->next_to_use != ring->next_to_clean) {
3590                 /* When a buffer is not reused, its memory has been
3591                  * freed in hns3_handle_rx_bd or will be freed by the
3592                  * stack, so we only need to unmap the buffer here.
3593                  */
3594                 if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
3595                         hns3_unmap_buffer(ring,
3596                                           &ring->desc_cb[ring->next_to_use]);
3597                         ring->desc_cb[ring->next_to_use].dma = 0;
3598                 }
3599
3600                 ring_ptr_move_fw(ring, next_to_use);
3601         }
3602 }
3603
3604 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h)
3605 {
3606         struct net_device *ndev = h->kinfo.netdev;
3607         struct hns3_nic_priv *priv = netdev_priv(ndev);
3608         struct hns3_enet_ring *ring;
3609         u32 i;
3610
3611         for (i = 0; i < h->kinfo.num_tqps; i++) {
3612                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3613                 hns3_force_clear_rx_ring(ring);
3614         }
3615 }
3616
3617 static void hns3_clear_all_ring(struct hnae3_handle *h)
3618 {
3619         struct net_device *ndev = h->kinfo.netdev;
3620         struct hns3_nic_priv *priv = netdev_priv(ndev);
3621         u32 i;
3622
3623         for (i = 0; i < h->kinfo.num_tqps; i++) {
3624                 struct netdev_queue *dev_queue;
3625                 struct hns3_enet_ring *ring;
3626
3627                 ring = priv->ring_data[i].ring;
3628                 hns3_clear_tx_ring(ring);
3629                 dev_queue = netdev_get_tx_queue(ndev,
3630                                                 priv->ring_data[i].queue_index);
3631                 netdev_tx_reset_queue(dev_queue);
3632
3633                 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3634                 /* Continue to clear other rings even if clearing some
3635                  * rings failed.
3636                  */
3637                 hns3_clear_rx_ring(ring);
3638         }
3639 }
3640
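/* hns3_nic_reset_all_ring - reset every queue in hardware, reprogram the
 * ring registers and reinitialize the software ring state after a reset.
 */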
3641 int hns3_nic_reset_all_ring(struct hnae3_handle *h)
3642 {
3643         struct net_device *ndev = h->kinfo.netdev;
3644         struct hns3_nic_priv *priv = netdev_priv(ndev);
3645         struct hns3_enet_ring *rx_ring;
3646         int i, j;
3647         int ret;
3648
3649         for (i = 0; i < h->kinfo.num_tqps; i++) {
3650                 ret = h->ae_algo->ops->reset_queue(h, i);
3651                 if (ret)
3652                         return ret;
3653
3654                 hns3_init_ring_hw(priv->ring_data[i].ring);
3655
3656                 /* We need to clear the tx ring here because the self test
3657                  * will use the ring and does not call down before up.
3658                  */
3659                 hns3_clear_tx_ring(priv->ring_data[i].ring);
3660                 priv->ring_data[i].ring->next_to_clean = 0;
3661                 priv->ring_data[i].ring->next_to_use = 0;
3662
3663                 rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3664                 hns3_init_ring_hw(rx_ring);
3665                 ret = hns3_clear_rx_ring(rx_ring);
3666                 if (ret)
3667                         return ret;
3668
3669                 /* We cannot know the hardware head and tail when this
3670                  * function is called in the reset flow, so we reuse all desc.
3671                  */
3672                 for (j = 0; j < rx_ring->desc_num; j++)
3673                         hns3_reuse_buffer(rx_ring, j);
3674
3675                 rx_ring->next_to_clean = 0;
3676                 rx_ring->next_to_use = 0;
3677         }
3678
3679         hns3_init_tx_ring_tc(priv);
3680
3681         return 0;
3682 }
3683
3684 static void hns3_store_coal(struct hns3_nic_priv *priv)
3685 {
3686         /* ethtool only supports setting and querying one coalesce
3687          * configuration for now, so save vector 0's coalesce
3688          * configuration here in order to restore it.
3689          */
3690         memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
3691                sizeof(struct hns3_enet_coalesce));
3692         memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
3693                sizeof(struct hns3_enet_coalesce));
3694 }
3695
3696 static void hns3_restore_coal(struct hns3_nic_priv *priv)
3697 {
3698         u16 vector_num = priv->vector_num;
3699         int i;
3700
3701         for (i = 0; i < vector_num; i++) {
3702                 memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
3703                        sizeof(struct hns3_enet_coalesce));
3704                 memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
3705                        sizeof(struct hns3_enet_coalesce));
3706         }
3707 }
3708
3709 static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3710 {
3711         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3712         struct net_device *ndev = kinfo->netdev;
3713
3714         if (!netif_running(ndev))
3715                 return 0;
3716
3717         return hns3_nic_net_stop(ndev);
3718 }
3719
3720 static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3721 {
3722         struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3723         int ret = 0;
3724
3725         if (netif_running(kinfo->netdev)) {
3726                 ret = hns3_nic_net_up(kinfo->netdev);
3727                 if (ret) {
3728                         netdev_err(kinfo->netdev,
3729                                    "hns net up fail, ret=%d!\n", ret);
3730                         return ret;
3731                 }
3732                 handle->last_reset_time = jiffies;
3733         }
3734
3735         return ret;
3736 }
3737
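/* hns3_reset_notify_init_enet - re-create the client state after a reset:
 * restore the MAC/VLAN/flow-director configuration, then rebuild the
 * vectors and rings.
 */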
3738 static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3739 {
3740         struct net_device *netdev = handle->kinfo.netdev;
3741         struct hns3_nic_priv *priv = netdev_priv(netdev);
3742         bool vlan_filter_enable;
3743         int ret;
3744
3745         ret = hns3_init_mac_addr(netdev, false);
3746         if (ret)
3747                 return ret;
3748
3749         ret = hns3_recover_hw_addr(netdev);
3750         if (ret)
3751                 return ret;
3752
3753         ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
3754         if (ret)
3755                 return ret;
3756
3757         vlan_filter_enable = !(netdev->flags & IFF_PROMISC);
3758         hns3_enable_vlan_filter(netdev, vlan_filter_enable);
3759
3760         /* The hardware table is only cleared when the PF resets */
3761         if (!(handle->flags & HNAE3_SUPPORT_VF)) {
3762                 ret = hns3_restore_vlan(netdev);
3763                 if (ret)
3764                         return ret;
3765         }
3766
3767         ret = hns3_restore_fd_rules(netdev);
3768         if (ret)
3769                 return ret;
3770
3771         /* Carrier off reporting is important to ethtool even BEFORE open */
3772         netif_carrier_off(netdev);
3773
3774         hns3_restore_coal(priv);
3775
3776         ret = hns3_nic_init_vector_data(priv);
3777         if (ret)
3778                 return ret;
3779
3780         ret = hns3_init_all_ring(priv);
3781         if (ret) {
3782                 hns3_nic_uninit_vector_data(priv);
3783                 priv->ring_data = NULL;
3784         }
3785
3786         return ret;
3787 }
3788
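/* hns3_reset_notify_uninit_enet - tear down vectors and rings before a
 * reset; during an ongoing function reset also remove the MAC addresses
 * and flow director rules in software.
 */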
3789 static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3790 {
3791         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
3792         struct net_device *netdev = handle->kinfo.netdev;
3793         struct hns3_nic_priv *priv = netdev_priv(netdev);
3794         int ret;
3795
3796         hns3_force_clear_all_rx_ring(handle);
3797
3798         ret = hns3_nic_uninit_vector_data(priv);
3799         if (ret) {
3800                 netdev_err(netdev, "uninit vector error\n");
3801                 return ret;
3802         }
3803
3804         hns3_store_coal(priv);
3805
3806         ret = hns3_uninit_all_ring(priv);
3807         if (ret)
3808                 netdev_err(netdev, "uninit ring error\n");
3809
3810         /* It is cumbersome for hardware to pick-and-choose entries for deletion
3811          * from the table space. Hence, for a function reset, software
3812          * intervention is required to delete the entries.
3813          */
3814         if (hns3_dev_ongoing_func_reset(ae_dev)) {
3815                 hns3_remove_hw_addr(netdev);
3816                 hns3_del_all_fd_rules(netdev, false);
3817         }
3818
3819         return ret;
3820 }
3821
3822 static int hns3_reset_notify(struct hnae3_handle *handle,
3823                              enum hnae3_reset_notify_type type)
3824 {
3825         int ret = 0;
3826
3827         switch (type) {
3828         case HNAE3_UP_CLIENT:
3829                 ret = hns3_reset_notify_up_enet(handle);
3830                 break;
3831         case HNAE3_DOWN_CLIENT:
3832                 ret = hns3_reset_notify_down_enet(handle);
3833                 break;
3834         case HNAE3_INIT_CLIENT:
3835                 ret = hns3_reset_notify_init_enet(handle);
3836                 break;
3837         case HNAE3_UNINIT_CLIENT:
3838                 ret = hns3_reset_notify_uninit_enet(handle);
3839                 break;
3840         default:
3841                 break;
3842         }
3843
3844         return ret;
3845 }
3846
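/* hns3_modify_tqp_num - apply a new TQP count through the AE layer and
 * rebuild the rings and vectors for it.
 */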
3847 static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
3848 {
3849         struct hns3_nic_priv *priv = netdev_priv(netdev);
3850         struct hnae3_handle *h = hns3_get_handle(netdev);
3851         int ret;
3852
3853         ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3854         if (ret)
3855                 return ret;
3856
3857         ret = hns3_get_ring_config(priv);
3858         if (ret)
3859                 return ret;
3860
3861         ret = hns3_nic_alloc_vector_data(priv);
3862         if (ret)
3863                 goto err_alloc_vector;
3864
3865         hns3_restore_coal(priv);
3866
3867         ret = hns3_nic_init_vector_data(priv);
3868         if (ret)
3869                 goto err_uninit_vector;
3870
3871         ret = hns3_init_all_ring(priv);
3872         if (ret)
3873                 goto err_put_ring;
3874
3875         return 0;
3876
3877 err_put_ring:
3878         hns3_put_ring_config(priv);
3879 err_uninit_vector:
3880         hns3_nic_uninit_vector_data(priv);
3881 err_alloc_vector:
3882         hns3_nic_dealloc_vector_data(priv);
3883         return ret;
3884 }
3885
3886 static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3887 {
3888         return (new_tqp_num / num_tc) * num_tc;
3889 }
3890
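/* hns3_set_channels - ethtool combined-channel handler: tear down the
 * current rings and vectors, apply the requested TQP count and fall back
 * to the old count if the change fails.
 */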
3891 int hns3_set_channels(struct net_device *netdev,
3892                       struct ethtool_channels *ch)
3893 {
3894         struct hns3_nic_priv *priv = netdev_priv(netdev);
3895         struct hnae3_handle *h = hns3_get_handle(netdev);
3896         struct hnae3_knic_private_info *kinfo = &h->kinfo;
3897         bool if_running = netif_running(netdev);
3898         u32 new_tqp_num = ch->combined_count;
3899         u16 org_tqp_num;
3900         int ret;
3901
3902         if (ch->rx_count || ch->tx_count)
3903                 return -EINVAL;
3904
3905         if (new_tqp_num > hns3_get_max_available_channels(h) ||
3906             new_tqp_num < kinfo->num_tc) {
3907                 dev_err(&netdev->dev,
3908                         "Change tqps failed, the tqp range is from %d to %d\n",
3909                         kinfo->num_tc,
3910                         hns3_get_max_available_channels(h));
3911                 return -EINVAL;
3912         }
3913
3914         new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3915         if (kinfo->num_tqps == new_tqp_num)
3916                 return 0;
3917
3918         if (if_running)
3919                 hns3_nic_net_stop(netdev);
3920
3921         ret = hns3_nic_uninit_vector_data(priv);
3922         if (ret) {
3923                 dev_err(&netdev->dev,
3924                         "Unbind vector from tqp failed, nothing is changed\n");
3925                 goto open_netdev;
3926         }
3927
3928         hns3_store_coal(priv);
3929
3930         hns3_nic_dealloc_vector_data(priv);
3931
3932         hns3_uninit_all_ring(priv);
3933         hns3_put_ring_config(priv);
3934
3935         org_tqp_num = h->kinfo.num_tqps;
3936         ret = hns3_modify_tqp_num(netdev, new_tqp_num);
3937         if (ret) {
3938                 ret = hns3_modify_tqp_num(netdev, org_tqp_num);
3939                 if (ret) {
3940                         /* If revert to old tqp failed, fatal error occurred */
3941                         dev_err(&netdev->dev,
3942                                 "Revert to old tqp num failed, ret=%d\n", ret);
3943                         return ret;
3944                 }
3945                 dev_info(&netdev->dev,
3946                          "Change tqp num failed, reverted to old tqp num\n");
3947         }
3948
3949 open_netdev:
3950         if (if_running)
3951                 hns3_nic_net_open(netdev);
3952
3953         return ret;
3954 }
3955
3956 static const struct hnae3_client_ops client_ops = {
3957         .init_instance = hns3_client_init,
3958         .uninit_instance = hns3_client_uninit,
3959         .link_status_change = hns3_link_status_change,
3960         .setup_tc = hns3_client_setup_tc,
3961         .reset_notify = hns3_reset_notify,
3962 };
3963
3964 /* hns3_init_module - Driver registration routine
3965  * hns3_init_module is the first routine called when the driver is loaded.
3966  * It registers the client with hnae3 and the driver with the PCI subsystem.
3967  */
3968 static int __init hns3_init_module(void)
3969 {
3970         int ret;
3971
3972         pr_info("%s: %s - version %s\n", hns3_driver_name, hns3_driver_string, hns3_driver_version);
3973         pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3974
3975         client.type = HNAE3_CLIENT_KNIC;
3976         snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3977                  hns3_driver_name);
3978
3979         client.ops = &client_ops;
3980
3981         INIT_LIST_HEAD(&client.node);
3982
3983         ret = hnae3_register_client(&client);
3984         if (ret)
3985                 return ret;
3986
3987         ret = pci_register_driver(&hns3_driver);
3988         if (ret)
3989                 hnae3_unregister_client(&client);
3990
3991         return ret;
3992 }
3993 module_init(hns3_init_module);
3994
3995 /* hns3_exit_module - Driver exit cleanup routine
3996  * hns3_exit_module is called just before the driver is removed
3997  * from memory.
3998  */
3999 static void __exit hns3_exit_module(void)
4000 {
4001         pci_unregister_driver(&hns3_driver);
4002         hnae3_unregister_client(&client);
4003 }
4004 module_exit(hns3_exit_module);
4005
4006 MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
4007 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
4008 MODULE_LICENSE("GPL");
4009 MODULE_ALIAS("pci:hns-nic");
4010 MODULE_VERSION(HNS3_MOD_VERSION);