/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 */

/* File hw_atl_a0.c: Definition of Atlantic hardware specific functions. */

#include "../aq_hw.h"
#include "../aq_hw_utils.h"
#include "../aq_ring.h"
#include "../aq_nic.h"
#include "hw_atl_a0.h"
#include "hw_atl_utils.h"
#include "hw_atl_llh.h"
#include "hw_atl_a0_internal.h"

#define DEFAULT_A0_BOARD_BASIC_CAPABILITIES \
        .is_64_dma = true,                \
        .msix_irqs = 4U,                  \
        .irq_mask = ~0U,                  \
        .vecs = HW_ATL_A0_RSS_MAX,        \
        .tcs = HW_ATL_A0_TC_MAX,          \
        .rxd_alignment = 1U,              \
        .rxd_size = HW_ATL_A0_RXD_SIZE,   \
        .rxds_max = HW_ATL_A0_MAX_RXD,    \
        .rxds_min = HW_ATL_A0_MIN_RXD,    \
        .txd_alignment = 1U,              \
        .txd_size = HW_ATL_A0_TXD_SIZE,   \
        .txds_max = HW_ATL_A0_MAX_TXD,    \
        .txds_min = HW_ATL_A0_MIN_TXD,    \
        .txhwb_alignment = 4096U,         \
        .tx_rings = HW_ATL_A0_TX_RINGS,   \
        .rx_rings = HW_ATL_A0_RX_RINGS,   \
        .hw_features = NETIF_F_HW_CSUM |  \
                        NETIF_F_RXHASH |  \
                        NETIF_F_RXCSUM |  \
                        NETIF_F_SG |      \
                        NETIF_F_TSO,      \
        .hw_priv_flags = IFF_UNICAST_FLT, \
        .flow_control = true,             \
        .mtu = HW_ATL_A0_MTU_JUMBO,       \
        .mac_regs_count = 88,             \
        .hw_alive_check_addr = 0x10U

const struct aq_hw_caps_s hw_atl_a0_caps_aqc100 = {
        DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_FIBRE,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc107 = {
        DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_10G |
                          AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc108 = {
        DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_5G |
                          AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

const struct aq_hw_caps_s hw_atl_a0_caps_aqc109 = {
        DEFAULT_A0_BOARD_BASIC_CAPABILITIES,
        .media_type = AQ_HW_MEDIA_TYPE_TP,
        .link_speed_msk = AQ_NIC_RATE_2GS |
                          AQ_NIC_RATE_1G |
                          AQ_NIC_RATE_100M,
};

static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
{
        int err = 0;

        hw_atl_glb_glb_reg_res_dis_set(self, 1U);
        hw_atl_pci_pci_reg_res_dis_set(self, 0U);
        hw_atl_rx_rx_reg_res_dis_set(self, 0U);
        hw_atl_tx_tx_reg_res_dis_set(self, 0U);

        HW_ATL_FLUSH();
        hw_atl_glb_soft_res_set(self, 1);
        /* check 10 times by 1ms; AQ_HW_WAIT_FOR() sets err (-ETIME on timeout) */
        AQ_HW_WAIT_FOR(hw_atl_glb_soft_res_get(self) == 0, 1000U, 10U);
        if (err < 0)
                goto err_exit;

        hw_atl_itr_irq_reg_res_dis_set(self, 0U);
        hw_atl_itr_res_irq_set(self, 1U);

        /* check 10 times by 1ms */
        AQ_HW_WAIT_FOR(hw_atl_itr_res_irq_get(self) == 0, 1000U, 10U);
        if (err < 0)
                goto err_exit;

        self->aq_fw_ops->set_state(self, MPI_RESET);

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
{
        u32 tc = 0U;
        u32 buff_size = 0U;
        unsigned int i_priority = 0U;
        bool is_rx_flow_control = false;

        /* TPS Descriptor rate init */
        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
        hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);

        /* TPS VM init */
        hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);

        /* TPS TC credits init */
        hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
        hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, 0U);

        hw_atl_tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
        hw_atl_tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
        hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
        hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);

        /* Tx buf size */
        buff_size = HW_ATL_A0_TXBUF_MAX;

        hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
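        /* Watermarks: scale buff_size from KB to 32-byte units, then set
         * the high threshold at 66% and the low threshold at 50% of the
         * buffer.
         */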
        hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024U / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024U / 32U) * 50U) /
                                                   100U, tc);

        /* QoS Rx buf size per TC */
        tc = 0;
        is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
        buff_size = HW_ATL_A0_RXBUF_MAX;

        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
        hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024U / 32U) * 66U) /
                                                   100U, tc);
        hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self,
                                                   (buff_size *
                                                   (1024U / 32U) * 50U) /
                                                   100U, tc);
        hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);

        /* QoS 802.1p priority -> TC mapping */
        for (i_priority = 8U; i_priority--;)
                hw_atl_rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
                                     struct aq_rss_parameters *rss_params)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int err = 0;
        unsigned int i = 0U;
        unsigned int addr = 0U;

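        /* Write the 40-byte RSS key one 32-bit word at a time, byte-swapped,
         * polling each write for completion.
         */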
        for (i = 10, addr = 0U; i--; ++addr) {
                u32 key_data = cfg->is_rss ?
                        __swab32(rss_params->hash_secret_key[i]) : 0U;
                hw_atl_rpf_rss_key_wr_data_set(self, key_data);
                hw_atl_rpf_rss_key_addr_set(self, addr);
                hw_atl_rpf_rss_key_wr_en_set(self, 1U);
                AQ_HW_WAIT_FOR(hw_atl_rpf_rss_key_wr_en_get(self) == 0,
                               1000U, 10U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
                                struct aq_rss_parameters *rss_params)
{
        u8 *indirection_table = rss_params->indirection_table;
        u32 i = 0U;
        u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
        int err = 0;
        u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX *
                                        HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];

        memset(bitary, 0, sizeof(bitary));

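        /* Pack each 3-bit queue index into the bitary[] bit stream at bit
         * position i * 3; an entry may straddle a 16-bit word, hence the
         * u32 store at the containing u16 offset.
         */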
        for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) {
                (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
                        ((indirection_table[i] % num_rss_queues) <<
                        ((i * 3U) & 0xFU));
        }

        for (i = ARRAY_SIZE(bitary); i--;) {
                hw_atl_rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
                hw_atl_rpf_rss_redir_tbl_addr_set(self, i);
                hw_atl_rpf_rss_redir_wr_en_set(self, 1U);
                AQ_HW_WAIT_FOR(hw_atl_rpf_rss_redir_wr_en_get(self) == 0,
                               1000U, 10U);
                if (err < 0)
                        goto err_exit;
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
                                    struct aq_nic_cfg_s *aq_nic_cfg)
{
        /* TX checksum offloads */
        hw_atl_tpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);

        /* RX checksum offloads */
        hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
        hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);

        /* LSO offloads */
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
{
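        /* TCP flag masks applied to the first/middle/last segments that
         * LSO generates.
         */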
        hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
        hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);

        /* Tx interrupts */
        hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
                        0x00010000U : 0x00000000U);
        hw_atl_tdm_tx_dca_en_set(self, 0U);
        hw_atl_tdm_tx_dca_mode_set(self, 0U);

        hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
{
        struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
        int i;

        /* Rx TC/RSS number config */
        hw_atl_rpb_rpf_rx_traf_class_mode_set(self, 1U);

        /* Rx flow control */
        hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U);

        /* RSS Ring selection */
        hw_atl_reg_rx_flr_rss_control1set(self, cfg->is_rss ?
                                          0xB3333333U : 0x00000000U);

        /* Multicast filters */
        for (i = HW_ATL_A0_MAC_MAX; i--;) {
                hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
                hw_atl_rpfl2unicast_flr_act_set(self, 1U, i);
        }

        hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
        hw_atl_reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);

        /* Vlan filters */
        hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
        hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
        hw_atl_rpf_vlan_prom_mode_en_set(self, 1);

        /* Rx Interrupts */
        hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);

        /* misc */
        hw_atl_rpfl2broadcast_flr_act_set(self, 1U);
        hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));

        hw_atl_rdm_rx_dca_en_set(self, 0U);
        hw_atl_rdm_rx_dca_mode_set(self, 0U);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
{
        int err = 0;
        unsigned int h = 0U;
        unsigned int l = 0U;

        if (!mac_addr) {
                err = -EINVAL;
                goto err_exit;
        }
        h = (mac_addr[0] << 8) | (mac_addr[1]);
        l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
            (mac_addr[4] << 8) | mac_addr[5];

        hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
        hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
        hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
        hw_atl_rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
{
        static u32 aq_hw_atl_igcr_table_[4][2] = {
                { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
                { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
                { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
                { 0x20000022U, 0x20000026U }  /* AQ_IRQ_MSIX */
        };

        int err = 0;

        struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg;

        hw_atl_a0_hw_init_tx_path(self);
        hw_atl_a0_hw_init_rx_path(self);

        hw_atl_a0_hw_mac_addr_set(self, mac_addr);

        self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk);
        self->aq_fw_ops->set_state(self, MPI_INIT);

        hw_atl_reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
        hw_atl_reg_tx_dma_debug_ctl_set(self, 0x000000b8U);

        hw_atl_a0_hw_qos_set(self);
        hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
        hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);

        /* Reset link status and read out initial hardware counters */
        self->aq_link_status.mbps = 0;
        self->aq_fw_ops->update_stats(self);

        err = aq_hw_err_from_flags(self);
        if (err < 0)
                goto err_exit;

        /* Interrupts */
        hw_atl_reg_irq_glb_ctl_set(self,
                                   aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
                                        [(aq_nic_cfg->vecs > 1U) ? 1 : 0]);

        hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);

        /* Map the error interrupt into all four cause slots of the general
         * IRQ map register; bit 7 of each byte lane is the enable bit.
         */
        hw_atl_reg_gen_irq_map_set(self,
                                   ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
                                   ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
                                   ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
                                   ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);

        hw_atl_a0_hw_offload_set(self, aq_nic_cfg);

err_exit:
        return err;
}

static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
                                      struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 1, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_start(struct aq_hw_s *self)
{
        hw_atl_tpb_tx_buff_en_set(self, 1);
        hw_atl_rpb_rx_buff_en_set(self, 1);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        hw_atl_reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
        return 0;
}

static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int frags)
{
        struct aq_ring_buff_s *buff = NULL;
        struct hw_atl_txd_s *txd = NULL;
        unsigned int buff_pa_len = 0U;
        unsigned int pkt_len = 0U;
        unsigned int frag_count = 0U;
        bool is_gso = false;

        buff = &ring->buff_ring[ring->sw_tail];
        pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;

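        /* A context (TXC) descriptor carries the LSO parameters for a packet;
         * data (TXD) descriptors follow, one per fragment, with EOP and
         * write-back requested on the last one.
         */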
        for (frag_count = 0; frag_count < frags; frag_count++) {
                txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
                                                HW_ATL_A0_TXD_SIZE];
                txd->ctl = 0;
                txd->ctl2 = 0;
                txd->buf_addr = 0;

                buff = &ring->buff_ring[ring->sw_tail];

                if (buff->is_txc) {
                        txd->ctl |= (buff->len_l3 << 31) |
                                (buff->len_l2 << 24) |
                                HW_ATL_A0_TXD_CTL_CMD_TCP |
                                HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
                        txd->ctl2 |= (buff->mss << 16) |
                                (buff->len_l4 << 8) |
                                (buff->len_l3 >> 1);

                        pkt_len -= (buff->len_l4 +
                                    buff->len_l3 +
                                    buff->len_l2);
                        is_gso = true;

                        if (buff->is_ipv6)
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPV6;
                } else {
                        buff_pa_len = buff->len;

                        txd->buf_addr = buff->pa;
                        txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
                                                ((u32)buff_pa_len << 4));
                        txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
                        /* PAY_LEN */
                        txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);

                        if (is_gso) {
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
                                txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
                        }

                        /* Tx checksum offloads */
                        if (buff->is_ip_cso)
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;

                        if (buff->is_udp_cso || buff->is_tcp_cso)
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;

                        if (unlikely(buff->is_eop)) {
                                txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
                                txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
                                is_gso = false;
                        }
                }

                ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
        }

        hw_atl_a0_hw_tx_ring_tail_update(self, ring);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_rdm_rx_desc_en_set(self, false, aq_ring->idx);

        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
                                                  aq_ring->idx);

        hw_atl_reg_rx_dma_desc_base_addressmswset(self,
                                                  dma_desc_addr_msw,
                                                  aq_ring->idx);

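        /* The descriptor length register counts in units of 8 descriptors */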
        hw_atl_rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_rdm_rx_desc_data_buff_size_set(self,
                                              AQ_CFG_RX_FRAME_MAX / 1024U,
                                              aq_ring->idx);

        hw_atl_rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
        hw_atl_rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);

        /* Rx ring set mode */

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_rx_set(self, true, aq_ring->idx);

        hw_atl_rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
        hw_atl_rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
                                     struct aq_ring_s *aq_ring,
                                     struct aq_ring_param_s *aq_ring_param)
{
        u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
        u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);

        hw_atl_reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
                                                  aq_ring->idx);

        hw_atl_reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
                                                  aq_ring->idx);

        hw_atl_tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);

        hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);

        /* Set Tx threshold */
        hw_atl_tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);

        /* Mapping interrupt vector */
        hw_atl_itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
        hw_atl_itr_irq_map_en_tx_set(self, true, aq_ring->idx);

        hw_atl_tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
        hw_atl_tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
                                     struct aq_ring_s *ring,
                                     unsigned int sw_tail_old)
{
        for (; sw_tail_old != ring->sw_tail;
                sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
                struct hw_atl_rxd_s *rxd =
                        (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
                                                        HW_ATL_A0_RXD_SIZE];

                struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];

                rxd->buf_addr = buff->pa;
                rxd->hdr_addr = 0U;
        }

        hw_atl_reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
                                            struct aq_ring_s *ring)
{
        int err = 0;
        unsigned int hw_head = hw_atl_tdm_tx_desc_head_ptr_get(self, ring->idx);

        if (aq_utils_obj_test(&self->flags, AQ_HW_FLAG_ERR_UNPLUG)) {
                err = -ENXIO;
                goto err_exit;
        }
        ring->hw_head = hw_head;
        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
                                        struct aq_ring_s *ring)
{
        struct device *ndev = aq_nic_get_dev(ring->aq_nic);

        for (; ring->hw_head != ring->sw_tail;
                ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
                struct aq_ring_buff_s *buff = NULL;
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];

                unsigned int is_err = 1U;
                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;

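                /* If the descriptor has not been written back yet: restart
                 * the ring when the DMA status reports a stall, then try to
                 * recover a missed write-back by checking descriptor 1.
                 */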
                if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
                        if ((1U << 4) &
                        hw_atl_reg_rx_dma_desc_status_get(self, ring->idx)) {
                                hw_atl_rdm_rx_desc_en_set(self, false, ring->idx);
                                hw_atl_rdm_rx_desc_res_set(self, true, ring->idx);
                                hw_atl_rdm_rx_desc_res_set(self, false, ring->idx);
                                hw_atl_rdm_rx_desc_en_set(self, true, ring->idx);
                        }

                        if (ring->hw_head ||
                            (hw_atl_rdm_rx_desc_head_ptr_get(self,
                                                             ring->idx) < 2U)) {
                                break;
                        } else if (!(rxd_wb->status & 0x1U)) {
                                struct hw_atl_rxd_wb_s *rxd_wb1 =
                                        (struct hw_atl_rxd_wb_s *)
                                        (&ring->dx_ring[(1U) *
                                                HW_ATL_A0_RXD_SIZE]);

                                if ((rxd_wb1->status & 0x1U)) {
                                        rxd_wb->pkt_len = 1514U;
                                        rxd_wb->status = 3U;
                                } else {
                                        break;
                                }
                        }
                }

                buff = &ring->buff_ring[ring->hw_head];

                if (0x3U != (rxd_wb->status & 0x3U))
                        rxd_wb->status |= 4;

                is_err = (0x0000001CU & rxd_wb->status);
                is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
                pkt_type = 0xFFU & (rxd_wb->type >> 4);

                if (is_rx_check_sum_enabled) {
                        if (0x0U == (pkt_type & 0x3U))
                                buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;

                        if (0x4U == (pkt_type & 0x1CU))
                                buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
                        else if (0x0U == (pkt_type & 0x1CU))
                                buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;

                        /* Checksum offload workaround for small packets */
                        if (rxd_wb->pkt_len <= 60) {
                                buff->is_ip_cso = 0U;
                                buff->is_cso_err = 0U;
                        }
                }

                is_err &= ~0x18U;
                is_err &= ~0x04U;

                dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);

                if (is_err || rxd_wb->type & 0x1000U) {
                        /* status error or DMA error */
                        buff->is_error = 1U;
                } else {
                        if (self->aq_nic_cfg->is_rss) {
                                /* last 4 bytes */
                                u16 rss_type = rxd_wb->type & 0xFU;

                                if (rss_type && rss_type < 0x8U) {
                                        buff->is_hash_l4 = (rss_type == 0x4 ||
                                                        rss_type == 0x5);
                                        buff->rss_hash = rxd_wb->rss_hash;
                                }
                        }

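                        /* On EOP, pkt_len holds the total packet length; the
                         * final fragment is the remainder modulo the frame
                         * buffer size (a full buffer when it divides evenly).
                         */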
                        if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
                                buff->len = rxd_wb->pkt_len %
                                        AQ_CFG_RX_FRAME_MAX;
                                buff->len = buff->len ?
                                        buff->len : AQ_CFG_RX_FRAME_MAX;
                                buff->next = 0U;
                                buff->is_eop = 1U;
                        } else {
                                /* jumbo */
                                buff->next = aq_ring_next_dx(ring,
                                                             ring->hw_head);
                                ++ring->stats.rx.jumbo_packets;
                        }
                }
        }

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_setlsw_set(self, LODWORD(mask) |
                               (1U << HW_ATL_A0_ERR_INT));
        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
{
        hw_atl_itr_irq_msk_clearlsw_set(self, LODWORD(mask));
        hw_atl_itr_irq_status_clearlsw_set(self, LODWORD(mask));

        if ((1U << 16) & hw_atl_reg_gen_irq_status_get(self))
                atomic_inc(&self->dpc);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
{
        *mask = hw_atl_itr_irq_statuslsw_get(self);
        return aq_hw_err_from_flags(self);
}

#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)

static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
                                          unsigned int packet_filter)
{
        unsigned int i = 0U;

        hw_atl_rpfl2promiscuous_mode_en_set(self,
                                            IS_FILTER_ENABLED(IFF_PROMISC));
        hw_atl_rpfl2multicast_flr_en_set(self,
                                         IS_FILTER_ENABLED(IFF_MULTICAST), 0);
        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));

        self->aq_nic_cfg->is_mc_list_enabled =
                        IS_FILTER_ENABLED(IFF_MULTICAST);

        for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled &&
                                           (i <= self->aq_nic_cfg->mc_list_count)) ?
                                           1U : 0U, i);

        return aq_hw_err_from_flags(self);
}

#undef IS_FILTER_ENABLED

static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
                                           u8 ar_mac
                                           [AQ_HW_MULTICAST_ADDRESS_MAX]
                                           [ETH_ALEN],
                                           u32 count)
{
        int err = 0;

        if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
                err = -EBADRQC;
                goto err_exit;
        }
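        /* Multicast addresses are programmed into the L2 unicast filter
         * slots starting at HW_ATL_A0_MAC_MIN (the station address uses
         * slot HW_ATL_A0_MAC).
         */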
        for (self->aq_nic_cfg->mc_list_count = 0U;
                        self->aq_nic_cfg->mc_list_count < count;
                        ++self->aq_nic_cfg->mc_list_count) {
                u32 i = self->aq_nic_cfg->mc_list_count;
                u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
                u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
                                        (ar_mac[i][4] << 8) | ar_mac[i][5];

                hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addresslsw_set(self,
                                                        l,
                                                        HW_ATL_A0_MAC_MIN + i);

                hw_atl_rpfl2unicast_dest_addressmsw_set(self,
                                                        h,
                                                        HW_ATL_A0_MAC_MIN + i);

                hw_atl_rpfl2_uc_flr_en_set(self,
                                           (self->aq_nic_cfg->is_mc_list_enabled),
                                           HW_ATL_A0_MAC_MIN + i);
        }

        err = aq_hw_err_from_flags(self);

err_exit:
        return err;
}

static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self)
{
        unsigned int i = 0U;
        u32 itr_rx;

        if (self->aq_nic_cfg->itr) {
                if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) {
                        u32 itr_ = (self->aq_nic_cfg->itr >> 1);

                        itr_ = min(AQ_CFG_IRQ_MASK, itr_);

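                        /* bit 31 enables throttling; the timer value lives
                         * in bits 16 and up
                         */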
                        itr_rx = 0x80000000U | (itr_ << 0x10);
                } else {
                        u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);

                        if (n < self->aq_link_status.mbps) {
                                itr_rx = 0U;
                        } else {
                                static unsigned int hw_timers_tbl_[] = {
                                        0x01CU, /* 10Gbit */
                                        0x039U, /* 5Gbit */
                                        0x039U, /* 5Gbit 5GS */
                                        0x073U, /* 2.5Gbit */
                                        0x120U, /* 1Gbit */
                                        0x1FFU, /* 100Mbit */
                                };

                                unsigned int speed_index =
                                        hw_atl_utils_mbps_2_speed_index(
                                                self->aq_link_status.mbps);

                                itr_rx = 0x80000000U |
                                        (hw_timers_tbl_[speed_index] << 0x10U);
                        }

                        aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
                        aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
                }
        } else {
                itr_rx = 0U;
        }

        for (i = HW_ATL_A0_RINGS_MAX; i--;)
                hw_atl_reg_irq_thr_set(self, itr_rx, i);

        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
{
        hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_tdm_tx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}

static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
                                     struct aq_ring_s *ring)
{
        hw_atl_rdm_rx_desc_en_set(self, 0U, ring->idx);
        return aq_hw_err_from_flags(self);
}

const struct aq_hw_ops hw_atl_ops_a0 = {
        .hw_set_mac_address   = hw_atl_a0_hw_mac_addr_set,
        .hw_init              = hw_atl_a0_hw_init,
        .hw_reset             = hw_atl_a0_hw_reset,
        .hw_start             = hw_atl_a0_hw_start,
        .hw_ring_tx_start     = hw_atl_a0_hw_ring_tx_start,
        .hw_ring_tx_stop      = hw_atl_a0_hw_ring_tx_stop,
        .hw_ring_rx_start     = hw_atl_a0_hw_ring_rx_start,
        .hw_ring_rx_stop      = hw_atl_a0_hw_ring_rx_stop,
        .hw_stop              = hw_atl_a0_hw_stop,

        .hw_ring_tx_xmit         = hw_atl_a0_hw_ring_tx_xmit,
        .hw_ring_tx_head_update  = hw_atl_a0_hw_ring_tx_head_update,

        .hw_ring_rx_receive      = hw_atl_a0_hw_ring_rx_receive,
        .hw_ring_rx_fill         = hw_atl_a0_hw_ring_rx_fill,

        .hw_irq_enable           = hw_atl_a0_hw_irq_enable,
        .hw_irq_disable          = hw_atl_a0_hw_irq_disable,
        .hw_irq_read             = hw_atl_a0_hw_irq_read,

        .hw_ring_rx_init             = hw_atl_a0_hw_ring_rx_init,
        .hw_ring_tx_init             = hw_atl_a0_hw_ring_tx_init,
        .hw_packet_filter_set        = hw_atl_a0_hw_packet_filter_set,
        .hw_multicast_list_set       = hw_atl_a0_hw_multicast_list_set,
        .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
        .hw_rss_set                  = hw_atl_a0_hw_rss_set,
        .hw_rss_hash_set             = hw_atl_a0_hw_rss_hash_set,
        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
};
908 };