qed: Fix SPQ entries not returned to pool in error flows
drivers/net/ethernet/qlogic/qed/qed_l2.c
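For context, the error-flow pattern this fix enforces is sketched below. This is a minimal, illustrative sketch (not a complete function), using only helpers that appear in the listing that follows (qed_sp_init_request(), qed_sp_destroy_request() and qed_spq_post()), in the shape used by qed_sp_vport_update() and qed_filter_ucast_common(): once an SPQ entry has been handed out by qed_sp_init_request(), any later failure must return that entry to the free pool rather than simply bailing out.

        rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_VPORT_UPDATE,
                                 PROTOCOLID_ETH, &init_data);
        if (rc)
                return rc;              /* no SPQ entry acquired yet */

        rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc) {
                /* Return the acquired SPQ entry to the free pool */
                qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }

        return qed_spq_post(p_hwfn, p_ent, NULL);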
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015-2017  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and /or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <linux/types.h>
34 #include <asm/byteorder.h>
35 #include <asm/param.h>
36 #include <linux/delay.h>
37 #include <linux/dma-mapping.h>
38 #include <linux/etherdevice.h>
39 #include <linux/interrupt.h>
40 #include <linux/kernel.h>
41 #include <linux/module.h>
42 #include <linux/pci.h>
43 #include <linux/slab.h>
44 #include <linux/stddef.h>
45 #include <linux/string.h>
46 #include <linux/workqueue.h>
47 #include <linux/bitops.h>
48 #include <linux/bug.h>
49 #include <linux/vmalloc.h>
50 #include "qed.h"
51 #include <linux/qed/qed_chain.h>
52 #include "qed_cxt.h"
53 #include "qed_dev_api.h"
54 #include <linux/qed/qed_eth_if.h>
55 #include "qed_hsi.h"
56 #include "qed_hw.h"
57 #include "qed_int.h"
58 #include "qed_l2.h"
59 #include "qed_mcp.h"
60 #include "qed_reg_addr.h"
61 #include "qed_sp.h"
62 #include "qed_sriov.h"
63
64
65 #define QED_MAX_SGES_NUM 16
66 #define CRC32_POLY 0x1edc6f41
67
68 struct qed_l2_info {
69         u32 queues;
70         unsigned long **pp_qid_usage;
71
72         /* The lock is meant to synchronize access to the qid usage */
73         struct mutex lock;
74 };
75
76 int qed_l2_alloc(struct qed_hwfn *p_hwfn)
77 {
78         struct qed_l2_info *p_l2_info;
79         unsigned long **pp_qids;
80         u32 i;
81
82         if (!QED_IS_L2_PERSONALITY(p_hwfn))
83                 return 0;
84
85         p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
86         if (!p_l2_info)
87                 return -ENOMEM;
88         p_hwfn->p_l2_info = p_l2_info;
89
90         if (IS_PF(p_hwfn->cdev)) {
91                 p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
92         } else {
93                 u8 rx = 0, tx = 0;
94
95                 qed_vf_get_num_rxqs(p_hwfn, &rx);
96                 qed_vf_get_num_txqs(p_hwfn, &tx);
97
98                 p_l2_info->queues = max_t(u8, rx, tx);
99         }
100
101         pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
102                           GFP_KERNEL);
103         if (!pp_qids)
104                 return -ENOMEM;
105         p_l2_info->pp_qid_usage = pp_qids;
106
107         for (i = 0; i < p_l2_info->queues; i++) {
108                 pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
109                 if (!pp_qids[i])
110                         return -ENOMEM;
111         }
112
113         return 0;
114 }
115
116 void qed_l2_setup(struct qed_hwfn *p_hwfn)
117 {
118         if (!QED_IS_L2_PERSONALITY(p_hwfn))
119                 return;
120
121         mutex_init(&p_hwfn->p_l2_info->lock);
122 }
123
124 void qed_l2_free(struct qed_hwfn *p_hwfn)
125 {
126         u32 i;
127
128         if (!QED_IS_L2_PERSONALITY(p_hwfn))
129                 return;
130
131         if (!p_hwfn->p_l2_info)
132                 return;
133
134         if (!p_hwfn->p_l2_info->pp_qid_usage)
135                 goto out_l2_info;
136
137         /* Free entries until the first uninitialized one is reached */
138         for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
139                 if (!p_hwfn->p_l2_info->pp_qid_usage[i])
140                         break;
141                 kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
142         }
143
144         kfree(p_hwfn->p_l2_info->pp_qid_usage);
145
146 out_l2_info:
147         kfree(p_hwfn->p_l2_info);
148         p_hwfn->p_l2_info = NULL;
149 }
150
151 static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
152                                         struct qed_queue_cid *p_cid)
153 {
154         struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
155         u16 queue_id = p_cid->rel.queue_id;
156         bool b_rc = true;
157         u8 first;
158
159         mutex_lock(&p_l2_info->lock);
160
161         if (queue_id >= p_l2_info->queues) {
162                 DP_NOTICE(p_hwfn,
163                           "Requested to increase usage for qzone %04x out of %08x\n",
164                           queue_id, p_l2_info->queues);
165                 b_rc = false;
166                 goto out;
167         }
168
169         first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
170                                         MAX_QUEUES_PER_QZONE);
171         if (first >= MAX_QUEUES_PER_QZONE) {
172                 b_rc = false;
173                 goto out;
174         }
175
176         __set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
177         p_cid->qid_usage_idx = first;
178
179 out:
180         mutex_unlock(&p_l2_info->lock);
181         return b_rc;
182 }
183
184 static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
185                                         struct qed_queue_cid *p_cid)
186 {
187         mutex_lock(&p_hwfn->p_l2_info->lock);
188
189         clear_bit(p_cid->qid_usage_idx,
190                   p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
191
192         mutex_unlock(&p_hwfn->p_l2_info->lock);
193 }
194
195 void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
196                                struct qed_queue_cid *p_cid)
197 {
198         bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
199
200         if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
201                 _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
202
203         /* For PF's VFs we maintain the index inside queue-zone in IOV */
204         if (p_cid->vfid == QED_QUEUE_CID_SELF)
205                 qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
206
207         vfree(p_cid);
208 }
209
210 /* This internal variant is only meant to be called directly by PFs
211  * initializing CIDs for their VFs.
212  */
213 static struct qed_queue_cid *
214 _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
215                       u16 opaque_fid,
216                       u32 cid,
217                       struct qed_queue_start_common_params *p_params,
218                       bool b_is_rx,
219                       struct qed_queue_cid_vf_params *p_vf_params)
220 {
221         struct qed_queue_cid *p_cid;
222         int rc;
223
224         p_cid = vzalloc(sizeof(*p_cid));
225         if (!p_cid)
226                 return NULL;
227
228         p_cid->opaque_fid = opaque_fid;
229         p_cid->cid = cid;
230         p_cid->p_owner = p_hwfn;
231
232         /* Fill in parameters */
233         p_cid->rel.vport_id = p_params->vport_id;
234         p_cid->rel.queue_id = p_params->queue_id;
235         p_cid->rel.stats_id = p_params->stats_id;
236         p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
237         p_cid->b_is_rx = b_is_rx;
238         p_cid->sb_idx = p_params->sb_idx;
239
240         /* Fill-in bits related to VFs' queues if information was provided */
241         if (p_vf_params) {
242                 p_cid->vfid = p_vf_params->vfid;
243                 p_cid->vf_qid = p_vf_params->vf_qid;
244                 p_cid->vf_legacy = p_vf_params->vf_legacy;
245         } else {
246                 p_cid->vfid = QED_QUEUE_CID_SELF;
247         }
248
249         /* Don't try calculating the absolute indices for VFs */
250         if (IS_VF(p_hwfn->cdev)) {
251                 p_cid->abs = p_cid->rel;
252                 goto out;
253         }
254
255         /* Calculate the engine-absolute indices of the resources.
256          * This would guarantee they're valid later on.
257          * In some cases [SBs] we already have the right values.
258          */
259         rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
260         if (rc)
261                 goto fail;
262
263         rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
264         if (rc)
265                 goto fail;
266
267         /* In case of a PF configuring its VF's queues, the stats-id is already
268          * absolute [since there's a single index that's suitable per-VF].
269          */
270         if (p_cid->vfid == QED_QUEUE_CID_SELF) {
271                 rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
272                                   &p_cid->abs.stats_id);
273                 if (rc)
274                         goto fail;
275         } else {
276                 p_cid->abs.stats_id = p_cid->rel.stats_id;
277         }
278
279 out:
280         /* VF-images have provided the qid_usage_idx on their own.
281          * Otherwise, we need to allocate a unique one.
282          */
283         if (!p_vf_params) {
284                 if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
285                         goto fail;
286         } else {
287                 p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
288         }
289
290         DP_VERBOSE(p_hwfn,
291                    QED_MSG_SP,
292                    "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
293                    p_cid->opaque_fid,
294                    p_cid->cid,
295                    p_cid->rel.vport_id,
296                    p_cid->abs.vport_id,
297                    p_cid->rel.queue_id,
298                    p_cid->qid_usage_idx,
299                    p_cid->abs.queue_id,
300                    p_cid->rel.stats_id,
301                    p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
302
303         return p_cid;
304
305 fail:
306         vfree(p_cid);
307         return NULL;
308 }
309
310 struct qed_queue_cid *
311 qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
312                      u16 opaque_fid,
313                      struct qed_queue_start_common_params *p_params,
314                      bool b_is_rx,
315                      struct qed_queue_cid_vf_params *p_vf_params)
316 {
317         struct qed_queue_cid *p_cid;
318         u8 vfid = QED_CXT_PF_CID;
319         bool b_legacy_vf = false;
320         u32 cid = 0;
321
322         /* In case of legacy VFs, the CID can be derived from the additional
323          * VF parameters - the VF assumes queue X uses CID X, so we can simply
324          * use the vf_qid for this purpose as well.
325          */
326         if (p_vf_params) {
327                 vfid = p_vf_params->vfid;
328
329                 if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
330                         b_legacy_vf = true;
331                         cid = p_vf_params->vf_qid;
332                 }
333         }
334
335         /* Get a unique firmware CID for this queue if this is a PF.
336          * VFs don't need a CID, as the queue configuration will be done
337          * by the PF.
338          */
339         if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
340                 if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
341                                          &cid, vfid)) {
342                         DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
343                         return NULL;
344                 }
345         }
346
347         p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
348                                       p_params, b_is_rx, p_vf_params);
349         if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
350                 _qed_cxt_release_cid(p_hwfn, cid, vfid);
351
352         return p_cid;
353 }
354
355 static struct qed_queue_cid *
356 qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
357                         u16 opaque_fid,
358                         bool b_is_rx,
359                         struct qed_queue_start_common_params *p_params)
360 {
361         return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
362                                     NULL);
363 }
364
365 int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
366                            struct qed_sp_vport_start_params *p_params)
367 {
368         struct vport_start_ramrod_data *p_ramrod = NULL;
369         struct qed_spq_entry *p_ent =  NULL;
370         struct qed_sp_init_data init_data;
371         u8 abs_vport_id = 0;
372         int rc = -EINVAL;
373         u16 rx_mode = 0;
374
375         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
376         if (rc)
377                 return rc;
378
379         memset(&init_data, 0, sizeof(init_data));
380         init_data.cid = qed_spq_get_cid(p_hwfn);
381         init_data.opaque_fid = p_params->opaque_fid;
382         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
383
384         rc = qed_sp_init_request(p_hwfn, &p_ent,
385                                  ETH_RAMROD_VPORT_START,
386                                  PROTOCOLID_ETH, &init_data);
387         if (rc)
388                 return rc;
389
390         p_ramrod                = &p_ent->ramrod.vport_start;
391         p_ramrod->vport_id      = abs_vport_id;
392
393         p_ramrod->mtu                   = cpu_to_le16(p_params->mtu);
394         p_ramrod->handle_ptp_pkts       = p_params->handle_ptp_pkts;
395         p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
396         p_ramrod->drop_ttl0_en          = p_params->drop_ttl0;
397         p_ramrod->untagged              = p_params->only_untagged;
398
399         SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
400         SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
401
402         p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
403
404         /* TPA related fields */
405         memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param));
406
407         p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;
408
409         switch (p_params->tpa_mode) {
410         case QED_TPA_MODE_GRO:
411                 p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
412                 p_ramrod->tpa_param.tpa_max_size = (u16)-1;
413                 p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
414                 p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
415                 p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
416                 p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
417                 p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
418                 p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
419                 break;
420         default:
421                 break;
422         }
423
424         p_ramrod->tx_switching_en = p_params->tx_switching;
425
426         p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
427         p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
428
429         /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
430         p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
431                                                   p_params->concrete_fid);
432
433         return qed_spq_post(p_hwfn, p_ent, NULL);
434 }
435
436 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
437                               struct qed_sp_vport_start_params *p_params)
438 {
439         if (IS_VF(p_hwfn->cdev)) {
440                 return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
441                                              p_params->mtu,
442                                              p_params->remove_inner_vlan,
443                                              p_params->tpa_mode,
444                                              p_params->max_buffers_per_cqe,
445                                              p_params->only_untagged);
446         }
447
448         return qed_sp_eth_vport_start(p_hwfn, p_params);
449 }
450
451 static int
452 qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
453                         struct vport_update_ramrod_data *p_ramrod,
454                         struct qed_rss_params *p_rss)
455 {
456         struct eth_vport_rss_config *p_config;
457         u16 capabilities = 0;
458         int i, table_size;
459         int rc = 0;
460
461         if (!p_rss) {
462                 p_ramrod->common.update_rss_flg = 0;
463                 return rc;
464         }
465         p_config = &p_ramrod->rss_config;
466
467         BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
468
469         rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
470         if (rc)
471                 return rc;
472
473         p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
474         p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
475         p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
476         p_config->update_rss_key = p_rss->update_rss_key;
477
478         p_config->rss_mode = p_rss->rss_enable ?
479                              ETH_VPORT_RSS_MODE_REGULAR :
480                              ETH_VPORT_RSS_MODE_DISABLED;
481
482         SET_FIELD(capabilities,
483                   ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
484                   !!(p_rss->rss_caps & QED_RSS_IPV4));
485         SET_FIELD(capabilities,
486                   ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
487                   !!(p_rss->rss_caps & QED_RSS_IPV6));
488         SET_FIELD(capabilities,
489                   ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
490                   !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
491         SET_FIELD(capabilities,
492                   ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
493                   !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
494         SET_FIELD(capabilities,
495                   ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
496                   !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
497         SET_FIELD(capabilities,
498                   ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
499                   !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
500         p_config->tbl_size = p_rss->rss_table_size_log;
501
502         p_config->capabilities = cpu_to_le16(capabilities);
503
504         DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
505                    "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
506                    p_ramrod->common.update_rss_flg,
507                    p_config->rss_mode,
508                    p_config->update_rss_capabilities,
509                    p_config->capabilities,
510                    p_config->update_rss_ind_table, p_config->update_rss_key);
511
512         table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
513                            1 << p_config->tbl_size);
514         for (i = 0; i < table_size; i++) {
515                 struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
516
517                 if (!p_queue)
518                         return -EINVAL;
519
520                 p_config->indirection_table[i] =
521                     cpu_to_le16(p_queue->abs.queue_id);
522         }
523
524         DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
525                    "Configured RSS indirection table [%d entries]:\n",
526                    table_size);
527         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
528                 DP_VERBOSE(p_hwfn,
529                            NETIF_MSG_IFUP,
530                            "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
531                            le16_to_cpu(p_config->indirection_table[i]),
532                            le16_to_cpu(p_config->indirection_table[i + 1]),
533                            le16_to_cpu(p_config->indirection_table[i + 2]),
534                            le16_to_cpu(p_config->indirection_table[i + 3]),
535                            le16_to_cpu(p_config->indirection_table[i + 4]),
536                            le16_to_cpu(p_config->indirection_table[i + 5]),
537                            le16_to_cpu(p_config->indirection_table[i + 6]),
538                            le16_to_cpu(p_config->indirection_table[i + 7]),
539                            le16_to_cpu(p_config->indirection_table[i + 8]),
540                            le16_to_cpu(p_config->indirection_table[i + 9]),
541                            le16_to_cpu(p_config->indirection_table[i + 10]),
542                            le16_to_cpu(p_config->indirection_table[i + 11]),
543                            le16_to_cpu(p_config->indirection_table[i + 12]),
544                            le16_to_cpu(p_config->indirection_table[i + 13]),
545                            le16_to_cpu(p_config->indirection_table[i + 14]),
546                            le16_to_cpu(p_config->indirection_table[i + 15]));
547         }
548
549         for (i = 0; i < 10; i++)
550                 p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
551
552         return rc;
553 }
554
555 static void
556 qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
557                           struct vport_update_ramrod_data *p_ramrod,
558                           struct qed_filter_accept_flags accept_flags)
559 {
560         p_ramrod->common.update_rx_mode_flg =
561                 accept_flags.update_rx_mode_config;
562
563         p_ramrod->common.update_tx_mode_flg =
564                 accept_flags.update_tx_mode_config;
565
566         /* Set Rx mode accept flags */
567         if (p_ramrod->common.update_rx_mode_flg) {
568                 u8 accept_filter = accept_flags.rx_accept_filter;
569                 u16 state = 0;
570
571                 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
572                           !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
573                             !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
574
575                 SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
576                           !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
577
578                 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
579                           !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
580                             !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
581
582                 SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
583                           (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
584                            !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
585
586                 SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
587                           !!(accept_filter & QED_ACCEPT_BCAST));
588
589                 SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
590                           !!(accept_filter & QED_ACCEPT_ANY_VNI));
591
592                 p_ramrod->rx_mode.state = cpu_to_le16(state);
593                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
594                            "p_ramrod->rx_mode.state = 0x%x\n", state);
595         }
596
597         /* Set Tx mode accept flags */
598         if (p_ramrod->common.update_tx_mode_flg) {
599                 u8 accept_filter = accept_flags.tx_accept_filter;
600                 u16 state = 0;
601
602                 SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
603                           !!(accept_filter & QED_ACCEPT_NONE));
604
605                 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
606                           !!(accept_filter & QED_ACCEPT_NONE));
607
608                 SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
609                           (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
610                            !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
611
612                 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
613                           !!(accept_filter & QED_ACCEPT_BCAST));
614
615                 p_ramrod->tx_mode.state = cpu_to_le16(state);
616                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
617                            "p_ramrod->tx_mode.state = 0x%x\n", state);
618         }
619 }
620
621 static void
622 qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
623                             struct vport_update_ramrod_data *p_ramrod,
624                             struct qed_sge_tpa_params *p_params)
625 {
626         struct eth_vport_tpa_param *p_tpa;
627
628         if (!p_params) {
629                 p_ramrod->common.update_tpa_param_flg = 0;
630                 p_ramrod->common.update_tpa_en_flg = 0;
631                 p_ramrod->common.update_tpa_param_flg = 0;
632                 return;
633         }
634
635         p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
636         p_tpa = &p_ramrod->tpa_param;
637         p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
638         p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
639         p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
640         p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
641
642         p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
643         p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
644         p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
645         p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
646         p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
647         p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
648         p_tpa->tpa_max_size = p_params->tpa_max_size;
649         p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
650         p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
651 }
652
653 static void
654 qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
655                         struct vport_update_ramrod_data *p_ramrod,
656                         struct qed_sp_vport_update_params *p_params)
657 {
658         int i;
659
660         memset(&p_ramrod->approx_mcast.bins, 0,
661                sizeof(p_ramrod->approx_mcast.bins));
662
663         if (!p_params->update_approx_mcast_flg)
664                 return;
665
666         p_ramrod->common.update_approx_mcast_flg = 1;
667         for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
668                 u32 *p_bins = p_params->bins;
669
670                 p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
671         }
672 }
673
674 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
675                         struct qed_sp_vport_update_params *p_params,
676                         enum spq_mode comp_mode,
677                         struct qed_spq_comp_cb *p_comp_data)
678 {
679         struct qed_rss_params *p_rss_params = p_params->rss_params;
680         struct vport_update_ramrod_data_cmn *p_cmn;
681         struct qed_sp_init_data init_data;
682         struct vport_update_ramrod_data *p_ramrod = NULL;
683         struct qed_spq_entry *p_ent = NULL;
684         u8 abs_vport_id = 0, val;
685         int rc = -EINVAL;
686
687         if (IS_VF(p_hwfn->cdev)) {
688                 rc = qed_vf_pf_vport_update(p_hwfn, p_params);
689                 return rc;
690         }
691
692         rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
693         if (rc)
694                 return rc;
695
696         memset(&init_data, 0, sizeof(init_data));
697         init_data.cid = qed_spq_get_cid(p_hwfn);
698         init_data.opaque_fid = p_params->opaque_fid;
699         init_data.comp_mode = comp_mode;
700         init_data.p_comp_data = p_comp_data;
701
702         rc = qed_sp_init_request(p_hwfn, &p_ent,
703                                  ETH_RAMROD_VPORT_UPDATE,
704                                  PROTOCOLID_ETH, &init_data);
705         if (rc)
706                 return rc;
707
708         /* Copy input params to ramrod according to FW struct */
709         p_ramrod = &p_ent->ramrod.vport_update;
710         p_cmn = &p_ramrod->common;
711
712         p_cmn->vport_id = abs_vport_id;
713         p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
714         p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
715         p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
716         p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
717         p_cmn->accept_any_vlan = p_params->accept_any_vlan;
718         val = p_params->update_accept_any_vlan_flg;
719         p_cmn->update_accept_any_vlan_flg = val;
720
721         p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
722         val = p_params->update_inner_vlan_removal_flg;
723         p_cmn->update_inner_vlan_removal_en_flg = val;
724
725         p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
726         val = p_params->update_default_vlan_enable_flg;
727         p_cmn->update_default_vlan_en_flg = val;
728
729         p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
730         p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
731
732         p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
733
734         p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
735         p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
736
737         p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
738         val = p_params->update_anti_spoofing_en_flg;
739         p_ramrod->common.update_anti_spoofing_en_flg = val;
740
741         rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
742         if (rc) {
743                 qed_sp_destroy_request(p_hwfn, p_ent);
744                 return rc;
745         }
746
747         /* Update mcast bins for VFs; the PF doesn't use this functionality */
748         qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
749
750         qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
751         qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
752         return qed_spq_post(p_hwfn, p_ent, NULL);
753 }
754
755 int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
756 {
757         struct vport_stop_ramrod_data *p_ramrod;
758         struct qed_sp_init_data init_data;
759         struct qed_spq_entry *p_ent;
760         u8 abs_vport_id = 0;
761         int rc;
762
763         if (IS_VF(p_hwfn->cdev))
764                 return qed_vf_pf_vport_stop(p_hwfn);
765
766         rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
767         if (rc)
768                 return rc;
769
770         memset(&init_data, 0, sizeof(init_data));
771         init_data.cid = qed_spq_get_cid(p_hwfn);
772         init_data.opaque_fid = opaque_fid;
773         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
774
775         rc = qed_sp_init_request(p_hwfn, &p_ent,
776                                  ETH_RAMROD_VPORT_STOP,
777                                  PROTOCOLID_ETH, &init_data);
778         if (rc)
779                 return rc;
780
781         p_ramrod = &p_ent->ramrod.vport_stop;
782         p_ramrod->vport_id = abs_vport_id;
783
784         return qed_spq_post(p_hwfn, p_ent, NULL);
785 }
786
787 static int
788 qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
789                        struct qed_filter_accept_flags *p_accept_flags)
790 {
791         struct qed_sp_vport_update_params s_params;
792
793         memset(&s_params, 0, sizeof(s_params));
794         memcpy(&s_params.accept_flags, p_accept_flags,
795                sizeof(struct qed_filter_accept_flags));
796
797         return qed_vf_pf_vport_update(p_hwfn, &s_params);
798 }
799
800 static int qed_filter_accept_cmd(struct qed_dev *cdev,
801                                  u8 vport,
802                                  struct qed_filter_accept_flags accept_flags,
803                                  u8 update_accept_any_vlan,
804                                  u8 accept_any_vlan,
805                                  enum spq_mode comp_mode,
806                                  struct qed_spq_comp_cb *p_comp_data)
807 {
808         struct qed_sp_vport_update_params vport_update_params;
809         int i, rc;
810
811         /* Prepare and send the vport rx_mode change */
812         memset(&vport_update_params, 0, sizeof(vport_update_params));
813         vport_update_params.vport_id = vport;
814         vport_update_params.accept_flags = accept_flags;
815         vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
816         vport_update_params.accept_any_vlan = accept_any_vlan;
817
818         for_each_hwfn(cdev, i) {
819                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
820
821                 vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
822
823                 if (IS_VF(cdev)) {
824                         rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
825                         if (rc)
826                                 return rc;
827                         continue;
828                 }
829
830                 rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
831                                          comp_mode, p_comp_data);
832                 if (rc) {
833                         DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
834                         return rc;
835                 }
836
837                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
838                            "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
839                            accept_flags.rx_accept_filter,
840                            accept_flags.tx_accept_filter);
841                 if (update_accept_any_vlan)
842                         DP_VERBOSE(p_hwfn, QED_MSG_SP,
843                                    "accept_any_vlan=%d configured\n",
844                                    accept_any_vlan);
845         }
846
847         return 0;
848 }
849
850 int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
851                              struct qed_queue_cid *p_cid,
852                              u16 bd_max_bytes,
853                              dma_addr_t bd_chain_phys_addr,
854                              dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
855 {
856         struct rx_queue_start_ramrod_data *p_ramrod = NULL;
857         struct qed_spq_entry *p_ent = NULL;
858         struct qed_sp_init_data init_data;
859         int rc = -EINVAL;
860
861         DP_VERBOSE(p_hwfn, QED_MSG_SP,
862                    "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
863                    p_cid->opaque_fid, p_cid->cid,
864                    p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);
865
866         /* Get SPQ entry */
867         memset(&init_data, 0, sizeof(init_data));
868         init_data.cid = p_cid->cid;
869         init_data.opaque_fid = p_cid->opaque_fid;
870         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
871
872         rc = qed_sp_init_request(p_hwfn, &p_ent,
873                                  ETH_RAMROD_RX_QUEUE_START,
874                                  PROTOCOLID_ETH, &init_data);
875         if (rc)
876                 return rc;
877
878         p_ramrod = &p_ent->ramrod.rx_queue_start;
879
880         p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
881         p_ramrod->sb_index = p_cid->sb_idx;
882         p_ramrod->vport_id = p_cid->abs.vport_id;
883         p_ramrod->stats_counter_id = p_cid->abs.stats_id;
884         p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
885         p_ramrod->complete_cqe_flg = 0;
886         p_ramrod->complete_event_flg = 1;
887
888         p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
889         DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
890
891         p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
892         DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
893
894         if (p_cid->vfid != QED_QUEUE_CID_SELF) {
895                 bool b_legacy_vf = !!(p_cid->vf_legacy &
896                                       QED_QCID_LEGACY_VF_RX_PROD);
897
898                 p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
899                 DP_VERBOSE(p_hwfn, QED_MSG_SP,
900                            "Queue%s is meant for VF rxq[%02x]\n",
901                            b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
902                 p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
903         }
904
905         return qed_spq_post(p_hwfn, p_ent, NULL);
906 }
907
908 static int
909 qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
910                           struct qed_queue_cid *p_cid,
911                           u16 bd_max_bytes,
912                           dma_addr_t bd_chain_phys_addr,
913                           dma_addr_t cqe_pbl_addr,
914                           u16 cqe_pbl_size, void __iomem **pp_prod)
915 {
916         u32 init_prod_val = 0;
917
918         *pp_prod = p_hwfn->regview +
919                    GTT_BAR0_MAP_REG_MSDM_RAM +
920                     MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
921
922         /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
923         __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
924                           (u32 *)(&init_prod_val));
925
926         return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
927                                         bd_max_bytes,
928                                         bd_chain_phys_addr,
929                                         cqe_pbl_addr, cqe_pbl_size);
930 }
931
932 static int
933 qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
934                        u16 opaque_fid,
935                        struct qed_queue_start_common_params *p_params,
936                        u16 bd_max_bytes,
937                        dma_addr_t bd_chain_phys_addr,
938                        dma_addr_t cqe_pbl_addr,
939                        u16 cqe_pbl_size,
940                        struct qed_rxq_start_ret_params *p_ret_params)
941 {
942         struct qed_queue_cid *p_cid;
943         int rc;
944
945         /* Allocate a CID for the queue */
946         p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
947         if (!p_cid)
948                 return -ENOMEM;
949
950         if (IS_PF(p_hwfn->cdev)) {
951                 rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
952                                                bd_max_bytes,
953                                                bd_chain_phys_addr,
954                                                cqe_pbl_addr, cqe_pbl_size,
955                                                &p_ret_params->p_prod);
956         } else {
957                 rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
958                                          bd_max_bytes,
959                                          bd_chain_phys_addr,
960                                          cqe_pbl_addr,
961                                          cqe_pbl_size, &p_ret_params->p_prod);
962         }
963
964         /* On success, provide the caller with the CID as a handle; release it otherwise */
965         if (rc)
966                 qed_eth_queue_cid_release(p_hwfn, p_cid);
967         else
968                 p_ret_params->p_handle = (void *)p_cid;
969
970         return rc;
971 }
972
973 int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
974                                 void **pp_rxq_handles,
975                                 u8 num_rxqs,
976                                 u8 complete_cqe_flg,
977                                 u8 complete_event_flg,
978                                 enum spq_mode comp_mode,
979                                 struct qed_spq_comp_cb *p_comp_data)
980 {
981         struct rx_queue_update_ramrod_data *p_ramrod = NULL;
982         struct qed_spq_entry *p_ent = NULL;
983         struct qed_sp_init_data init_data;
984         struct qed_queue_cid *p_cid;
985         int rc = -EINVAL;
986         u8 i;
987
988         memset(&init_data, 0, sizeof(init_data));
989         init_data.comp_mode = comp_mode;
990         init_data.p_comp_data = p_comp_data;
991
992         for (i = 0; i < num_rxqs; i++) {
993                 p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];
994
995                 /* Get SPQ entry */
996                 init_data.cid = p_cid->cid;
997                 init_data.opaque_fid = p_cid->opaque_fid;
998
999                 rc = qed_sp_init_request(p_hwfn, &p_ent,
1000                                          ETH_RAMROD_RX_QUEUE_UPDATE,
1001                                          PROTOCOLID_ETH, &init_data);
1002                 if (rc)
1003                         return rc;
1004
1005                 p_ramrod = &p_ent->ramrod.rx_queue_update;
1006                 p_ramrod->vport_id = p_cid->abs.vport_id;
1007
1008                 p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
1009                 p_ramrod->complete_cqe_flg = complete_cqe_flg;
1010                 p_ramrod->complete_event_flg = complete_event_flg;
1011
1012                 rc = qed_spq_post(p_hwfn, p_ent, NULL);
1013                 if (rc)
1014                         return rc;
1015         }
1016
1017         return rc;
1018 }
1019
1020 static int
1021 qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
1022                          struct qed_queue_cid *p_cid,
1023                          bool b_eq_completion_only, bool b_cqe_completion)
1024 {
1025         struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
1026         struct qed_spq_entry *p_ent = NULL;
1027         struct qed_sp_init_data init_data;
1028         int rc;
1029
1030         memset(&init_data, 0, sizeof(init_data));
1031         init_data.cid = p_cid->cid;
1032         init_data.opaque_fid = p_cid->opaque_fid;
1033         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1034
1035         rc = qed_sp_init_request(p_hwfn, &p_ent,
1036                                  ETH_RAMROD_RX_QUEUE_STOP,
1037                                  PROTOCOLID_ETH, &init_data);
1038         if (rc)
1039                 return rc;
1040
1041         p_ramrod = &p_ent->ramrod.rx_queue_stop;
1042         p_ramrod->vport_id = p_cid->abs.vport_id;
1043         p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
1044
1045         /* Cleaning the queue requires the completion to arrive there.
1046          * In addition, VFs require the answer to arrive as an EQE on the PF.
1047          */
1048         p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
1049                                       !b_eq_completion_only) ||
1050                                      b_cqe_completion;
1051         p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
1052                                        b_eq_completion_only;
1053
1054         return qed_spq_post(p_hwfn, p_ent, NULL);
1055 }
1056
1057 int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
1058                           void *p_rxq,
1059                           bool eq_completion_only, bool cqe_completion)
1060 {
1061         struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
1062         int rc = -EINVAL;
1063
1064         if (IS_PF(p_hwfn->cdev))
1065                 rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
1066                                               eq_completion_only,
1067                                               cqe_completion);
1068         else
1069                 rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
1070
1071         if (!rc)
1072                 qed_eth_queue_cid_release(p_hwfn, p_cid);
1073         return rc;
1074 }
1075
1076 int
1077 qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
1078                          struct qed_queue_cid *p_cid,
1079                          dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
1080 {
1081         struct tx_queue_start_ramrod_data *p_ramrod = NULL;
1082         struct qed_spq_entry *p_ent = NULL;
1083         struct qed_sp_init_data init_data;
1084         int rc = -EINVAL;
1085
1086         /* Get SPQ entry */
1087         memset(&init_data, 0, sizeof(init_data));
1088         init_data.cid = p_cid->cid;
1089         init_data.opaque_fid = p_cid->opaque_fid;
1090         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1091
1092         rc = qed_sp_init_request(p_hwfn, &p_ent,
1093                                  ETH_RAMROD_TX_QUEUE_START,
1094                                  PROTOCOLID_ETH, &init_data);
1095         if (rc)
1096                 return rc;
1097
1098         p_ramrod = &p_ent->ramrod.tx_queue_start;
1099         p_ramrod->vport_id = p_cid->abs.vport_id;
1100
1101         p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
1102         p_ramrod->sb_index = p_cid->sb_idx;
1103         p_ramrod->stats_counter_id = p_cid->abs.stats_id;
1104
1105         p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
1106         p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);
1107
1108         p_ramrod->pbl_size = cpu_to_le16(pbl_size);
1109         DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
1110
1111         p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
1112
1113         return qed_spq_post(p_hwfn, p_ent, NULL);
1114 }
1115
1116 static int
1117 qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
1118                           struct qed_queue_cid *p_cid,
1119                           u8 tc,
1120                           dma_addr_t pbl_addr,
1121                           u16 pbl_size, void __iomem **pp_doorbell)
1122 {
1123         int rc;
1124
1125
1126         rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
1127                                       pbl_addr, pbl_size,
1128                                       qed_get_cm_pq_idx_mcos(p_hwfn, tc));
1129         if (rc)
1130                 return rc;
1131
1132         /* Provide the caller with the necessary return values */
1133         *pp_doorbell = p_hwfn->doorbells +
1134                        qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);
1135
1136         return 0;
1137 }
1138
1139 static int
1140 qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
1141                        u16 opaque_fid,
1142                        struct qed_queue_start_common_params *p_params,
1143                        u8 tc,
1144                        dma_addr_t pbl_addr,
1145                        u16 pbl_size,
1146                        struct qed_txq_start_ret_params *p_ret_params)
1147 {
1148         struct qed_queue_cid *p_cid;
1149         int rc;
1150
1151         p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
1152         if (!p_cid)
1153                 return -EINVAL;
1154
1155         if (IS_PF(p_hwfn->cdev))
1156                 rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
1157                                                pbl_addr, pbl_size,
1158                                                &p_ret_params->p_doorbell);
1159         else
1160                 rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
1161                                          pbl_addr, pbl_size,
1162                                          &p_ret_params->p_doorbell);
1163
1164         if (rc)
1165                 qed_eth_queue_cid_release(p_hwfn, p_cid);
1166         else
1167                 p_ret_params->p_handle = (void *)p_cid;
1168
1169         return rc;
1170 }
1171
1172 static int
1173 qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
1174 {
1175         struct qed_spq_entry *p_ent = NULL;
1176         struct qed_sp_init_data init_data;
1177         int rc;
1178
1179         memset(&init_data, 0, sizeof(init_data));
1180         init_data.cid = p_cid->cid;
1181         init_data.opaque_fid = p_cid->opaque_fid;
1182         init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
1183
1184         rc = qed_sp_init_request(p_hwfn, &p_ent,
1185                                  ETH_RAMROD_TX_QUEUE_STOP,
1186                                  PROTOCOLID_ETH, &init_data);
1187         if (rc)
1188                 return rc;
1189
1190         return qed_spq_post(p_hwfn, p_ent, NULL);
1191 }
1192
1193 int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
1194 {
1195         struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
1196         int rc;
1197
1198         if (IS_PF(p_hwfn->cdev))
1199                 rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
1200         else
1201                 rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
1202
1203         if (!rc)
1204                 qed_eth_queue_cid_release(p_hwfn, p_cid);
1205         return rc;
1206 }
1207
1208 static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
1209 {
1210         enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
1211
1212         switch (opcode) {
1213         case QED_FILTER_ADD:
1214                 action = ETH_FILTER_ACTION_ADD;
1215                 break;
1216         case QED_FILTER_REMOVE:
1217                 action = ETH_FILTER_ACTION_REMOVE;
1218                 break;
1219         case QED_FILTER_FLUSH:
1220                 action = ETH_FILTER_ACTION_REMOVE_ALL;
1221                 break;
1222         default:
1223                 action = MAX_ETH_FILTER_ACTION;
1224         }
1225
1226         return action;
1227 }
1228
1229 static int
1230 qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
1231                         u16 opaque_fid,
1232                         struct qed_filter_ucast *p_filter_cmd,
1233                         struct vport_filter_update_ramrod_data **pp_ramrod,
1234                         struct qed_spq_entry **pp_ent,
1235                         enum spq_mode comp_mode,
1236                         struct qed_spq_comp_cb *p_comp_data)
1237 {
1238         u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1239         struct vport_filter_update_ramrod_data *p_ramrod;
1240         struct eth_filter_cmd *p_first_filter;
1241         struct eth_filter_cmd *p_second_filter;
1242         struct qed_sp_init_data init_data;
1243         enum eth_filter_action action;
1244         int rc;
1245
1246         rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1247                           &vport_to_remove_from);
1248         if (rc)
1249                 return rc;
1250
1251         rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1252                           &vport_to_add_to);
1253         if (rc)
1254                 return rc;
1255
1256         /* Get SPQ entry */
1257         memset(&init_data, 0, sizeof(init_data));
1258         init_data.cid = qed_spq_get_cid(p_hwfn);
1259         init_data.opaque_fid = opaque_fid;
1260         init_data.comp_mode = comp_mode;
1261         init_data.p_comp_data = p_comp_data;
1262
1263         rc = qed_sp_init_request(p_hwfn, pp_ent,
1264                                  ETH_RAMROD_FILTERS_UPDATE,
1265                                  PROTOCOLID_ETH, &init_data);
1266         if (rc)
1267                 return rc;
1268
1269         *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1270         p_ramrod = *pp_ramrod;
1271         p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1272         p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1273
1274         switch (p_filter_cmd->opcode) {
1275         case QED_FILTER_REPLACE:
1276         case QED_FILTER_MOVE:
1277                 p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
1278         default:
1279                 p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
1280         }
1281
1282         p_first_filter  = &p_ramrod->filter_cmds[0];
1283         p_second_filter = &p_ramrod->filter_cmds[1];
1284
1285         switch (p_filter_cmd->type) {
1286         case QED_FILTER_MAC:
1287                 p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
1288         case QED_FILTER_VLAN:
1289                 p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
1290         case QED_FILTER_MAC_VLAN:
1291                 p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
1292         case QED_FILTER_INNER_MAC:
1293                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
1294         case QED_FILTER_INNER_VLAN:
1295                 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
1296         case QED_FILTER_INNER_PAIR:
1297                 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
1298         case QED_FILTER_INNER_MAC_VNI_PAIR:
1299                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1300                 break;
1301         case QED_FILTER_MAC_VNI_PAIR:
1302                 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
1303         case QED_FILTER_VNI:
1304                 p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
1305         }
1306
1307         if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1308             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1309             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1310             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1311             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1312             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
1313                 qed_set_fw_mac_addr(&p_first_filter->mac_msb,
1314                                     &p_first_filter->mac_mid,
1315                                     &p_first_filter->mac_lsb,
1316                                     (u8 *)p_filter_cmd->mac);
1317         }
1318
1319         if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1320             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1321             (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1322             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1323                 p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
1324
1325         if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1326             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1327             (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1328                 p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
1329
1330         if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
1331                 p_second_filter->type = p_first_filter->type;
1332                 p_second_filter->mac_msb = p_first_filter->mac_msb;
1333                 p_second_filter->mac_mid = p_first_filter->mac_mid;
1334                 p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1335                 p_second_filter->vlan_id = p_first_filter->vlan_id;
1336                 p_second_filter->vni = p_first_filter->vni;
1337
1338                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1339
1340                 p_first_filter->vport_id = vport_to_remove_from;
1341
1342                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1343                 p_second_filter->vport_id = vport_to_add_to;
1344         } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
1345                 p_first_filter->vport_id = vport_to_add_to;
1346                 memcpy(p_second_filter, p_first_filter,
1347                        sizeof(*p_second_filter));
1348                 p_first_filter->action  = ETH_FILTER_ACTION_REMOVE_ALL;
1349                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1350         } else {
1351                 action = qed_filter_action(p_filter_cmd->opcode);
1352
1353                 if (action == MAX_ETH_FILTER_ACTION) {
1354                         DP_NOTICE(p_hwfn,
1355                                   "%d is not supported yet\n",
1356                                   p_filter_cmd->opcode);
1357                         qed_sp_destroy_request(p_hwfn, *pp_ent);
1358                         return -EINVAL;
1359                 }
1360
1361                 p_first_filter->action = action;
1362                 p_first_filter->vport_id = (p_filter_cmd->opcode ==
1363                                             QED_FILTER_REMOVE) ?
1364                                            vport_to_remove_from :
1365                                            vport_to_add_to;
1366         }
1367
1368         return 0;
1369 }
1370
1371 int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
1372                             u16 opaque_fid,
1373                             struct qed_filter_ucast *p_filter_cmd,
1374                             enum spq_mode comp_mode,
1375                             struct qed_spq_comp_cb *p_comp_data)
1376 {
1377         struct vport_filter_update_ramrod_data  *p_ramrod       = NULL;
1378         struct qed_spq_entry                    *p_ent          = NULL;
1379         struct eth_filter_cmd_header            *p_header;
1380         int                                     rc;
1381
1382         rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1383                                      &p_ramrod, &p_ent,
1384                                      comp_mode, p_comp_data);
1385         if (rc) {
1386                 DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
1387                 return rc;
1388         }
1389         p_header = &p_ramrod->filter_cmd_hdr;
1390         p_header->assert_on_error = p_filter_cmd->assert_on_error;
1391
1392         rc = qed_spq_post(p_hwfn, p_ent, NULL);
1393         if (rc) {
1394                 DP_ERR(p_hwfn, "Unicast filter post failed %d\n", rc);
1395                 return rc;
1396         }
1397
1398         DP_VERBOSE(p_hwfn, QED_MSG_SP,
1399                    "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1400                    (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
1401                    ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
1402                    "REMOVE" :
1403                    ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
1404                     "MOVE" : "REPLACE")),
1405                    (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
1406                    ((p_filter_cmd->type == QED_FILTER_VLAN) ?
1407                     "VLAN" : "MAC & VLAN"),
1408                    p_ramrod->filter_cmd_hdr.cmd_cnt,
1409                    p_filter_cmd->is_rx_filter,
1410                    p_filter_cmd->is_tx_filter);
1411         DP_VERBOSE(p_hwfn, QED_MSG_SP,
1412                    "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
1413                    p_filter_cmd->vport_to_add_to,
1414                    p_filter_cmd->vport_to_remove_from,
1415                    p_filter_cmd->mac[0],
1416                    p_filter_cmd->mac[1],
1417                    p_filter_cmd->mac[2],
1418                    p_filter_cmd->mac[3],
1419                    p_filter_cmd->mac[4],
1420                    p_filter_cmd->mac[5],
1421                    p_filter_cmd->vlan);
1422
1423         return 0;
1424 }
1425
1426 /*******************************************************************************
1427  * Description:
1428  *         Calculates CRC-32C over a buffer
1429  *         Note: crc32_length is in bytes and MUST be a multiple of 8
1430  * Return: the CRC-32C result (crc32_seed is returned for invalid input)
1431  ******************************************************************************/
1432 static u32 qed_calc_crc32c(u8 *crc32_packet,
1433                            u32 crc32_length, u32 crc32_seed, u8 complement)
1434 {
1435         u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1436         u8 msb = 0, current_byte = 0;
1437
1438         if ((!crc32_packet) ||
1439             (crc32_length == 0) ||
1440             ((crc32_length % 8) != 0))
1441                 return crc32_result;
1442         for (byte = 0; byte < crc32_length; byte++) {
1443                 current_byte = crc32_packet[byte];
1444                 for (bit = 0; bit < 8; bit++) {
1445                         msb = (u8)(crc32_result >> 31);
1446                         crc32_result = crc32_result << 1;
1447                         if (msb != (0x1 & (current_byte >> bit))) {
1448                                 crc32_result = crc32_result ^ CRC32_POLY;
1449                                 crc32_result |= 1; /*crc32_result[0] = 1;*/
1450                         }
1451                 }
1452         }
1453         return crc32_result;
1454 }
1455
1456 static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
1457 {
1458         u32 packet_buf[2] = { 0 };
1459
1460         memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
1461         return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1462 }
1463
1464 u8 qed_mcast_bin_from_mac(u8 *mac)
1465 {
1466         u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1467                                 mac, ETH_ALEN);
1468
1469         return crc & 0xff;
1470 }
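
/* Illustrative sketch: how a multicast MAC is hashed into the 256-bin
 * approximate-multicast vector. The helper below is hypothetical and only
 * mirrors the bit-setting logic used in qed_sp_eth_filter_mcast() further
 * down.
 */
static inline void qed_example_set_mcast_bin(u32 *bins, u8 *mac)
{
	u32 bin = qed_mcast_bin_from_mac(mac);		/* 0..255 */
	u32 nbits = sizeof(u32) * BITS_PER_BYTE;	/* 32 bits per word */

	/* e.g. bin 0x2a sets bit 10 of bins[1] */
	bins[bin / nbits] |= 1 << (bin % nbits);
}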
1471
1472 static int
1473 qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
1474                         u16 opaque_fid,
1475                         struct qed_filter_mcast *p_filter_cmd,
1476                         enum spq_mode comp_mode,
1477                         struct qed_spq_comp_cb *p_comp_data)
1478 {
1479         struct vport_update_ramrod_data *p_ramrod = NULL;
1480         u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1481         struct qed_spq_entry *p_ent = NULL;
1482         struct qed_sp_init_data init_data;
1483         u8 abs_vport_id = 0;
1484         int rc, i;
1485
1486         if (p_filter_cmd->opcode == QED_FILTER_ADD)
1487                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1488                                   &abs_vport_id);
1489         else
1490                 rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1491                                   &abs_vport_id);
1492         if (rc)
1493                 return rc;
1494
1495         /* Get SPQ entry */
1496         memset(&init_data, 0, sizeof(init_data));
1497         init_data.cid = qed_spq_get_cid(p_hwfn);
1498         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1499         init_data.comp_mode = comp_mode;
1500         init_data.p_comp_data = p_comp_data;
1501
1502         rc = qed_sp_init_request(p_hwfn, &p_ent,
1503                                  ETH_RAMROD_VPORT_UPDATE,
1504                                  PROTOCOLID_ETH, &init_data);
1505         if (rc) {
1506                 DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
1507                 return rc;
1508         }
1509
1510         p_ramrod = &p_ent->ramrod.vport_update;
1511         p_ramrod->common.update_approx_mcast_flg = 1;
1512
1513         /* explicitly clear out the entire vector */
1514         memset(&p_ramrod->approx_mcast.bins, 0,
1515                sizeof(p_ramrod->approx_mcast.bins));
1516         memset(bins, 0, sizeof(bins));
1517         /* The filter ADD op is an explicit 'set' operation: it replaces
1518          * any existing multicast filter configuration for the vport.
1519          */
1520         if (p_filter_cmd->opcode == QED_FILTER_ADD) {
1521                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1522                         u32 bit, nbits;
1523
1524                         bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1525                         nbits = sizeof(u32) * BITS_PER_BYTE;
1526                         bins[bit / nbits] |= 1 << (bit % nbits);
1527                 }
1528
1529                 /* Convert to the correct endianness */
1530                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1531                         struct vport_update_ramrod_mcast *p_ramrod_bins;
1532
1533                         p_ramrod_bins = &p_ramrod->approx_mcast;
1534                         p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
1535                 }
1536         }
1537
1538         p_ramrod->common.vport_id = abs_vport_id;
1539
1540         return qed_spq_post(p_hwfn, p_ent, NULL);
1541 }
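
/* Usage sketch (hypothetical caller, values assumed): because ADD is a full
 * 'set' of the approximate-multicast vector, the caller passes the complete
 * list of multicast addresses on every update, e.g. via the wrapper below:
 *
 *	struct qed_filter_mcast mcast = { .opcode = QED_FILTER_ADD,
 *					  .vport_to_add_to = 0,
 *					  .num_mc_addrs = 2 };
 *
 *	ether_addr_copy(mcast.mac[0], mc_addr0);
 *	ether_addr_copy(mcast.mac[1], mc_addr1);
 *	qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
 */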
1542
1543 static int qed_filter_mcast_cmd(struct qed_dev *cdev,
1544                                 struct qed_filter_mcast *p_filter_cmd,
1545                                 enum spq_mode comp_mode,
1546                                 struct qed_spq_comp_cb *p_comp_data)
1547 {
1548         int rc = 0;
1549         int i;
1550
1551         /* Only ADD and REMOVE operations are supported for multicast */
1552         if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
1553              p_filter_cmd->opcode != QED_FILTER_REMOVE) ||
1554             (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
1555                 return -EINVAL;
1556
1557         for_each_hwfn(cdev, i) {
1558                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1559                 u16 opaque_fid;
1561
1562                 if (IS_VF(cdev)) {
1563                         qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1564                         continue;
1565                 }
1566
1567                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1568
1569                 rc = qed_sp_eth_filter_mcast(p_hwfn,
1570                                              opaque_fid,
1571                                              p_filter_cmd,
1572                                              comp_mode, p_comp_data);
1573         }
1574         return rc;
1575 }
1576
1577 static int qed_filter_ucast_cmd(struct qed_dev *cdev,
1578                                 struct qed_filter_ucast *p_filter_cmd,
1579                                 enum spq_mode comp_mode,
1580                                 struct qed_spq_comp_cb *p_comp_data)
1581 {
1582         int rc = 0;
1583         int i;
1584
1585         for_each_hwfn(cdev, i) {
1586                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1587                 u16 opaque_fid;
1588
1589                 if (IS_VF(cdev)) {
1590                         rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1591                         continue;
1592                 }
1593
1594                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1595
1596                 rc = qed_sp_eth_filter_ucast(p_hwfn,
1597                                              opaque_fid,
1598                                              p_filter_cmd,
1599                                              comp_mode, p_comp_data);
1600                 if (rc)
1601                         break;
1602         }
1603
1604         return rc;
1605 }
1606
1607 /* Statistics related code */
1608 static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
1609                                            u32 *p_addr,
1610                                            u32 *p_len, u16 statistics_bin)
1611 {
1612         if (IS_PF(p_hwfn->cdev)) {
1613                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1614                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1615                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1616         } else {
1617                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1618                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1619
1620                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1621                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1622         }
1623 }
1624
1625 static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
1626                                    struct qed_ptt *p_ptt,
1627                                    struct qed_eth_stats *p_stats,
1628                                    u16 statistics_bin)
1629 {
1630         struct eth_pstorm_per_queue_stat pstats;
1631         u32 pstats_addr = 0, pstats_len = 0;
1632
1633         __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1634                                        statistics_bin);
1635
1636         memset(&pstats, 0, sizeof(pstats));
1637         qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1638
1639         p_stats->common.tx_ucast_bytes +=
1640             HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1641         p_stats->common.tx_mcast_bytes +=
1642             HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1643         p_stats->common.tx_bcast_bytes +=
1644             HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1645         p_stats->common.tx_ucast_pkts +=
1646             HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1647         p_stats->common.tx_mcast_pkts +=
1648             HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1649         p_stats->common.tx_bcast_pkts +=
1650             HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1651         p_stats->common.tx_err_drop_pkts +=
1652             HILO_64_REGPAIR(pstats.error_drop_pkts);
1653 }
1654
1655 static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
1656                                    struct qed_ptt *p_ptt,
1657                                    struct qed_eth_stats *p_stats,
1658                                    u16 statistics_bin)
1659 {
1660         struct tstorm_per_port_stat tstats;
1661         u32 tstats_addr, tstats_len;
1662
1663         if (IS_PF(p_hwfn->cdev)) {
1664                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1665                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1666                 tstats_len = sizeof(struct tstorm_per_port_stat);
1667         } else {
1668                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1669                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1670
1671                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1672                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1673         }
1674
1675         memset(&tstats, 0, sizeof(tstats));
1676         qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1677
1678         p_stats->common.mftag_filter_discards +=
1679             HILO_64_REGPAIR(tstats.mftag_filter_discard);
1680         p_stats->common.mac_filter_discards +=
1681             HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1682         p_stats->common.gft_filter_drop +=
1683                 HILO_64_REGPAIR(tstats.eth_gft_drop_pkt);
1684 }
1685
1686 static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
1687                                            u32 *p_addr,
1688                                            u32 *p_len, u16 statistics_bin)
1689 {
1690         if (IS_PF(p_hwfn->cdev)) {
1691                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1692                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1693                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1694         } else {
1695                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1696                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1697
1698                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1699                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1700         }
1701 }
1702
1703 static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
1704                                    struct qed_ptt *p_ptt,
1705                                    struct qed_eth_stats *p_stats,
1706                                    u16 statistics_bin)
1707 {
1708         struct eth_ustorm_per_queue_stat ustats;
1709         u32 ustats_addr = 0, ustats_len = 0;
1710
1711         __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1712                                        statistics_bin);
1713
1714         memset(&ustats, 0, sizeof(ustats));
1715         qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1716
1717         p_stats->common.rx_ucast_bytes +=
1718             HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1719         p_stats->common.rx_mcast_bytes +=
1720             HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1721         p_stats->common.rx_bcast_bytes +=
1722             HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1723         p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1724         p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1725         p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1726 }
1727
1728 static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
1729                                            u32 *p_addr,
1730                                            u32 *p_len, u16 statistics_bin)
1731 {
1732         if (IS_PF(p_hwfn->cdev)) {
1733                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1734                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1735                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1736         } else {
1737                 struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
1738                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1739
1740                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1741                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1742         }
1743 }
1744
1745 static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
1746                                    struct qed_ptt *p_ptt,
1747                                    struct qed_eth_stats *p_stats,
1748                                    u16 statistics_bin)
1749 {
1750         struct eth_mstorm_per_queue_stat mstats;
1751         u32 mstats_addr = 0, mstats_len = 0;
1752
1753         __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1754                                        statistics_bin);
1755
1756         memset(&mstats, 0, sizeof(mstats));
1757         qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1758
1759         p_stats->common.no_buff_discards +=
1760             HILO_64_REGPAIR(mstats.no_buff_discard);
1761         p_stats->common.packet_too_big_discard +=
1762             HILO_64_REGPAIR(mstats.packet_too_big_discard);
1763         p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1764         p_stats->common.tpa_coalesced_pkts +=
1765             HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1766         p_stats->common.tpa_coalesced_events +=
1767             HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1768         p_stats->common.tpa_aborts_num +=
1769             HILO_64_REGPAIR(mstats.tpa_aborts_num);
1770         p_stats->common.tpa_coalesced_bytes +=
1771             HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1772 }
1773
1774 static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
1775                                        struct qed_ptt *p_ptt,
1776                                        struct qed_eth_stats *p_stats)
1777 {
1778         struct qed_eth_stats_common *p_common = &p_stats->common;
1779         struct port_stats port_stats;
1780         int j;
1781
1782         memset(&port_stats, 0, sizeof(port_stats));
1783
1784         qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
1785                         p_hwfn->mcp_info->port_addr +
1786                         offsetof(struct public_port, stats),
1787                         sizeof(port_stats));
1788
1789         p_common->rx_64_byte_packets += port_stats.eth.r64;
1790         p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1791         p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1792         p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1793         p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1794         p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1795         p_common->rx_crc_errors += port_stats.eth.rfcs;
1796         p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1797         p_common->rx_pause_frames += port_stats.eth.rxpf;
1798         p_common->rx_pfc_frames += port_stats.eth.rxpp;
1799         p_common->rx_align_errors += port_stats.eth.raln;
1800         p_common->rx_carrier_errors += port_stats.eth.rfcr;
1801         p_common->rx_oversize_packets += port_stats.eth.rovr;
1802         p_common->rx_jabbers += port_stats.eth.rjbr;
1803         p_common->rx_undersize_packets += port_stats.eth.rund;
1804         p_common->rx_fragments += port_stats.eth.rfrg;
1805         p_common->tx_64_byte_packets += port_stats.eth.t64;
1806         p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1807         p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1808         p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1809         p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1810         p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1811         p_common->tx_pause_frames += port_stats.eth.txpf;
1812         p_common->tx_pfc_frames += port_stats.eth.txpp;
1813         p_common->rx_mac_bytes += port_stats.eth.rbyte;
1814         p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1815         p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1816         p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1817         p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1818         p_common->tx_mac_bytes += port_stats.eth.tbyte;
1819         p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1820         p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1821         p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1822         p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1823         for (j = 0; j < 8; j++) {
1824                 p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1825                 p_common->brb_discards += port_stats.brb.brb_discard[j];
1826         }
1827
1828         if (QED_IS_BB(p_hwfn->cdev)) {
1829                 struct qed_eth_stats_bb *p_bb = &p_stats->bb;
1830
1831                 p_bb->rx_1519_to_1522_byte_packets +=
1832                     port_stats.eth.u0.bb0.r1522;
1833                 p_bb->rx_1519_to_2047_byte_packets +=
1834                     port_stats.eth.u0.bb0.r2047;
1835                 p_bb->rx_2048_to_4095_byte_packets +=
1836                     port_stats.eth.u0.bb0.r4095;
1837                 p_bb->rx_4096_to_9216_byte_packets +=
1838                     port_stats.eth.u0.bb0.r9216;
1839                 p_bb->rx_9217_to_16383_byte_packets +=
1840                     port_stats.eth.u0.bb0.r16383;
1841                 p_bb->tx_1519_to_2047_byte_packets +=
1842                     port_stats.eth.u1.bb1.t2047;
1843                 p_bb->tx_2048_to_4095_byte_packets +=
1844                     port_stats.eth.u1.bb1.t4095;
1845                 p_bb->tx_4096_to_9216_byte_packets +=
1846                     port_stats.eth.u1.bb1.t9216;
1847                 p_bb->tx_9217_to_16383_byte_packets +=
1848                     port_stats.eth.u1.bb1.t16383;
1849                 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1850                 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1851         } else {
1852                 struct qed_eth_stats_ah *p_ah = &p_stats->ah;
1853
1854                 p_ah->rx_1519_to_max_byte_packets +=
1855                     port_stats.eth.u0.ah0.r1519_to_max;
1856                 p_ah->tx_1519_to_max_byte_packets =
1857                     port_stats.eth.u1.ah1.t1519_to_max;
1858         }
1859
1860         p_common->link_change_count = qed_rd(p_hwfn, p_ptt,
1861                                              p_hwfn->mcp_info->port_addr +
1862                                              offsetof(struct public_port,
1863                                                       link_change_count));
1864 }
1865
1866 static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
1867                                   struct qed_ptt *p_ptt,
1868                                   struct qed_eth_stats *stats,
1869                                   u16 statistics_bin, bool b_get_port_stats)
1870 {
1871         __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1872         __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1873         __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1874         __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1875
1876         if (b_get_port_stats && p_hwfn->mcp_info)
1877                 __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
1878 }
1879
1880 static void _qed_get_vport_stats(struct qed_dev *cdev,
1881                                  struct qed_eth_stats *stats)
1882 {
1883         u8 fw_vport = 0;
1884         int i;
1885
1886         memset(stats, 0, sizeof(*stats));
1887
1888         for_each_hwfn(cdev, i) {
1889                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1890                 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1891                                                     :  NULL;
1892
1893                 if (IS_PF(cdev)) {
1894                         /* The main vport's relative index is 0 */
1895                         if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
1896                                 DP_ERR(p_hwfn, "No vport available!\n");
1897                                 goto out;
1898                         }
1899                 }
1900
1901                 if (IS_PF(cdev) && !p_ptt) {
1902                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1903                         continue;
1904                 }
1905
1906                 __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1907                                       IS_PF(cdev) ? true : false);
1908
1909 out:
1910                 if (IS_PF(cdev) && p_ptt)
1911                         qed_ptt_release(p_hwfn, p_ptt);
1912         }
1913 }
1914
1915 void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
1916 {
1917         u32 i;
1918
1919         if (!cdev) {
1920                 memset(stats, 0, sizeof(*stats));
1921                 return;
1922         }
1923
1924         _qed_get_vport_stats(cdev, stats);
1925
1926         if (!cdev->reset_stats)
1927                 return;
1928
1929         /* Reduce the statistics baseline */
1930         for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
1931                 ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
1932 }
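
/* Note: the baseline subtraction in qed_get_vport_stats() treats
 * struct qed_eth_stats as a flat array of u64 counters; this relies on every
 * member of the struct (including the bb/ah union) being a 64-bit counter.
 */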
1933
1934 /* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
1935 void qed_reset_vport_stats(struct qed_dev *cdev)
1936 {
1937         int i;
1938
1939         for_each_hwfn(cdev, i) {
1940                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
1941                 struct eth_mstorm_per_queue_stat mstats;
1942                 struct eth_ustorm_per_queue_stat ustats;
1943                 struct eth_pstorm_per_queue_stat pstats;
1944                 struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
1945                                                     : NULL;
1946                 u32 addr = 0, len = 0;
1947
1948                 if (IS_PF(cdev) && !p_ptt) {
1949                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1950                         continue;
1951                 }
1952
1953                 memset(&mstats, 0, sizeof(mstats));
1954                 __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1955                 qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
1956
1957                 memset(&ustats, 0, sizeof(ustats));
1958                 __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
1959                 qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
1960
1961                 memset(&pstats, 0, sizeof(pstats));
1962                 __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
1963                 qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
1964
1965                 if (IS_PF(cdev))
1966                         qed_ptt_release(p_hwfn, p_ptt);
1967         }
1968
1969         /* PORT statistics are not necessarily reset, so we need to
1970          * read and create a baseline for future statistics.
1971          * Link change stat is maintained by MFW, return its value as is.
1972          */
1973         if (!cdev->reset_stats) {
1974                 DP_INFO(cdev, "Reset stats not allocated\n");
1975         } else {
1976                 _qed_get_vport_stats(cdev, cdev->reset_stats);
1977                 cdev->reset_stats->common.link_change_count = 0;
1978         }
1979 }
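
/* Usage sketch (hypothetical caller): qed_reset_vport_stats() zeroes the
 * per-queue storm counters and snapshots the port counters into
 * cdev->reset_stats, and qed_get_vport_stats() subtracts that baseline, so
 * readings are relative to the last reset:
 *
 *	struct qed_eth_stats stats;
 *
 *	qed_reset_vport_stats(cdev);
 *	...traffic runs...
 *	qed_get_vport_stats(cdev, &stats);   (counters since the reset)
 */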
1980
1981 static enum gft_profile_type
1982 qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
1983 {
1984         if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
1985                 return GFT_PROFILE_TYPE_4_TUPLE;
1986         if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
1987                 return GFT_PROFILE_TYPE_IP_DST_ADDR;
1988         if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
1989                 return GFT_PROFILE_TYPE_IP_SRC_ADDR;
1990         return GFT_PROFILE_TYPE_L4_DST_PORT;
1991 }
1992
1993 void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
1994                              struct qed_ptt *p_ptt,
1995                              struct qed_arfs_config_params *p_cfg_params)
1996 {
1997         if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
1998                 qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1999                                p_cfg_params->tcp,
2000                                p_cfg_params->udp,
2001                                p_cfg_params->ipv4,
2002                                p_cfg_params->ipv6,
2003                                qed_arfs_mode_to_hsi(p_cfg_params->mode));
2004                 DP_VERBOSE(p_hwfn,
2005                            QED_MSG_SP,
2006                            "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s, mode = %08x\n",
2007                            p_cfg_params->tcp ? "Enable" : "Disable",
2008                            p_cfg_params->udp ? "Enable" : "Disable",
2009                            p_cfg_params->ipv4 ? "Enable" : "Disable",
2010                            p_cfg_params->ipv6 ? "Enable" : "Disable",
2011                            (u32)p_cfg_params->mode);
2012         } else {
2013                 DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
2014                 qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2015         }
2016 }
2017
2018 int
2019 qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
2020                                 struct qed_spq_comp_cb *p_cb,
2021                                 struct qed_ntuple_filter_params *p_params)
2022 {
2023         struct rx_update_gft_filter_data *p_ramrod = NULL;
2024         struct qed_spq_entry *p_ent = NULL;
2025         struct qed_sp_init_data init_data;
2026         u16 abs_rx_q_id = 0;
2027         u8 abs_vport_id = 0;
2028         int rc = -EINVAL;
2029
2030         /* Get SPQ entry */
2031         memset(&init_data, 0, sizeof(init_data));
2032         init_data.cid = qed_spq_get_cid(p_hwfn);
2033
2034         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2035
2036         if (p_cb) {
2037                 init_data.comp_mode = QED_SPQ_MODE_CB;
2038                 init_data.p_comp_data = p_cb;
2039         } else {
2040                 init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
2041         }
2042
2043         rc = qed_sp_init_request(p_hwfn, &p_ent,
2044                                  ETH_RAMROD_GFT_UPDATE_FILTER,
2045                                  PROTOCOLID_ETH, &init_data);
2046         if (rc)
2047                 return rc;
2048
2049         p_ramrod = &p_ent->ramrod.rx_update_gft;
2050
2051         DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
2052         p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
2053
2054         if (p_params->b_is_drop) {
2055                 p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
2056         } else {
2057                 rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
2058                 if (rc)
2059                         goto err;
2060
2061                 if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
2062                         rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
2063                                              &abs_rx_q_id);
2064                         if (rc)
2065                                 goto err;
2066
2067                         p_ramrod->rx_qid_valid = 1;
2068                         p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
2069                 }
2070
2071                 p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
2072         }
2073
2074         p_ramrod->flow_id_valid = 0;
2075         p_ramrod->flow_id = 0;
2076         p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
2077             : GFT_DELETE_FILTER;
2078
2079         DP_VERBOSE(p_hwfn, QED_MSG_SP,
2080                    "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
2081                    abs_vport_id, abs_rx_q_id,
2082                    p_params->b_is_add ? "Adding" : "Removing",
2083                    (u64)p_params->addr, p_params->length);
2084
2085         return qed_spq_post(p_hwfn, p_ent, NULL);
2086
2087 err:
2088         qed_sp_destroy_request(p_hwfn, p_ent);
2089         return rc;
2090 }
2091
2092 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
2093                          struct qed_ptt *p_ptt,
2094                          struct qed_queue_cid *p_cid, u16 *p_rx_coal)
2095 {
2096         u32 coalesce, address, is_valid;
2097         struct cau_sb_entry sb_entry;
2098         u8 timer_res;
2099         int rc;
2100
2101         rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2102                                p_cid->sb_igu_id * sizeof(u64),
2103                                (u64)(uintptr_t)&sb_entry, 2, 0);
2104         if (rc) {
2105                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2106                 return rc;
2107         }
2108
2109         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
2110
2111         address = BAR0_MAP_REG_USDM_RAM +
2112                   USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2113         coalesce = qed_rd(p_hwfn, p_ptt, address);
2114
2115         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2116         if (!is_valid)
2117                 return -EINVAL;
2118
2119         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2120         *p_rx_coal = (u16)(coalesce << timer_res);
2121
2122         return 0;
2123 }
2124
2125 int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
2126                          struct qed_ptt *p_ptt,
2127                          struct qed_queue_cid *p_cid, u16 *p_tx_coal)
2128 {
2129         u32 coalesce, address, is_valid;
2130         struct cau_sb_entry sb_entry;
2131         u8 timer_res;
2132         int rc;
2133
2134         rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2135                                p_cid->sb_igu_id * sizeof(u64),
2136                                (u64)(uintptr_t)&sb_entry, 2, 0);
2137         if (rc) {
2138                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2139                 return rc;
2140         }
2141
2142         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
2143
2144         address = BAR0_MAP_REG_XSDM_RAM +
2145                   XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2146         coalesce = qed_rd(p_hwfn, p_ptt, address);
2147
2148         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2149         if (!is_valid)
2150                 return -EINVAL;
2151
2152         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2153         *p_tx_coal = (u16)(coalesce << timer_res);
2154
2155         return 0;
2156 }
2157
2158 int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
2159 {
2160         struct qed_queue_cid *p_cid = handle;
2161         struct qed_ptt *p_ptt;
2162         int rc = 0;
2163
2164         if (IS_VF(p_hwfn->cdev)) {
2165                 rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2166                 if (rc)
2167                         DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
2168
2169                 return rc;
2170         }
2171
2172         p_ptt = qed_ptt_acquire(p_hwfn);
2173         if (!p_ptt)
2174                 return -EAGAIN;
2175
2176         if (p_cid->b_is_rx) {
2177                 rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2178                 if (rc)
2179                         goto out;
2180         } else {
2181                 rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2182                 if (rc)
2183                         goto out;
2184         }
2185
2186 out:
2187         qed_ptt_release(p_hwfn, p_ptt);
2188
2189         return rc;
2190 }
2191
2192 static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2193                                  struct qed_dev_eth_info *info)
2194 {
2195         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2196         int i;
2197
2198         memset(info, 0, sizeof(*info));
2199
2200         if (IS_PF(cdev)) {
2201                 int max_vf_vlan_filters = 0;
2202                 int max_vf_mac_filters = 0;
2203
2204                 info->num_tc = p_hwfn->hw_info.num_hw_tc;
2205
2206                 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
2207                         u16 num_queues = 0;
2208
2209                         /* Since the feature controls only queue-zones,
2210                          * make sure we have the contexts [rx, tx, xdp] to
2211                          * match.
2212                          */
2213                         for_each_hwfn(cdev, i) {
2214                                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
2215                                 u16 l2_queues = (u16)FEAT_NUM(hwfn,
2216                                                               QED_PF_L2_QUE);
2217                                 u16 cids;
2218
2219                                 cids = hwfn->pf_params.eth_pf_params.num_cons;
2220                                 num_queues += min_t(u16, l2_queues, cids / 3);
2221                         }
2222
2223                         /* queues might theoretically be >256, but interrupts'
2224                          * upper-limit guarantees that it would fit in a u8.
2225                          */
2226                         if (cdev->int_params.fp_msix_cnt) {
2227                                 u8 irqs = cdev->int_params.fp_msix_cnt;
2228
2229                                 info->num_queues = (u8)min_t(u16,
2230                                                              num_queues, irqs);
2231                         }
2232                 } else {
2233                         info->num_queues = cdev->num_hwfns;
2234                 }
2235
2236                 if (IS_QED_SRIOV(cdev)) {
2237                         max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
2238                                               QED_ETH_VF_NUM_VLAN_FILTERS;
2239                         max_vf_mac_filters = cdev->p_iov_info->total_vfs *
2240                                              QED_ETH_VF_NUM_MAC_FILTERS;
2241                 }
2242                 info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2243                                                   QED_VLAN) -
2244                                          max_vf_vlan_filters;
2245                 info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
2246                                                  QED_MAC) -
2247                                         max_vf_mac_filters;
2248
2249                 ether_addr_copy(info->port_mac,
2250                                 cdev->hwfns[0].hw_info.hw_mac_addr);
2251
2252                 info->xdp_supported = true;
2253         } else {
2254                 u16 total_cids = 0;
2255
2256                 info->num_tc = 1;
2257
2258                 /* Determine queues & XDP support */
2259                 for_each_hwfn(cdev, i) {
2260                         struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2261                         u8 queues, cids;
2262
2263                         qed_vf_get_num_cids(p_hwfn, &cids);
2264                         qed_vf_get_num_rxqs(p_hwfn, &queues);
2265                         info->num_queues += queues;
2266                         total_cids += cids;
2267                 }
2268
2269                 /* Enable VF XDP in case the PF guarantees sufficient connections */
2270                 if (total_cids >= info->num_queues * 3)
2271                         info->xdp_supported = true;
2272
2273                 qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
2274                                             (u8 *)&info->num_vlan_filters);
2275                 qed_vf_get_num_mac_filters(&cdev->hwfns[0],
2276                                            (u8 *)&info->num_mac_filters);
2277                 qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
2278
2279                 info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
2280         }
2281
2282         qed_fill_dev_info(cdev, &info->common);
2283
2284         if (IS_VF(cdev))
2285                 eth_zero_addr(info->common.hw_mac);
2286
2287         return 0;
2288 }
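
/* Worked example (hypothetical numbers) for the PF queue accounting above:
 * with, say, 64 queue-zones (FEAT_NUM(hwfn, QED_PF_L2_QUE)) and
 * num_cons = 192 L2 connections per hwfn, each hwfn contributes
 * min(64, 192 / 3) = 64 queues (one rx, tx and xdp context per queue); the
 * sum is then clamped to the number of fastpath MSI-X vectors, so
 * info->num_queues never exceeds fp_msix_cnt.
 */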
2289
2290 static void qed_register_eth_ops(struct qed_dev *cdev,
2291                                  struct qed_eth_cb_ops *ops, void *cookie)
2292 {
2293         cdev->protocol_ops.eth = ops;
2294         cdev->ops_cookie = cookie;
2295
2296         /* For VF, we start bulletin reading */
2297         if (IS_VF(cdev))
2298                 qed_vf_start_iov_wq(cdev);
2299 }
2300
2301 static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
2302 {
2303         if (IS_PF(cdev))
2304                 return true;
2305
2306         return qed_vf_check_mac(&cdev->hwfns[0], mac);
2307 }
2308
2309 static int qed_start_vport(struct qed_dev *cdev,
2310                            struct qed_start_vport_params *params)
2311 {
2312         int rc, i;
2313
2314         for_each_hwfn(cdev, i) {
2315                 struct qed_sp_vport_start_params start = { 0 };
2316                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2317
2318                 start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
2319                                                         QED_TPA_MODE_NONE;
2320                 start.remove_inner_vlan = params->remove_inner_vlan;
2321                 start.only_untagged = true;     /* untagged only */
2322                 start.drop_ttl0 = params->drop_ttl0;
2323                 start.opaque_fid = p_hwfn->hw_info.opaque_fid;
2324                 start.concrete_fid = p_hwfn->hw_info.concrete_fid;
2325                 start.handle_ptp_pkts = params->handle_ptp_pkts;
2326                 start.vport_id = params->vport_id;
2327                 start.max_buffers_per_cqe = 16;
2328                 start.mtu = params->mtu;
2329
2330                 rc = qed_sp_vport_start(p_hwfn, &start);
2331                 if (rc) {
2332                         DP_ERR(cdev, "Failed to start VPORT\n");
2333                         return rc;
2334                 }
2335
2336                 rc = qed_hw_start_fastpath(p_hwfn);
2337                 if (rc) {
2338                         DP_ERR(cdev, "Failed to start VPORT fastpath\n");
2339                         return rc;
2340                 }
2341
2342                 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2343                            "Started V-PORT %d with MTU %d\n",
2344                            start.vport_id, start.mtu);
2345         }
2346
2347         if (params->clear_stats)
2348                 qed_reset_vport_stats(cdev);
2349
2350         return 0;
2351 }
2352
2353 static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
2354 {
2355         int rc, i;
2356
2357         for_each_hwfn(cdev, i) {
2358                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2359
2360                 rc = qed_sp_vport_stop(p_hwfn,
2361                                        p_hwfn->hw_info.opaque_fid, vport_id);
2362
2363                 if (rc) {
2364                         DP_ERR(cdev, "Failed to stop VPORT\n");
2365                         return rc;
2366                 }
2367         }
2368         return 0;
2369 }
2370
2371 static int qed_update_vport_rss(struct qed_dev *cdev,
2372                                 struct qed_update_vport_rss_params *input,
2373                                 struct qed_rss_params *rss)
2374 {
2375         int i, fn;
2376
2377         /* Update configuration with what's correct regardless of CMT */
2378         rss->update_rss_config = 1;
2379         rss->rss_enable = 1;
2380         rss->update_rss_capabilities = 1;
2381         rss->update_rss_ind_table = 1;
2382         rss->update_rss_key = 1;
2383         rss->rss_caps = input->rss_caps;
2384         memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
2385
2386         /* In a regular scenario we would simply take the input handlers.
2387          * But in CMT we have to split the handlers according to the
2388          * engine they were configured on, and then check whether RSS is
2389          * really required, since a single queue per engine under CMT
2390          * does not need RSS.
2391          */
2392         if (cdev->num_hwfns == 1) {
2393                 memcpy(rss->rss_ind_table,
2394                        input->rss_ind_table,
2395                        QED_RSS_IND_TABLE_SIZE * sizeof(void *));
2396                 rss->rss_table_size_log = 7;
2397                 return 0;
2398         }
2399
2400         /* Start by copying the non-specific information to the 2nd copy */
2401         memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
2402
2403         /* CMT should be round-robin */
2404         for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
2405                 struct qed_queue_cid *cid = input->rss_ind_table[i];
2406                 struct qed_rss_params *t_rss;
2407
2408                 if (cid->p_owner == QED_LEADING_HWFN(cdev))
2409                         t_rss = &rss[0];
2410                 else
2411                         t_rss = &rss[1];
2412
2413                 t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
2414         }
2415
2416         /* Make sure RSS is actually required */
2417         for_each_hwfn(cdev, fn) {
2418                 for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
2419                         if (rss[fn].rss_ind_table[i] !=
2420                             rss[fn].rss_ind_table[0])
2421                                 break;
2422                 }
2423                 if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
2424                         DP_VERBOSE(cdev, NETIF_MSG_IFUP,
2425                                    "CMT - 1 queue per-hwfn; Disabling RSS\n");
2426                         return -EINVAL;
2427                 }
2428                 rss[fn].rss_table_size_log = 6;
2429         }
2430
2431         return 0;
2432 }
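
/* Worked example (assuming the usual 128-entry indirection table): on a
 * single-engine device the whole table is used as-is and
 * rss_table_size_log = 7 (2^7 = 128). On a CMT device with two hwfns, the
 * queue-cids were opened round-robin across the engines, so entry i of the
 * input table lands in slot i / 2 of the owning engine's copy; each engine
 * then programs a 64-entry table and rss_table_size_log = 6.
 */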
2433
2434 static int qed_update_vport(struct qed_dev *cdev,
2435                             struct qed_update_vport_params *params)
2436 {
2437         struct qed_sp_vport_update_params sp_params;
2438         struct qed_rss_params *rss;
2439         int rc = 0, i;
2440
2441         if (!cdev)
2442                 return -ENODEV;
2443
2444         rss = vzalloc(array_size(sizeof(*rss), cdev->num_hwfns));
2445         if (!rss)
2446                 return -ENOMEM;
2447
2448         memset(&sp_params, 0, sizeof(sp_params));
2449
2450         /* Translate protocol params into sp params */
2451         sp_params.vport_id = params->vport_id;
2452         sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
2453         sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
2454         sp_params.vport_active_rx_flg = params->vport_active_flg;
2455         sp_params.vport_active_tx_flg = params->vport_active_flg;
2456         sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
2457         sp_params.tx_switching_flg = params->tx_switching_flg;
2458         sp_params.accept_any_vlan = params->accept_any_vlan;
2459         sp_params.update_accept_any_vlan_flg =
2460                 params->update_accept_any_vlan_flg;
2461
2462         /* Prepare the RSS configuration */
2463         if (params->update_rss_flg)
2464                 if (qed_update_vport_rss(cdev, &params->rss_params, rss))
2465                         params->update_rss_flg = 0;
2466
2467         for_each_hwfn(cdev, i) {
2468                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2469
2470                 if (params->update_rss_flg)
2471                         sp_params.rss_params = &rss[i];
2472
2473                 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
2474                 rc = qed_sp_vport_update(p_hwfn, &sp_params,
2475                                          QED_SPQ_MODE_EBLOCK,
2476                                          NULL);
2477                 if (rc) {
2478                         DP_ERR(cdev, "Failed to update VPORT\n");
2479                         goto out;
2480                 }
2481
2482                 DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2483                            "Updated V-PORT %d: active_flag %d [update %d]\n",
2484                            params->vport_id, params->vport_active_flg,
2485                            params->update_vport_active_flg);
2486         }
2487
2488 out:
2489         vfree(rss);
2490         return rc;
2491 }
2492
2493 static int qed_start_rxq(struct qed_dev *cdev,
2494                          u8 rss_num,
2495                          struct qed_queue_start_common_params *p_params,
2496                          u16 bd_max_bytes,
2497                          dma_addr_t bd_chain_phys_addr,
2498                          dma_addr_t cqe_pbl_addr,
2499                          u16 cqe_pbl_size,
2500                          struct qed_rxq_start_ret_params *ret_params)
2501 {
2502         struct qed_hwfn *p_hwfn;
2503         int rc, hwfn_index;
2504
2505         hwfn_index = rss_num % cdev->num_hwfns;
2506         p_hwfn = &cdev->hwfns[hwfn_index];
2507
2508         p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2509         p_params->stats_id = p_params->vport_id;
2510
2511         rc = qed_eth_rx_queue_start(p_hwfn,
2512                                     p_hwfn->hw_info.opaque_fid,
2513                                     p_params,
2514                                     bd_max_bytes,
2515                                     bd_chain_phys_addr,
2516                                     cqe_pbl_addr, cqe_pbl_size, ret_params);
2517         if (rc) {
2518                 DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
2519                 return rc;
2520         }
2521
2522         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2523                    "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2524                    p_params->queue_id, rss_num, p_params->vport_id,
2525                    p_params->p_sb->igu_sb_id);
2526
2527         return 0;
2528 }
2529
2530 static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
2531 {
2532         int rc, hwfn_index;
2533         struct qed_hwfn *p_hwfn;
2534
2535         hwfn_index = rss_id % cdev->num_hwfns;
2536         p_hwfn = &cdev->hwfns[hwfn_index];
2537
2538         rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
2539         if (rc) {
2540                 DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
2541                 return rc;
2542         }
2543
2544         return 0;
2545 }
2546
2547 static int qed_start_txq(struct qed_dev *cdev,
2548                          u8 rss_num,
2549                          struct qed_queue_start_common_params *p_params,
2550                          dma_addr_t pbl_addr,
2551                          u16 pbl_size,
2552                          struct qed_txq_start_ret_params *ret_params)
2553 {
2554         struct qed_hwfn *p_hwfn;
2555         int rc, hwfn_index;
2556
2557         hwfn_index = rss_num % cdev->num_hwfns;
2558         p_hwfn = &cdev->hwfns[hwfn_index];
2559         p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
2560         p_params->stats_id = p_params->vport_id;
2561
2562         rc = qed_eth_tx_queue_start(p_hwfn,
2563                                     p_hwfn->hw_info.opaque_fid,
2564                                     p_params, p_params->tc,
2565                                     pbl_addr, pbl_size, ret_params);
2566
2567         if (rc) {
2568                 DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
2569                 return rc;
2570         }
2571
2572         DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
2573                    "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
2574                    p_params->queue_id, rss_num, p_params->vport_id,
2575                    p_params->p_sb->igu_sb_id);
2576
2577         return 0;
2578 }
2579
2580 #define QED_HW_STOP_RETRY_LIMIT (10)
2581 static int qed_fastpath_stop(struct qed_dev *cdev)
2582 {
2583         int rc;
2584
2585         rc = qed_hw_stop_fastpath(cdev);
2586         if (rc) {
2587                 DP_ERR(cdev, "Failed to stop Fastpath\n");
2588                 return rc;
2589         }
2590
2591         return 0;
2592 }
2593
2594 static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
2595 {
2596         struct qed_hwfn *p_hwfn;
2597         int rc, hwfn_index;
2598
2599         hwfn_index = rss_id % cdev->num_hwfns;
2600         p_hwfn = &cdev->hwfns[hwfn_index];
2601
2602         rc = qed_eth_tx_queue_stop(p_hwfn, handle);
2603         if (rc) {
2604                 DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
2605                 return rc;
2606         }
2607
2608         return 0;
2609 }
2610
2611 static int qed_tunn_configure(struct qed_dev *cdev,
2612                               struct qed_tunn_params *tunn_params)
2613 {
2614         struct qed_tunnel_info tunn_info;
2615         int i, rc;
2616
2617         memset(&tunn_info, 0, sizeof(tunn_info));
2618         if (tunn_params->update_vxlan_port) {
2619                 tunn_info.vxlan_port.b_update_port = true;
2620                 tunn_info.vxlan_port.port = tunn_params->vxlan_port;
2621         }
2622
2623         if (tunn_params->update_geneve_port) {
2624                 tunn_info.geneve_port.b_update_port = true;
2625                 tunn_info.geneve_port.port = tunn_params->geneve_port;
2626         }
2627
2628         for_each_hwfn(cdev, i) {
2629                 struct qed_hwfn *hwfn = &cdev->hwfns[i];
2630                 struct qed_ptt *p_ptt;
2631                 struct qed_tunnel_info *tun;
2632
2633                 tun = &hwfn->cdev->tunnel;
2634                 if (IS_PF(cdev)) {
2635                         p_ptt = qed_ptt_acquire(hwfn);
2636                         if (!p_ptt)
2637                                 return -EAGAIN;
2638                 } else {
2639                         p_ptt = NULL;
2640                 }
2641
2642                 rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
2643                                                QED_SPQ_MODE_EBLOCK, NULL);
2644                 if (rc) {
2645                         if (IS_PF(cdev))
2646                                 qed_ptt_release(hwfn, p_ptt);
2647                         return rc;
2648                 }
2649
2650                 if (IS_PF_SRIOV(hwfn)) {
2651                         u16 vxlan_port, geneve_port;
2652                         int j;
2653
2654                         vxlan_port = tun->vxlan_port.port;
2655                         geneve_port = tun->geneve_port.port;
2656
2657                         qed_for_each_vf(hwfn, j) {
2658                                 qed_iov_bulletin_set_udp_ports(hwfn, j,
2659                                                                vxlan_port,
2660                                                                geneve_port);
2661                         }
2662
2663                         qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
2664                 }
2665                 if (IS_PF(cdev))
2666                         qed_ptt_release(hwfn, p_ptt);
2667         }
2668
2669         return 0;
2670 }
2671
2672 static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2673                                         enum qed_filter_rx_mode_type type)
2674 {
2675         struct qed_filter_accept_flags accept_flags;
2676
2677         memset(&accept_flags, 0, sizeof(accept_flags));
2678
2679         accept_flags.update_rx_mode_config = 1;
2680         accept_flags.update_tx_mode_config = 1;
2681         accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2682                                         QED_ACCEPT_MCAST_MATCHED |
2683                                         QED_ACCEPT_BCAST;
2684         accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
2685                                         QED_ACCEPT_MCAST_MATCHED |
2686                                         QED_ACCEPT_BCAST;
2687
2688         if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2689                 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2690                                                  QED_ACCEPT_MCAST_UNMATCHED;
2691                 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2692         } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2693                 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2694                 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2695         }
2696
2697         return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
2698                                      QED_SPQ_MODE_CB, NULL);
2699 }
2700
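/* Build a unicast filter command - MAC, VLAN or MAC+VLAN, depending on which
 * of the two is valid - and apply it to both the Rx and Tx directions.
 */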
2701 static int qed_configure_filter_ucast(struct qed_dev *cdev,
2702                                       struct qed_filter_ucast_params *params)
2703 {
2704         struct qed_filter_ucast ucast;
2705
2706         if (!params->vlan_valid && !params->mac_valid) {
2707                 DP_NOTICE(cdev,
2708                           "Tried configuring a unicast filter, but neither MAC nor VLAN is set\n");
2709                 return -EINVAL;
2710         }
2711
2712         memset(&ucast, 0, sizeof(ucast));
2713         switch (params->type) {
2714         case QED_FILTER_XCAST_TYPE_ADD:
2715                 ucast.opcode = QED_FILTER_ADD;
2716                 break;
2717         case QED_FILTER_XCAST_TYPE_DEL:
2718                 ucast.opcode = QED_FILTER_REMOVE;
2719                 break;
2720         case QED_FILTER_XCAST_TYPE_REPLACE:
2721                 ucast.opcode = QED_FILTER_REPLACE;
2722                 break;
2723         default:
2724                 DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
2725                           params->type);
                     /* Reject unknown types rather than fall through with a
                      * zeroed (ADD) opcode.
                      */
                     return -EINVAL;
2726         }
2727
2728         if (params->vlan_valid && params->mac_valid) {
2729                 ucast.type = QED_FILTER_MAC_VLAN;
2730                 ether_addr_copy(ucast.mac, params->mac);
2731                 ucast.vlan = params->vlan;
2732         } else if (params->mac_valid) {
2733                 ucast.type = QED_FILTER_MAC;
2734                 ether_addr_copy(ucast.mac, params->mac);
2735         } else {
2736                 ucast.type = QED_FILTER_VLAN;
2737                 ucast.vlan = params->vlan;
2738         }
2739
2740         ucast.is_rx_filter = true;
2741         ucast.is_tx_filter = true;
2742
2743         return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
2744 }
2745
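/* Build a multicast filter command from the caller's MAC list and submit it
 * via the slowpath queue.
 */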
2746 static int qed_configure_filter_mcast(struct qed_dev *cdev,
2747                                       struct qed_filter_mcast_params *params)
2748 {
2749         struct qed_filter_mcast mcast;
2750         int i;
2751
2752         memset(&mcast, 0, sizeof(mcast));
2753         switch (params->type) {
2754         case QED_FILTER_XCAST_TYPE_ADD:
2755                 mcast.opcode = QED_FILTER_ADD;
2756                 break;
2757         case QED_FILTER_XCAST_TYPE_DEL:
2758                 mcast.opcode = QED_FILTER_REMOVE;
2759                 break;
2760         default:
2761                 DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
2762                           params->type);
                     /* Reject unknown types rather than fall through with a
                      * zeroed (ADD) opcode.
                      */
                     return -EINVAL;
2763         }
2764
2765         mcast.num_mc_addrs = params->num;
2766         for (i = 0; i < mcast.num_mc_addrs; i++)
2767                 ether_addr_copy(mcast.mac[i], params->mac[i]);
2768
2769         return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
2770 }
2771
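/* Dispatch a generic filter request to the unicast, multicast or Rx-mode
 * handler according to its type.
 */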
2772 static int qed_configure_filter(struct qed_dev *cdev,
2773                                 struct qed_filter_params *params)
2774 {
2775         enum qed_filter_rx_mode_type accept_flags;
2776
2777         switch (params->type) {
2778         case QED_FILTER_TYPE_UCAST:
2779                 return qed_configure_filter_ucast(cdev, &params->filter.ucast);
2780         case QED_FILTER_TYPE_MCAST:
2781                 return qed_configure_filter_mcast(cdev, &params->filter.mcast);
2782         case QED_FILTER_TYPE_RX_MODE:
2783                 accept_flags = params->filter.accept_flags;
2784                 return qed_configure_filter_rx_mode(cdev, accept_flags);
2785         default:
2786                 DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
2787                 return -EINVAL;
2788         }
2789 }
2790
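/* Configure the aRFS searcher mode on the leading hwfn, covering TCP and UDP
 * over both IPv4 and IPv6.
 */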
2791 static int qed_configure_arfs_searcher(struct qed_dev *cdev,
2792                                        enum qed_filter_config_mode mode)
2793 {
2794         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2795         struct qed_arfs_config_params arfs_config_params;
2796
2797         memset(&arfs_config_params, 0, sizeof(arfs_config_params));
2798         arfs_config_params.tcp = true;
2799         arfs_config_params.udp = true;
2800         arfs_config_params.ipv4 = true;
2801         arfs_config_params.ipv6 = true;
2802         arfs_config_params.mode = mode;
2803         qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
2804                                 &arfs_config_params);
2805         return 0;
2806 }
2807
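/* Slowpath completion callback for aRFS filter ramrods; forwards the firmware
 * return code to the protocol driver through its arfs_filter_op() hook.
 */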
2808 static void
2809 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
2810                              void *cookie,
2811                              union event_ring_data *data, u8 fw_return_code)
2812 {
2813         struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
2814         void *dev = p_hwfn->cdev->ops_cookie;
2815
2816         op->arfs_filter_op(dev, cookie, fw_return_code);
2817 }
2818
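/* Configure an n-tuple (aRFS) filter. For a VF-owned filter the VF id is
 * validated and translated into the matching vport, and the queue is set to
 * the RSS indirection value; completion is reported asynchronously via
 * qed_arfs_sp_response_handler().
 */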
2819 static int
2820 qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
2821                               void *cookie,
2822                               struct qed_ntuple_filter_params *params)
2823 {
2824         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2825         struct qed_spq_comp_cb cb;
2826         int rc = -EINVAL;
2827
2828         cb.function = qed_arfs_sp_response_handler;
2829         cb.cookie = cookie;
2830
2831         if (params->b_is_vf) {
2832                 if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
2833                                            false)) {
2834                         DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
2835                                 params->vf_id);
2836                         return rc;
2837                 }
2838
2839                 params->vport_id = params->vf_id + 1;
2840                 params->qid = QED_RFS_NTUPLE_QID_RSS;
2841         }
2842
2843         rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
2844         if (rc)
2845                 DP_NOTICE(p_hwfn,
2846                           "Failed to issue a-RFS filter configuration\n");
2847         else
2848                 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
2849                            "Successfully issued a-RFS filter configuration\n");
2850
2851         return rc;
2852 }
2853
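/* Read back the current coalescing value of the queue that owns @handle. */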
2854 static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
2855 {
2856         struct qed_queue_cid *p_cid = handle;
2857         struct qed_hwfn *p_hwfn;
2858         int rc;
2859
2860         p_hwfn = p_cid->p_owner;
2861         rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
2862         if (rc)
2863                 DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
2864
2865         return rc;
2866 }
2867
2868 static int qed_fp_cqe_completion(struct qed_dev *dev,
2869                                  u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
2870 {
2871         return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
2872                                       cqe);
2873 }
2874
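/* VF-only request asking the PF to publish @mac through the VF's bulletin
 * board; a no-op when running as PF.
 */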
2875 static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
2876 {
2877         int i, ret;
2878
2879         if (IS_PF(cdev))
2880                 return 0;
2881
2882         for_each_hwfn(cdev, i) {
2883                 struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
2884
2885                 ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
2886                 if (ret)
2887                         return ret;
2888         }
2889
2890         return 0;
2891 }
2892
2893 #ifdef CONFIG_QED_SRIOV
2894 extern const struct qed_iov_hv_ops qed_iov_ops_pass;
2895 #endif
2896
2897 #ifdef CONFIG_DCB
2898 extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
2899 #endif
2900
2901 extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
2902
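/* L2 ops table exposed to the protocol driver (e.g. qede) via
 * qed_get_eth_ops(); the SR-IOV and DCB entries are populated only when the
 * corresponding kernel support is built in.
 */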
2903 static const struct qed_eth_ops qed_eth_ops_pass = {
2904         .common = &qed_common_ops_pass,
2905 #ifdef CONFIG_QED_SRIOV
2906         .iov = &qed_iov_ops_pass,
2907 #endif
2908 #ifdef CONFIG_DCB
2909         .dcb = &qed_dcbnl_ops_pass,
2910 #endif
2911         .ptp = &qed_ptp_ops_pass,
2912         .fill_dev_info = &qed_fill_eth_dev_info,
2913         .register_ops = &qed_register_eth_ops,
2914         .check_mac = &qed_check_mac,
2915         .vport_start = &qed_start_vport,
2916         .vport_stop = &qed_stop_vport,
2917         .vport_update = &qed_update_vport,
2918         .q_rx_start = &qed_start_rxq,
2919         .q_rx_stop = &qed_stop_rxq,
2920         .q_tx_start = &qed_start_txq,
2921         .q_tx_stop = &qed_stop_txq,
2922         .filter_config = &qed_configure_filter,
2923         .fastpath_stop = &qed_fastpath_stop,
2924         .eth_cqe_completion = &qed_fp_cqe_completion,
2925         .get_vport_stats = &qed_get_vport_stats,
2926         .tunn_config = &qed_tunn_configure,
2927         .ntuple_filter_config = &qed_ntuple_arfs_filter_config,
2928         .configure_arfs_searcher = &qed_configure_arfs_searcher,
2929         .get_coalesce = &qed_get_coalesce,
2930         .req_bulletin_update_mac = &qed_req_bulletin_update_mac,
2931 };
2932
2933 const struct qed_eth_ops *qed_get_eth_ops(void)
2934 {
2935         return &qed_eth_ops_pass;
2936 }
2937 EXPORT_SYMBOL(qed_get_eth_ops);
2938
2939 void qed_put_eth_ops(void)
2940 {
2941         /* TODO - reference count for module? */
2942 }
2943 EXPORT_SYMBOL(qed_put_eth_ops);