2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/device.h>
33 #include <linux/netdevice.h>
36 #include "en/port_buffer.h"
38 #define MLX5E_100MB (100000)
39 #define MLX5E_1GB (1000000)
41 #define MLX5E_CEE_STATE_UP 1
42 #define MLX5E_CEE_STATE_DOWN 0
44 /* Max supported cable length is 1000 meters */
45 #define MLX5E_MAX_CABLE_LENGTH 1000
48 MLX5E_VENDOR_TC_GROUP_NUM = 7,
49 MLX5E_LOWEST_PRIO_GROUP = 0,
52 #define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
53 MLX5_CAP_QCAM_REG(mdev, qpts) && \
54 MLX5_CAP_QCAM_REG(mdev, qpdpm))
56 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
57 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
59 /* If dcbx mode is non-host set the dcbx mode to host.
/* Program the admin DCBX parameters on the port: set version_admin to
 * @mode and, for non-host modes, raise willing_admin so the firmware
 * agent may negotiate.
 * NOTE(review): this listing is truncated — the `int err;` declaration
 * and the error check after the query call are not visible here.
 */
61 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
62 enum mlx5_dcbx_oper_mode mode)
64 struct mlx5_core_dev *mdev = priv->mdev;
65 u32 param[MLX5_ST_SZ_DW(dcbx_param)];
68 err = mlx5_query_port_dcbx_param(mdev, param);
/* Read-modify-write: only version_admin/willing_admin are changed. */
72 MLX5_SET(dcbx_param, param, version_admin, mode);
73 if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
74 MLX5_SET(dcbx_param, param, willing_admin, 1);
76 return mlx5_set_port_dcbx_param(mdev, param);
/* Move the DCBX agent to host-controlled mode, if the dcbx capability is
 * present and we are not already in host mode; cache the new mode in
 * priv->dcbx on success.
 * NOTE(review): truncated listing — err declaration, the early-return
 * bodies and the final return are not visible in this chunk.
 */
79 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
81 struct mlx5e_dcbx *dcbx = &priv->dcbx;
84 if (!MLX5_CAP_GEN(priv->mdev, dcbx))
87 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
90 err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
94 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
/* dcbnl .ieee_getets: report the port ETS state read back from firmware —
 * prio->tc mapping, per-tc TX bandwidth and the TSA type per tc.
 * A tc in group (LOWEST_PRIO_GROUP + 1) with bw < MAX signals the
 * "0% ETS tc" encoding produced by mlx5e_build_tc_group() below.
 * NOTE(review): truncated listing — err/i declarations, error-check
 * branches after each query and the final return are not visible here.
 */
98 static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
101 struct mlx5e_priv *priv = netdev_priv(netdev);
102 struct mlx5_core_dev *mdev = priv->mdev;
103 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
104 bool is_tc_group_6_exist = false;
105 bool is_zero_bw_ets_tc = false;
109 if (!MLX5_CAP_GEN(priv->mdev, ets))
112 ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
113 for (i = 0; i < ets->ets_cap; i++) {
114 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
118 err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
122 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
126 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
127 tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
128 is_zero_bw_ets_tc = true;
130 if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
131 is_tc_group_6_exist = true;
134 /* Report 0% ETS tc if it exists: tcs left in the lowest-prio group
 * carry 0% of the bandwidth when the zero-bw encoding is in use.
 */
135 if (is_zero_bw_ets_tc) {
136 for (i = 0; i < ets->ets_cap; i++)
137 if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
138 ets->tc_tx_bw[i] = 0;
141 /* Update tc_tsa based on fw setting*/
142 for (i = 0; i < ets->ets_cap; i++) {
143 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
144 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
145 else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
146 !is_tc_group_6_exist)
147 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
149 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
/* Translate the IEEE TSA types into hardware tc-group numbers:
 * vendor tcs get MLX5E_VENDOR_TC_GROUP_NUM, strict tcs get consecutive
 * groups above the ETS group(s), and ETS tcs go to group 0 — or group 1
 * when the zero-bw encoding is active and the tc has non-zero bandwidth.
 * NOTE(review): truncated listing — the `int i;` / strict_group
 * declarations, `break`s in the switch and closing braces are missing.
 */
154 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
156 bool any_tc_mapped_to_ets = false;
157 bool ets_zero_bw = false;
/* First pass: detect whether any tc is ETS and whether any ETS tc
 * requests 0% bandwidth (presumably sets ets_zero_bw in the elided
 * lines — TODO confirm against the full source).
 */
161 for (i = 0; i <= max_tc; i++) {
162 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
163 any_tc_mapped_to_ets = true;
164 if (!ets->tc_tx_bw[i])
169 /* strict group has higher priority than ets group */
170 strict_group = MLX5E_LOWEST_PRIO_GROUP;
171 if (any_tc_mapped_to_ets)
/* Second pass: assign the group number per tc according to its TSA. */
176 for (i = 0; i <= max_tc; i++) {
177 switch (ets->tc_tsa[i]) {
178 case IEEE_8021QAZ_TSA_VENDOR:
179 tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
181 case IEEE_8021QAZ_TSA_STRICT:
182 tc_group[i] = strict_group++;
184 case IEEE_8021QAZ_TSA_ETS:
185 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
186 if (ets->tc_tx_bw[i] && ets_zero_bw)
187 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
/* Compute the per-tc TX bandwidth to program: vendor and strict tcs get
 * the full allocation; ETS tcs keep their requested share, while ETS tcs
 * with 0% bandwidth split MLX5E_MAX_BW_ALLOC equally among themselves
 * (the division remainder is added to the last such tc so the group
 * totals exactly 100%).
 * NOTE(review): truncated listing — `int i;`, the num_ets_zero_bw
 * increment, `break`s and closing braces are not visible here.
 */
193 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
194 u8 *tc_group, int max_tc)
196 int bw_for_ets_zero_bw_tc = 0;
197 int last_ets_zero_bw_tc = -1;
198 int num_ets_zero_bw = 0;
/* First pass: count zero-bw ETS tcs and remember the last one. */
201 for (i = 0; i <= max_tc; i++) {
202 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
205 last_ets_zero_bw_tc = i;
210 bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
/* Second pass: fill tc_tx_bw[] according to each tc's TSA type. */
212 for (i = 0; i <= max_tc; i++) {
213 switch (ets->tc_tsa[i]) {
214 case IEEE_8021QAZ_TSA_VENDOR:
215 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
217 case IEEE_8021QAZ_TSA_STRICT:
218 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
220 case IEEE_8021QAZ_TSA_ETS:
221 tc_tx_bw[i] = ets->tc_tx_bw[i] ?
223 bw_for_ets_zero_bw_tc;
228 /* Make sure the total bw for the ets zero-bw group is exactly 100% */
229 if (last_ets_zero_bw_tc != -1)
230 tc_tx_bw[last_ets_zero_bw_tc] +=
231 MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
234 /* If there are ETS tcs with 0% BW:
235 * Set ETS group #1 for all ETS tcs with non-zero BW. Their sum must be 100%.
236 * Set group #0 for all the ETS tcs with 0% BW and
237 * equally split the 100% BW between them.
238 * Report both group #0 and #1 as ETS type.
239 * All the tcs in group #0 will be reported with 0% BW.
 */
/* Apply an IEEE ETS configuration to the port: build the tc-group and
 * tc-bandwidth tables, then program prio->tc mapping, tc groups and tc
 * bandwidth allocation, caching tc_tsa on success.
 * NOTE(review): truncated listing — err/i declarations, the error checks
 * after each set call and the final return are not visible here.
 */
241 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
243 struct mlx5_core_dev *mdev = priv->mdev;
244 u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
245 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
246 int max_tc = mlx5_max_tc(mdev);
249 mlx5e_build_tc_group(ets, tc_group, max_tc);
250 mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
252 err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
256 err = mlx5_set_port_tc_group(mdev, tc_group);
260 err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
/* Cache the accepted TSA configuration for later ieee_getets calls. */
265 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
267 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
268 mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
269 __func__, i, ets->prio_tc[i]);
270 mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
271 __func__, i, tc_tx_bw[i], tc_group[i]);
/* Validate an ETS request before applying it: every priority must map to
 * a tc below MLX5E_MAX_PRIORITY, and the bandwidth of all ETS tcs must
 * sum to 100% (a zero sum is tolerated only when @zero_sum_allowed).
 * NOTE(review): truncated listing — i/bw_sum declarations, the
 * have_ets_tc assignment, error returns and the final return are missing.
 */
277 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
278 struct ieee_ets *ets,
279 bool zero_sum_allowed)
281 bool have_ets_tc = false;
285 /* Validate Priority */
286 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
287 if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
289 "Failed to validate ETS: priority value greater than max(%d)\n",
295 /* Validate Bandwidth Sum */
296 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
297 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
299 bw_sum += ets->tc_tx_bw[i];
303 if (have_ets_tc && bw_sum != 100) {
304 if (bw_sum || (!bw_sum && !zero_sum_allowed))
306 "Failed to validate ETS: BW sum is illegal\n");
/* dcbnl .ieee_setets: validate the request (zero bw sum not allowed for
 * the IEEE path) then apply it via mlx5e_dcbnl_ieee_setets_core().
 * NOTE(review): truncated listing — err declaration, capability-check
 * return, error checks and the final return are not visible here.
 */
312 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
313 struct ieee_ets *ets)
315 struct mlx5e_priv *priv = netdev_priv(netdev);
318 if (!MLX5_CAP_GEN(priv->mdev, ets))
321 err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
325 err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
/* dcbnl .ieee_getpfc: report PFC capability, per-priority pause request/
 * indication counters from the cached pport stats, the cable length (as
 * the delay field, when port buffers are supported) and the PFC enable
 * bitmap queried from firmware.
 */
332 static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
333 struct ieee_pfc *pfc)
335 struct mlx5e_priv *priv = netdev_priv(dev);
336 struct mlx5_core_dev *mdev = priv->mdev;
337 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
340 pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
341 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
342 pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
343 pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
346 if (MLX5_BUFFER_SUPPORTED(mdev))
347 pfc->delay = priv->dcbx.cable_len;
349 return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
/* dcbnl .ieee_setpfc: apply a new PFC enable bitmap (toggling the link to
 * make it take effect) and/or a new cable length, then reconfigure the
 * port buffers when manual buffer mode is active. On a buffer-config
 * failure the cached cable length is rolled back.
 * NOTE(review): truncated listing — ret/changed/curr_pfc_en declarations,
 * several error checks and the final return are not visible here.
 */
352 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
353 struct ieee_pfc *pfc)
355 struct mlx5e_priv *priv = netdev_priv(dev);
356 struct mlx5_core_dev *mdev = priv->mdev;
357 u32 old_cable_len = priv->dcbx.cable_len;
358 struct ieee_pfc pfc_new;
364 mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
365 if (pfc->pfc_en != curr_pfc_en) {
366 ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
/* PFC changes require a link toggle to be applied by the port. */
369 mlx5_toggle_port_link(mdev);
370 changed |= MLX5E_PORT_BUFFER_PFC;
374 pfc->delay < MLX5E_MAX_CABLE_LENGTH &&
375 pfc->delay != priv->dcbx.cable_len) {
376 priv->dcbx.cable_len = pfc->delay;
377 changed |= MLX5E_PORT_BUFFER_CABLE_LEN;
380 if (MLX5_BUFFER_SUPPORTED(mdev)) {
381 pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
382 if (priv->dcbx.manual_buffer)
383 ret = mlx5e_port_manual_buffer_config(priv, changed,
/* Roll back the cached cable length if buffer reconfig failed. */
387 if (ret && (changed & MLX5E_PORT_BUFFER_CABLE_LEN))
388 priv->dcbx.cable_len = old_cable_len;
393 "%s: PFC per priority bit mask: 0x%x\n",
394 __func__, pfc->pfc_en);
/* dcbnl .getdcbx: return the cached DCBX capability flags. */
399 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
401 struct mlx5e_priv *priv = netdev_priv(dev);
403 return priv->dcbx.cap;
/* dcbnl .setdcbx: switch DCBX control between firmware (mode == 0, AUTO)
 * and host. LLD-managed mode is rejected; host mode is required for any
 * non-zero mode. Returns the dcbnl convention of 0/1 for success/failure
 * (return statements are elided in this truncated listing).
 */
406 static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
408 struct mlx5e_priv *priv = netdev_priv(dev);
409 struct mlx5e_dcbx *dcbx = &priv->dcbx;
411 if (mode & DCB_CAP_DCBX_LLD_MANAGED)
414 if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
415 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
418 /* set dcbx to fw controlled */
419 if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
420 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
421 dcbx->cap &= ~DCB_CAP_DCBX_HOST;
428 if (!(mode & DCB_CAP_DCBX_HOST))
431 if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
/* dcbnl .ieee_setapp: install a DSCP->priority app entry. Switches the
 * port to DSCP trust state on the first entry, programs the mapping in
 * firmware, replaces any previous app-table entry for this DSCP value and
 * bumps the entry counter. On a later failure the trust state is reverted
 * to PCP (the fw_err label path, partially elided in this listing).
 * NOTE(review): truncated listing — err declaration, selector/cap
 * rejection returns and several error checks are not visible here.
 */
439 static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
441 struct mlx5e_priv *priv = netdev_priv(dev);
446 if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
449 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
452 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
455 if (app->protocol >= MLX5E_MAX_DSCP)
458 /* Save the old entry info */
459 temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
460 temp.protocol = app->protocol;
461 temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
463 /* Check if need to switch to dscp trust state */
464 if (!priv->dcbx.dscp_app_cnt) {
465 err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
470 /* Skip the fw command if new and old mapping are the same */
471 if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
472 err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
477 /* Delete the old entry if exists */
479 err = dcb_ieee_delapp(dev, &temp);
483 /* Add new entry and update counter */
484 err = dcb_ieee_setapp(dev, app);
489 priv->dcbx.dscp_app_cnt++;
/* Error path: fall back to PCP trust if the fw update failed. */
494 mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
/* dcbnl .ieee_delapp: remove a DSCP->priority app entry. The entry must
 * match the current firmware mapping; the mapping is reset to priority 0,
 * the counter decremented, and the port falls back to PCP trust state
 * when the last DSCP entry is removed.
 * NOTE(review): truncated listing — err declaration, rejection returns,
 * error checks and the final return are not visible here.
 */
498 static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
500 struct mlx5e_priv *priv = netdev_priv(dev);
503 if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
506 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
509 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
512 if (app->protocol >= MLX5E_MAX_DSCP)
515 /* Skip if no dscp app entry */
516 if (!priv->dcbx.dscp_app_cnt)
519 /* Check if the entry matches fw setting */
520 if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
523 /* Delete the app entry */
524 err = dcb_ieee_delapp(dev, app);
528 /* Reset the priority mapping back to zero */
529 err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
533 priv->dcbx.dscp_app_cnt--;
535 /* Check if need to switch to pcp trust state */
536 if (!priv->dcbx.dscp_app_cnt)
537 err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
/* Error path: best-effort fallback to PCP trust. */
542 mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
/* dcbnl .ieee_getmaxrate: query the per-tc rate limit from firmware and
 * convert (value, unit) pairs into absolute rates — units are either
 * 100 Mbps or 1 Gbps multiples; MLX5_BW_NO_LIMIT reports 0 (unlimited).
 * NOTE(review): truncated listing — err/i declarations, error check,
 * `break`s, the GBPS case label and the final return are missing.
 */
546 static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
547 struct ieee_maxrate *maxrate)
549 struct mlx5e_priv *priv = netdev_priv(netdev);
550 struct mlx5_core_dev *mdev = priv->mdev;
551 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
552 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
556 err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
560 memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
562 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
563 switch (max_bw_unit[i]) {
564 case MLX5_100_MBPS_UNIT:
565 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
568 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
570 case MLX5_BW_NO_LIMIT:
/* Unexpected unit from firmware — loud warning, not silent. */
573 WARN(true, "non-supported BW unit");
/* dcbnl .ieee_setmaxrate: convert requested per-tc rates into the
 * firmware's (value, unit) encoding. Rates below the 100 Mbps-unit
 * ceiling (255 * 100 Mb rounded up to 1 Gb) use the 100 Mbps unit
 * (clamped up to at least 1), larger rates use the 1 Gbps unit, and a
 * zero rate means no limit. The table is then programmed via
 * mlx5_modify_port_ets_rate_limit().
 * NOTE(review): truncated listing — `int i;`, `continue`/divisor lines
 * inside the loop and the `} else {` are not visible here.
 */
581 static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
582 struct ieee_maxrate *maxrate)
584 struct mlx5e_priv *priv = netdev_priv(netdev);
585 struct mlx5_core_dev *mdev = priv->mdev;
586 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
587 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
588 __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
591 memset(max_bw_value, 0, sizeof(max_bw_value));
592 memset(max_bw_unit, 0, sizeof(max_bw_unit));
594 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
595 if (!maxrate->tc_maxrate[i]) {
596 max_bw_unit[i] = MLX5_BW_NO_LIMIT;
599 if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
600 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
602 max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
603 max_bw_unit[i] = MLX5_100_MBPS_UNIT;
605 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
607 max_bw_unit[i] = MLX5_GBPS_UNIT;
611 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
612 mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
613 __func__, i, max_bw_value[i]);
616 return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
/* dcbnl .setall (CEE): commit the staged CEE configuration — translate
 * the cached priority-group settings into an IEEE ETS struct, validate
 * (zero bandwidth sum allowed on this path), apply ETS, then build and
 * apply the PFC enable bitmap. Returns MLX5_DCB_CHG_RESET on success,
 * MLX5_DCB_NO_CHG otherwise.
 * NOTE(review): truncated listing — `struct ieee_ets ets;` /
 * `struct ieee_pfc pfc;` / `int i;` declarations, goto-out error paths
 * and netdev_err call sites are not fully visible here.
 */
619 static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
621 struct mlx5e_priv *priv = netdev_priv(netdev);
622 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
623 struct mlx5_core_dev *mdev = priv->mdev;
626 int err = -EOPNOTSUPP;
629 if (!MLX5_CAP_GEN(mdev, ets))
632 memset(&ets, 0, sizeof(ets));
633 memset(&pfc, 0, sizeof(pfc));
635 ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
636 for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
637 ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
638 ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
639 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
640 ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
642 "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
643 __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
647 err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
651 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
654 "%s, Failed to set ETS: %d\n", __func__, err);
659 pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
660 if (!cee_cfg->pfc_enable)
/* Assemble the PFC bitmap from the staged per-priority settings. */
663 for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
664 pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
666 err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
669 "%s, Failed to set PFC: %d\n", __func__, err);
673 return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
/* dcbnl .getstate (CEE): DCB is always reported as enabled. */
676 static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
678 return MLX5E_CEE_STATE_UP;
/* dcbnl .getpermhwaddr (CEE): fill @perm_addr with the NIC vport MAC.
 * The buffer is pre-filled with 0xff so unused bytes are well-defined.
 * NOTE(review): truncated listing — a NULL-check on perm_addr is likely
 * elided here; confirm against the full source.
 */
681 static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
684 struct mlx5e_priv *priv = netdev_priv(netdev);
689 memset(perm_addr, 0xff, MAX_ADDR_LEN);
691 mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
/* dcbnl .setpgtccfgtx (CEE): stage a priority -> priority-group mapping
 * in the cached CEE config (applied later by .setall). Out-of-range
 * priority or pgid is rejected with an error log.
 */
694 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
695 int priority, u8 prio_type,
696 u8 pgid, u8 bw_pct, u8 up_map)
698 struct mlx5e_priv *priv = netdev_priv(netdev);
699 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
701 if (priority >= CEE_DCBX_MAX_PRIO) {
703 "%s, priority is out of range\n", __func__);
707 if (pgid >= CEE_DCBX_MAX_PGS) {
709 "%s, priority group is out of range\n", __func__);
713 cee_cfg->prio_to_pg_map[priority] = pgid;
/* dcbnl .setpgbwgcfgtx (CEE): stage a priority-group bandwidth percent
 * in the cached CEE config (applied later by .setall).
 */
716 static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
719 struct mlx5e_priv *priv = netdev_priv(netdev);
720 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
722 if (pgid >= CEE_DCBX_MAX_PGS) {
724 "%s, priority group is out of range\n", __func__);
728 cee_cfg->pg_bw_pct[pgid] = bw_pct;
/* dcbnl .getpgtccfgtx (CEE): report the priority -> tc (pgid) mapping by
 * querying the port directly; requires the ets capability.
 * NOTE(review): truncated listing — the out-parameter defaulting between
 * the range check and the query is not visible here.
 */
731 static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
732 int priority, u8 *prio_type,
733 u8 *pgid, u8 *bw_pct, u8 *up_map)
735 struct mlx5e_priv *priv = netdev_priv(netdev);
736 struct mlx5_core_dev *mdev = priv->mdev;
738 if (!MLX5_CAP_GEN(priv->mdev, ets)) {
739 netdev_err(netdev, "%s, ets is not supported\n", __func__);
743 if (priority >= CEE_DCBX_MAX_PRIO) {
745 "%s, priority is out of range\n", __func__);
753 if (mlx5_query_port_prio_tc(mdev, priority, pgid))
/* dcbnl .getpgbwgcfgtx (CEE): report a priority group's TX bandwidth by
 * reusing the IEEE getets path and reading the matching tc entry.
 * NOTE(review): the local `struct ieee_ets ets;` declaration is elided
 * in this truncated listing.
 */
757 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
758 int pgid, u8 *bw_pct)
762 if (pgid >= CEE_DCBX_MAX_PGS) {
764 "%s, priority group is out of range\n", __func__);
768 mlx5e_dcbnl_ieee_getets(netdev, &ets);
769 *bw_pct = ets.tc_tx_bw[pgid];
/* dcbnl .setpfccfg (CEE): stage a per-priority PFC setting in the cached
 * CEE config (applied later by .setall).
 */
772 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
773 int priority, u8 setting)
775 struct mlx5e_priv *priv = netdev_priv(netdev);
776 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
778 if (priority >= CEE_DCBX_MAX_PRIO) {
780 "%s, priority is out of range\n", __func__);
787 cee_cfg->pfc_setting[priority] = setting;
/* Helper: read the current PFC bitmap via ieee_getpfc and extract the
 * enable bit for @priority into *@setting.
 * NOTE(review): truncated listing — the return type line, err/pfc
 * declarations and the final return are not visible here.
 */
791 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
792 int priority, u8 *setting)
797 err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
802 *setting = (pfc.pfc_en >> priority) & 0x01;
/* dcbnl .getpfccfg (CEE): report the PFC enable bit for one priority,
 * rejecting out-of-range priorities with an error log.
 */
807 static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
808 int priority, u8 *setting)
810 if (priority >= CEE_DCBX_MAX_PRIO) {
812 "%s, priority is out of range\n", __func__);
819 mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
/* dcbnl .getcap (CEE): report capability flags per attribute. PG/PFC tc
 * counts are encoded as a bitmask (1 << max_tc); the DCBX attribute
 * reports the cached cap plus both CEE and IEEE version bits.
 * NOTE(review): truncated listing — the capid parameter, *cap values for
 * several cases, `break`s and the return are not visible here.
 */
822 static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
825 struct mlx5e_priv *priv = netdev_priv(netdev);
826 struct mlx5_core_dev *mdev = priv->mdev;
830 case DCB_CAP_ATTR_PG:
833 case DCB_CAP_ATTR_PFC:
836 case DCB_CAP_ATTR_UP2TC:
839 case DCB_CAP_ATTR_PG_TCS:
840 *cap = 1 << mlx5_max_tc(mdev);
842 case DCB_CAP_ATTR_PFC_TCS:
843 *cap = 1 << mlx5_max_tc(mdev);
845 case DCB_CAP_ATTR_GSP:
848 case DCB_CAP_ATTR_BCN:
851 case DCB_CAP_ATTR_DCBX:
852 *cap = priv->dcbx.cap |
853 DCB_CAP_DCBX_VER_CEE |
854 DCB_CAP_DCBX_VER_IEEE;
/* dcbnl .getnumtcs (CEE): report the number of traffic classes for both
 * the PG and PFC attributes as max_tc + 1.
 */
865 static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
868 struct mlx5e_priv *priv = netdev_priv(netdev);
869 struct mlx5_core_dev *mdev = priv->mdev;
872 case DCB_NUMTCS_ATTR_PG:
873 case DCB_NUMTCS_ATTR_PFC:
874 *num = mlx5_max_tc(mdev) + 1;
/* dcbnl .getpfcstate (CEE): UP if any PFC priority is enabled, DOWN
 * otherwise (or on query failure).
 * NOTE(review): the local `struct ieee_pfc pfc;` declaration is elided
 * in this truncated listing.
 */
883 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
887 if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
888 return MLX5E_CEE_STATE_DOWN;
890 return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
/* dcbnl .setpfcstate (CEE): stage the global PFC enable flag in the
 * cached CEE config; any value other than UP/DOWN is ignored.
 */
893 static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
895 struct mlx5e_priv *priv = netdev_priv(netdev);
896 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
898 if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
901 cee_cfg->pfc_enable = state;
/* dcbnl .dcbnl_getbuffer: report the priority->buffer mapping, the size
 * of each port buffer and the total port buffer size, all queried from
 * the device; requires port-buffer support.
 * NOTE(review): truncated listing — err/i declarations, error checks and
 * the final return are not visible here.
 */
904 static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
905 struct dcbnl_buffer *dcb_buffer)
907 struct mlx5e_priv *priv = netdev_priv(dev);
908 struct mlx5_core_dev *mdev = priv->mdev;
909 struct mlx5e_port_buffer port_buffer;
910 u8 buffer[MLX5E_MAX_PRIORITY];
913 if (!MLX5_BUFFER_SUPPORTED(mdev))
916 err = mlx5e_port_query_priority2buffer(mdev, buffer);
920 for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
921 dcb_buffer->prio2buffer[i] = buffer[i];
923 err = mlx5e_port_query_buffer(priv, &port_buffer);
927 for (i = 0; i < MLX5E_MAX_BUFFER; i++)
928 dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
929 dcb_buffer->total_size = port_buffer.port_buffer_size;
/* dcbnl .dcbnl_setbuffer: diff the requested priority->buffer mapping and
 * buffer sizes against the device state, mark the user as having taken
 * manual control of the buffers, and program only what changed via
 * mlx5e_port_manual_buffer_config().
 * NOTE(review): truncated listing — err/i/changed declarations, error
 * checks, loop `break`s and the final return are not visible here.
 */
934 static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
935 struct dcbnl_buffer *dcb_buffer)
937 struct mlx5e_priv *priv = netdev_priv(dev);
938 struct mlx5_core_dev *mdev = priv->mdev;
939 struct mlx5e_port_buffer port_buffer;
940 u8 old_prio2buffer[MLX5E_MAX_PRIORITY];
941 u32 *buffer_size = NULL;
942 u8 *prio2buffer = NULL;
946 if (!MLX5_BUFFER_SUPPORTED(mdev))
949 for (i = 0; i < DCBX_MAX_BUFFERS; i++)
950 mlx5_core_dbg(mdev, "buffer[%d]=%d\n", i, dcb_buffer->buffer_size[i]);
952 for (i = 0; i < MLX5E_MAX_PRIORITY; i++)
953 mlx5_core_dbg(mdev, "priority %d buffer%d\n", i, dcb_buffer->prio2buffer[i]);
955 err = mlx5e_port_query_priority2buffer(mdev, old_prio2buffer);
/* Detect a changed priority->buffer mapping. */
959 for (i = 0; i < MLX5E_MAX_PRIORITY; i++) {
960 if (dcb_buffer->prio2buffer[i] != old_prio2buffer[i]) {
961 changed |= MLX5E_PORT_BUFFER_PRIO2BUFFER;
962 prio2buffer = dcb_buffer->prio2buffer;
967 err = mlx5e_port_query_buffer(priv, &port_buffer);
/* Detect changed buffer sizes. */
971 for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
972 if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
973 changed |= MLX5E_PORT_BUFFER_SIZE;
974 buffer_size = dcb_buffer->buffer_size;
/* From here on buffers are user-managed, not auto-derived from PFC. */
982 priv->dcbx.manual_buffer = true;
983 err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
984 buffer_size, prio2buffer);
/* dcbnl callback table for mlx5e: IEEE 802.1Qaz handlers first, then the
 * legacy CEE interfaces staged through priv->dcbx.cee_cfg + .setall.
 */
988 const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
989 .ieee_getets = mlx5e_dcbnl_ieee_getets,
990 .ieee_setets = mlx5e_dcbnl_ieee_setets,
991 .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
992 .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
993 .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
994 .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
995 .ieee_setapp = mlx5e_dcbnl_ieee_setapp,
996 .ieee_delapp = mlx5e_dcbnl_ieee_delapp,
997 .getdcbx = mlx5e_dcbnl_getdcbx,
998 .setdcbx = mlx5e_dcbnl_setdcbx,
999 .dcbnl_getbuffer = mlx5e_dcbnl_getbuffer,
1000 .dcbnl_setbuffer = mlx5e_dcbnl_setbuffer,
1002 /* CEE interfaces */
1003 .setall = mlx5e_dcbnl_setall,
1004 .getstate = mlx5e_dcbnl_getstate,
1005 .getpermhwaddr = mlx5e_dcbnl_getpermhwaddr,
1007 .setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx,
1008 .setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx,
1009 .getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx,
1010 .getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx,
1012 .setpfccfg = mlx5e_dcbnl_setpfccfg,
1013 .getpfccfg = mlx5e_dcbnl_getpfccfg,
1014 .getcap = mlx5e_dcbnl_getcap,
1015 .getnumtcs = mlx5e_dcbnl_getnumtcs,
1016 .getpfcstate = mlx5e_dcbnl_getpfcstate,
1017 .setpfcstate = mlx5e_dcbnl_setpfcstate,
/* Query the operational DCBX version from firmware and collapse it to
 * the two modes the driver cares about: HOST, or AUTO for anything else.
 * Defaults to HOST if the query fails.
 */
1020 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
1021 enum mlx5_dcbx_oper_mode *mode)
1023 u32 out[MLX5_ST_SZ_DW(dcbx_param)];
1025 *mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
1027 if (!mlx5_query_port_dcbx_param(priv->mdev, out))
1028 *mode = MLX5_GET(dcbx_param, out, version_oper);
1030 /* From driver's point of view, we only care if the mode
1031 * is host (HOST) or non-host (AUTO)
1033 if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
1034 *mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
/* Install the driver's default ETS configuration at init: every tc gets
 * the full bandwidth allocation as a vendor (hardware-default) tc, and
 * when more than one tc exists priorities 0 and 1 swap tclass (per the
 * inline comment; the assignment lines are elided in this listing).
 * NOTE(review): truncated — `int err/i;`, the early return and the
 * prio_tc assignments are not visible here.
 */
1037 static void mlx5e_ets_init(struct mlx5e_priv *priv)
1039 struct ieee_ets ets;
1043 if (!MLX5_CAP_GEN(priv->mdev, ets))
1046 memset(&ets, 0, sizeof(ets));
1047 ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
1048 for (i = 0; i < ets.ets_cap; i++) {
1049 ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
1050 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
1054 if (ets.ets_cap > 1) {
1055 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
1060 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
1062 netdev_err(priv->netdev,
1063 "%s, Failed to init ETS: %d\n", __func__, err);
/* Bulk-register (action == INIT) or unregister (DELETE) DSCP app-table
 * entries mirroring the firmware's dscp2prio mapping, and set the cached
 * entry count accordingly. No-op unless the port is in DSCP trust state
 * and DSCP mapping is supported.
 * NOTE(review): truncated listing — `int i;`, early returns, the
 * temp.protocol assignment and the if/else around set/del are elided.
 */
1071 static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
1073 struct dcb_app temp;
1076 if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
1079 if (!MLX5_DSCP_SUPPORTED(priv->mdev))
1082 /* No SEL_DSCP entry in non DSCP state */
1083 if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
1086 temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
1087 for (i = 0; i < MLX5E_MAX_DSCP; i++) {
1089 temp.priority = priv->dcbx_dp.dscp2prio[i];
1091 dcb_ieee_setapp(priv->netdev, &temp);
1093 dcb_ieee_delapp(priv->netdev, &temp);
1096 priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
/* Register the initial DSCP app entries (wrapper around dscp_app INIT). */
1099 void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
1101 mlx5e_dcbnl_dscp_app(priv, INIT);
/* Remove all driver-registered DSCP app entries (dscp_app DELETE). */
1104 void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
1106 mlx5e_dcbnl_dscp_app(priv, DELETE);
/* Recompute the TX minimum inline mode for @params. In DSCP trust state
 * L2-only inlining is not enough — the device must see the IP header to
 * read the DSCP field, so bump the mode to MLX5_INLINE_MODE_IP.
 */
1109 static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
1110 struct mlx5e_params *params)
1112 params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
1113 if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
1114 params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
1115 params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
/* Propagate a trust-state change to the SQs: recompute tx_min_inline for
 * a copy of the channel params and, if it changed while the netdev is
 * open, swap in freshly opened channels; if the netdev is closed just
 * update the stored params. All under the state lock.
 * NOTE(review): truncated listing — the goto-out labels/jumps between
 * the checks and the unlock are not visible here.
 */
1118 static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
1120 struct mlx5e_channels new_channels = {};
1122 mutex_lock(&priv->state_lock);
1124 new_channels.params = priv->channels.params;
1125 mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
1127 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
1128 priv->channels.params = new_channels.params;
1132 /* Skip if tx_min_inline is the same */
1133 if (new_channels.params.tx_min_inline_mode ==
1134 priv->channels.params.tx_min_inline_mode)
1137 if (mlx5e_open_channels(priv, &new_channels))
1139 mlx5e_switch_priv_channels(priv, &new_channels, NULL);
1142 mutex_unlock(&priv->state_lock);
/* Program the port trust state (PCP vs DSCP) in firmware, cache it, and
 * refresh the SQ inline mode to match.
 * NOTE(review): truncated listing — `int err;`, the error check and the
 * final return are not visible here.
 */
1145 static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
1149 err = mlx5_set_trust_state(priv->mdev, trust_state);
1152 priv->dcbx_dp.trust_state = trust_state;
1153 mlx5e_trust_update_sq_inline_mode(priv);
/* Program one DSCP->priority mapping in firmware and mirror it in the
 * cached dscp2prio table.
 * NOTE(review): truncated listing — `int err;`, the error check and the
 * final return are not visible here.
 */
1158 static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
1162 err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
1166 priv->dcbx_dp.dscp2prio[dscp] = prio;
/* At driver init: read the trust state and the dscp2prio table from
 * firmware into the driver cache and derive the initial TX inline mode.
 * No-op (presumably returning 0 — the return lines are elided) when
 * DSCP mapping is unsupported.
 */
1170 static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
1172 struct mlx5_core_dev *mdev = priv->mdev;
1175 if (!MLX5_DSCP_SUPPORTED(mdev))
1178 err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
1182 mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
1184 err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
1191 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
1193 struct mlx5e_dcbx *dcbx = &priv->dcbx;
1195 mlx5e_trust_initialize(priv);
1197 if (!MLX5_CAP_GEN(priv->mdev, qos))
1200 if (MLX5_CAP_GEN(priv->mdev, dcbx))
1201 mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
1203 priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
1204 DCB_CAP_DCBX_VER_IEEE;
1205 if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
1206 priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
1208 priv->dcbx.manual_buffer = false;
1209 priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
1211 mlx5e_ets_init(priv);