/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;
struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};
#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};
/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the operation TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
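
/* Taken together, the items above describe the EMAD frame layout: an
 * Ethernet header, an operation TLV, a register TLV carrying the register
 * payload, and a terminating end TLV.
 */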
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
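
/* The EMAD frame is assembled back to front: each skb_push() below
 * prepends the next component, so the end TLV is written first and the
 * Ethernet header last.
 */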
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}
struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS	200
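
/* A transaction is re-sent when its timeout fires or when the device
 * reports a busy status, up to MLXSW_EMAD_MAX_RETRY attempts; the waiter
 * is then woken through the completion with the recorded error.
 */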
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}
static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}
static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}
static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}
/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}
static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		goto err_trap_register;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
err_trap_register:
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
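
/* Transaction lifecycle: the transaction is linked on the caller's bulk
 * list and on the RCU-protected trans_list, the request is transmitted and
 * a timeout work is armed; the EMAD RX listener above matches the response
 * by transaction ID and completes the transaction.
 */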
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports)
		return -EINVAL;
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
}
static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}
static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}
static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}
static const struct devlink_ops mlxsw_devlink_ops = {
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};
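
/* Each op above is a thin wrapper: it validates its input and dispatches
 * to the optional per-ASIC callback in struct mlxsw_driver, returning
 * -EOPNOTSUPP when the driver does not implement the callback.
 */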
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->res);
	if (err)
		goto err_bus_init;

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
err_hwmon_init:
	devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}
static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						     &listener->u.event_listener,
						     priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						  &listener->u.rx_listener,
						  priv);
}
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
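
/* Callers may queue several transactions on a single bulk_list via
 * mlxsw_reg_trans_query()/mlxsw_reg_trans_write() and then reap all of
 * them with one mlxsw_reg_trans_bulk_wait() call.
 */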
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}
static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
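
/* Typical caller usage (illustrative, mirroring mlxsw_core_trap_register()
 * above): pack a payload with the register's mlxsw_reg_*_pack() helper and
 * hand it to mlxsw_reg_write(), e.g.
 *
 *	char hpkt_pl[MLXSW_REG_HPKT_LEN];
 *
 *	mlxsw_reg_hpkt_pack(hpkt_pl, action, trap_id, trap_group, is_ctrl);
 *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
 */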
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
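
/* The LAG mapping is a flat array of MAX_LAG * MAX_LAG_MEMBERS bytes,
 * indexed by (lag_id, port_index); each entry holds the member's local
 * port, or 0 when the slot is unused.
 */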
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     bool split, u32 split_group)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	if (split)
		devlink_port_split_set(devlink_port, split_group);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	/* Skip trailing zero words, but always dump at least one line */
	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}
module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");