/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_router.h"

struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
	struct notifier_block fib_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};

static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

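/* A RIF counter is set up in three steps: take an index from the RIF
 * counter sub-pool, clear the counter through RICNT and only then bind
 * it to the router interface through RITR. Any failure unwinds the
 * pool allocation.
 */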
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);

#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

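/* A prefix usage summarizes which prefix lengths are in use in a FIB as
 * a bitmap with one bit per prefix length. LPM trees are shared whenever
 * their prefix usages allow it, so the helpers below compare, copy and
 * edit these bitmaps.
 */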
static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_params {
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_params params;
	bool offloaded;
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

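/* Describe the tree structure to the device through RALST: the bin of
 * the longest used prefix length becomes the root, and every other used
 * prefix length (except 0) is packed as a bin chained to the previously
 * packed, shorter one.
 */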
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}

static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}

static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0, which is the default. */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purposes, squash the main and local tables into one. */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		BUG_ON(1);
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->tb_id = tb_id;
	return vr;
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

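/* Make sure the FIB's LPM tree covers the requested prefix usage,
 * replacing the tree and rebinding the virtual router if needed. The
 * new tree is bound before the old one is released so that no lookups
 * are lost during the transition.
 */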
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might still be
		 * good for us if our requirement is a subset of the
		 * prefixes used in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list))
		mlxsw_sp_vr_destroy(vr);
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we are guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we are the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}

struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);

	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}

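/* The response is considered full when all available records are used
 * and the last one may be followed by more entries: any IPv6 record, or
 * an IPv4 record that itself carries the maximum number of entries.
 */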
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

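/* Dump the activity of IPv4 neighbour entries through RAUHTD and report
 * each returned entry to the kernel's neighbour code, repeating the
 * query for as long as the response comes back full.
 */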
static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	char *rauhtd_pl;
	u8 num_rec;
	int i, err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take the RTNL mutex here to prevent the lists from changing. */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over the nexthop neighbours, find those that are
	 * unresolved and send ARP requests for them. This solves the
	 * chicken-and-egg problem where a nexthop is not offloaded until
	 * its neighbour is resolved, but the neighbour is never resolved
	 * if traffic flows in hardware via a different nexthop.
	 *
	 * Take the RTNL mutex here to prevent the lists from changing.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl == &arp_tbl)
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	else
		WARN_ON_ONCE(1);
}

struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	neigh_release(n);
	kfree(neigh_work);
}

int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || p->tbl != &arp_tbl)
			return NOTIFY_DONE;

		/* We are in atomic context and can't take the RTNL mutex,
		 * so use the RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destroyed until we drop the reference in the delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for neighbour activity updates. */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static int mlxsw_sp_neigh_rif_flush(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_rif *rif)
{
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL,
			     rif->rif_index, rif->addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	mlxsw_sp_neigh_rif_flush(mlxsw_sp, rif);
	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node)
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
}

struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this nexthop belongs to
						*/
	struct rhash_head ht_node;
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};

struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};

struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	struct mlxsw_sp_nexthop_group_key key;
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif	nexthops[0].rif
};

static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
};

static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}

static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
				      mlxsw_sp_nexthop_group_ht_params);
}

static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};

static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}

static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}

static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}

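/* Re-point all routes using this nexthop group from the old adjacency
 * area to the new one. RALEU updates a whole virtual router at once, so
 * one update is issued per FIB, skipping consecutive entries that share
 * the same FIB.
 */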
static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  bool reallocate)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update || reallocate) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}

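/* Reflect the current state of the group's nexthops in the adjacency
 * table: count the nexthops that can be offloaded, allocate a KVD
 * linear area of that ECMP size, write the nexthop MACs into it and
 * migrate the routes from the old area. When nothing is offloadable or
 * an allocation fails, fall back to trapping the traffic to the kernel.
 */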
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int i;
	int err;

	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload ^ nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}
	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}

static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
					    bool removing)
{
	if (!removing && !nh->should_offload)
		nh->should_offload = 1;
	else if (removing && nh->offloaded)
		nh->should_offload = 0;
	nh->update = 1;
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *rif)
{
	if (nh->rif)
		return;

	nh->rif = rif;
	list_add(&nh->rif_list_node, &rif->nexthop_list);
}

static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->rif)
		return;

	list_del(&nh->rif_list_node);
	nh->rif = NULL;
}

static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct fib_nh *fib_nh = nh->key.fib_nh;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destroyed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
	if (!n) {
		n = neigh_create(&arp_tbl, &fib_nh->nh_gw, fib_nh->nh_dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router->nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}

static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}

static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_nexthop_group *nh_grp,
				 struct mlxsw_sp_nexthop *nh,
				 struct fib_nh *fib_nh)
{
	struct net_device *dev = fib_nh->nh_dev;
	struct in_device *in_dev;
	struct mlxsw_sp_rif *rif;
	int err;

	nh->nh_grp = nh_grp;
	nh->key.fib_nh = fib_nh;
	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
	if (err)
		return err;

	if (!dev)
		return 0;

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
	    fib_nh->nh_flags & RTNH_F_LINKDOWN)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}

static void mlxsw_sp_nexthop_event(struct mlxsw_sp *mlxsw_sp,
				   unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *rif;

	if (mlxsw_sp->router->aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (WARN_ON_ONCE(!nh))
		return;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!rif)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, rif);
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}

static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group_key key;
	struct mlxsw_sp_nexthop_group *nh_grp;

	key.fi = fi;
	nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
}

static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	if (fib_entry->params.tos)
		return false;

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nh_rif;
	default:
		return false;
	}
}

static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	fib_entry->offloaded = true;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_inc(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
	}
}

static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		fib_info_offload_dec(fib_entry->nh_group->key.fi);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
	}

	fib_entry->offloaded = false;
}

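/* Keep the kernel's offload indication for the route in sync with what
 * was actually programmed: a successful write marks the entry offloaded
 * only if it should be, and a delete clears a previously set mark.
 */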
static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		if (!fib_entry->offloaded)
			return;
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry) &&
		    !fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else if (!mlxsw_sp_fib_entry_should_offload(fib_entry) &&
			 fib_entry->offloaded)
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}

static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with the provided ECMP size. Otherwise, set up a trap and pass
	 * traffic to the kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u32 *p_dip = (u32 *) fib_entry->fib_node->key.addr;

	mlxsw_reg_ralue_pack4(ralue_pl,
			      (enum mlxsw_reg_ralxx_protocol) fib->proto, op,
			      fib->vr->id, fib_entry->fib_node->key.prefix_len,
			      *p_dip);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = -EINVAL;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		err = mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		return err;
	}
	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
	return err;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}

static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
	if (!fib_entry) {
		err = -ENOMEM;
		goto err_fib_entry_alloc;
	}

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop_group_get;

	fib_entry->params.prio = fen_info->fi->fib_priority;
	fib_entry->params.tb_id = fen_info->tb_id;
	fib_entry->params.type = fen_info->type;
	fib_entry->params.tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib_entry;

err_nexthop_group_get:
err_fib4_entry_type_set:
	kfree(fib_entry);
err_fib_entry_alloc:
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
	kfree(fib_entry);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info);

static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node))
		return NULL;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id == fen_info->tb_id &&
		    fib_entry->params.tos == fen_info->tos &&
		    fib_entry->params.type == fen_info->type &&
		    fib_entry->nh_group->key.fi == fen_info->fi) {
			return fib_entry;
		}
	}

	return NULL;
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}

static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

2270 static struct mlxsw_sp_fib_node *
2271 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
2272 size_t addr_len, unsigned char prefix_len)
2274 struct mlxsw_sp_fib_node *fib_node;
2276 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2280 INIT_LIST_HEAD(&fib_node->entry_list);
2281 list_add(&fib_node->list, &fib->node_list);
2282 memcpy(fib_node->key.addr, addr, addr_len);
2283 fib_node->key.prefix_len = prefix_len;
2288 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2290 list_del(&fib_node->list);
2291 WARN_ON(!list_empty(&fib_node->entry_list));
2296 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2297 const struct mlxsw_sp_fib_entry *fib_entry)
2299 return list_first_entry(&fib_node->entry_list,
2300 struct mlxsw_sp_fib_entry, list) == fib_entry;
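
/* Each FIB maintains a reference count per prefix length. The first
 * node of a given length sets the corresponding bit in the FIB's
 * prefix usage, which determines the shape of the LPM tree the
 * virtual router is bound to.
 */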
static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (fib->prefix_ref_count[prefix_len]++ == 0)
		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
}

static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (--fib->prefix_ref_count[prefix_len] == 0)
		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
}
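
/* Inserting a node may change the required prefix usage. If the FIB
 * was empty, a fresh LPM tree is allocated and bound to the virtual
 * router; otherwise the current tree is checked against the new usage
 * and replaced if it no longer fits.
 */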
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);

	if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
						 &req_prefix_usage);
		if (err)
			goto err_tree_check;
	} else {
		lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
						 fib->proto);
		if (IS_ERR(lpm_tree))
			return PTR_ERR(lpm_tree);
		fib->lpm_tree = lpm_tree;
		err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
		if (err)
			goto err_tree_bind;
	}

	mlxsw_sp_fib_node_prefix_inc(fib_node);

	return 0;

err_tree_bind:
	fib->lpm_tree = NULL;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_tree_check:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}

static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib4_node_get(struct mlxsw_sp *mlxsw_sp,
		       const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, fen_info->tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_node_put(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(vr);
}
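
/* Entries within a node are kept sorted: by decreasing table ID (VRF
 * tables before the main table), then by decreasing TOS and increasing
 * priority. The helper below returns the entry before which the new
 * parameters should be inserted, or NULL if no suitable position was
 * found.
 */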
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib_entry_params *params)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id > params->tb_id)
			continue;
		if (fib_entry->params.tb_id != params->tb_id)
			break;
		if (fib_entry->params.tos > params->tos)
			continue;
		if (fib_entry->params.prio >= params->prio ||
		    fib_entry->params.tos < params->tos)
			return fib_entry;
	}

	return NULL;
}

static int mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib_entry *fib_entry,
					  struct mlxsw_sp_fib_entry *new_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib_entry))
		return -EINVAL;

	fib_node = fib_entry->fib_node;
	list_for_each_entry_from(fib_entry, &fib_node->entry_list, list) {
		if (fib_entry->params.tb_id != new_entry->params.tb_id ||
		    fib_entry->params.tos != new_entry->params.tos ||
		    fib_entry->params.prio != new_entry->params.prio)
			break;
	}

	list_add_tail(&new_entry->list, &fib_entry->list);

	return 0;
}

static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib_node *fib_node,
			       struct mlxsw_sp_fib_entry *new_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;

	fib_entry = mlxsw_sp_fib4_node_entry_find(fib_node, &new_entry->params);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib_entry, new_entry);
	if (replace && WARN_ON(!fib_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib_entry) {
		list_add_tail(&new_entry->list, &fib_entry->list);
	} else {
		struct mlxsw_sp_fib_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, list) {
			if (new_entry->params.tb_id > last->params.tb_id)
				break;
			fib_entry = last;
		}

		if (fib_entry)
			list_add(&new_entry->list, &fib_entry->list);
		else
			list_add(&new_entry->list, &fib_node->entry_list);
	}

	return 0;
}

static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib_entry *fib_entry)
{
	list_del(&fib_entry->list);
}
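
/* Only the first entry in a node's list is offloaded to the device;
 * the rest are kept as backups in the driver. The add/del helpers
 * below therefore update hardware only when the identity of the first
 * entry changes.
 */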
static int
mlxsw_sp_fib4_node_entry_add(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}

static void
mlxsw_sp_fib4_node_entry_del(struct mlxsw_sp *mlxsw_sp,
			     const struct mlxsw_sp_fib_node *fib_node,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}

static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_entry *fib_entry,
					 bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib_node, fib_entry, replace,
					     append);
	if (err)
		return err;

	err = mlxsw_sp_fib4_node_entry_add(mlxsw_sp, fib_node, fib_entry);
	if (err)
		goto err_fib4_node_entry_add;

	return 0;

err_fib4_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib_entry);
	return err;
}

static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_del(mlxsw_sp, fib_node, fib_entry);
	mlxsw_sp_fib4_node_list_remove(fib_entry);
}

static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	struct mlxsw_sp_fib_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib_entry, list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
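
/* Route addition glues the pieces together: get (or create) the node
 * for the prefix, create the entry, link it into the node's list and,
 * for replace operations, unlink and destroy the entry that was
 * replaced.
 */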
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib_node = mlxsw_sp_fib4_node_get(mlxsw_sp, fen_info);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
err_fib4_entry_create:
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	fib_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib_entry))
		return;
	fib_node = fib_entry->fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
	mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
}
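
/* When offload is aborted, a default route is installed in every used
 * virtual router with an IP2ME action, so packets that can no longer
 * be routed by the device are trapped to the CPU and forwarded in
 * software.
 */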
static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	mlxsw_reg_ralta_pack(ralta_pl, true, MLXSW_REG_RALXX_PROTOCOL_IPV4,
			     MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, MLXSW_SP_LPM_TREE_MIN);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		mlxsw_reg_raltb_pack(raltb_pl, vr->id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4,
				     MLXSW_SP_LPM_TREE_MIN);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_SP_L3_PROTO_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0,
				      0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}

static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib_entry *fib_entry, *tmp;

	list_for_each_entry_safe(fib_entry, tmp, &fib_node->entry_list, list) {
		bool do_break = &tmp->list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib_entry);
		mlxsw_sp_fib4_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		WARN_ON_ONCE(1);
		break;
	}
}

static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	}
}

static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router->aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router->aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
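
/* FIB notifications are delivered in atomic context, so the actual
 * processing is deferred to process context using the work item below.
 * The notifier copies the relevant info into the work struct and takes
 * the references needed to keep it valid until the work runs.
 */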
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
};

static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			mlxsw_sp_router_fib4_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_event(mlxsw_sp, fib_work->event,
				       fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}

/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;

	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	INIT_WORK(&fib_work->work, mlxsw_sp_router_fib_event_work);
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	switch (event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, ptr, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, ptr, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, ptr, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (mlxsw_sp->router->rifs[i] &&
		    mlxsw_sp->router->rifs[i]->dev == dev)
			return mlxsw_sp->router->rifs[i];

	return NULL;
}

static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
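
/* Not every inetaddr event requires action: a RIF is only configured
 * for the first address on a netdev and only unconfigured when the
 * last address is removed, while l3mdev slaves are handled by the VRF
 * code.
 */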
static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif,
				       const struct in_device *in_dev,
				       unsigned long event)
{
	switch (event) {
	case NETDEV_UP:
		if (!rif)
			return true;
		return false;
	case NETDEV_DOWN:
		if (rif && !in_dev->ifa_list &&
		    !netif_is_l3_slave(rif->dev))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static enum mlxsw_sp_rif_type
mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *dev)
{
	enum mlxsw_sp_fid_type type;

	/* RIF type is derived from the type of the underlying FID */
	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev))
		type = MLXSW_SP_FID_TYPE_8021D;
	else
		type = MLXSW_SP_FID_TYPE_RFID;

	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}

static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
		if (!mlxsw_sp->router->rifs[i]) {
			*p_rif_index = i;
			return 0;
		}
	}

	return -ENOBUFS;
}

static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
					       u16 vr_id,
					       struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(rif_size, GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->nexthop_list);
	INIT_LIST_HEAD(&rif->neigh_list);
	ether_addr_copy(rif->addr, l3_dev->dev_addr);
	rif->mtu = l3_dev->mtu;
	rif->vr_id = vr_id;
	rif->dev = l3_dev;
	rif->rif_index = rif_index;

	return rif;
}

struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}

u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}

int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}
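
/* RIF creation is parameterized by the ops matching the netdev type.
 * The ops determine the RIF's size and FID and how it is (de)configured
 * in hardware, while the common code below handles the virtual router,
 * index allocation and the FDB entry for the RIF's MAC.
 */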
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params)
{
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err)
		goto err_rif_index_alloc;

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;

	fid = ops->fid_get(rif);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	rif->fid = fid;

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif);
	if (err)
		goto err_configure;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
				  mlxsw_sp_fid_index(fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_rif_counters_alloc(rif);
	mlxsw_sp_fid_rif_set(fid, rif);
	mlxsw_sp->router->rifs[rif_index] = rif;
	vr->rif_count++;

	return rif;

err_rif_fdb_op:
	ops->deconfigure(rif);
err_configure:
	mlxsw_sp_fid_put(fid);
err_fid_get:
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}

void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;
	struct mlxsw_sp_vr *vr;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	vr->rif_count--;
	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_counters_free(rif);
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	ops->deconfigure(rif);
	mlxsw_sp_fid_put(fid);
	kfree(rif);
	mlxsw_sp_vr_put(vr);
}

static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;

	params->vid = mlxsw_sp_port_vlan->vid;
	params->lag = mlxsw_sp_port->lagged;
	if (params->lag)
		params->lag_id = mlxsw_sp_port->lag_id;
	else
		params->system_port = mlxsw_sp_port->local_port;
}

static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct net_device *l3_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif) {
		struct mlxsw_sp_rif_params params = {
			.dev = l3_dev,
		};

		mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
	}

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	return err;
}

void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	/* If router port holds the last reference on the rFID, then the
	 * associated Sub-port RIF will be destroyed.
	 */
	mlxsw_sp_fid_put(fid);
}
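
/* The handlers below dispatch inetaddr events according to the type of
 * the netdev (port, LAG, bridge or VLAN device) and translate
 * NETDEV_UP and NETDEV_DOWN into RIF construction and destruction.
 */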
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
						      l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) ||
	    netif_is_lag_port(port_dev) ||
	    netif_is_ovs_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
								port_dev,
								event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}

static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);

	return 0;
}

static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
				     unsigned long event)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(dev, event);
	else if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(dev, event);
	else
		return 0;
}

int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, ifa->ifa_dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event);
out:
	return notifier_from_errno(err);
}

static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	u16 fid_index;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);

	return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	if (!mlxsw_sp)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		return 0;
	case NETDEV_CHANGEUPPER:
		if (info->upper_dev)
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
		else
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		break;
	}

	return err;
}
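
/* Three RIF implementations follow: Sub-port RIFs for {Port / LAG, VID}
 * devices, VLAN RIFs for VLAN-aware bridges and FID RIFs for
 * VLAN-unaware bridges, along with the array mapping each RIF type to
 * its ops.
 */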
static struct mlxsw_sp_rif_subport *
mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_subport, common);
}

static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu,
			    rif->dev->dev_addr);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  rif_subport->vid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_rif_subport_op(rif, true);
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type = MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size = sizeof(struct mlxsw_sp_rif_subport),
	.setup = mlxsw_sp_rif_subport_setup,
	.configure = mlxsw_sp_rif_subport_configure,
	.deconfigure = mlxsw_sp_rif_subport_deconfigure,
	.fid_get = mlxsw_sp_rif_subport_fid_get,
};

static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	return 0;

err_fid_bc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);

	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
{
	u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type = MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size = sizeof(struct mlxsw_sp_rif),
	.configure = mlxsw_sp_rif_vlan_configure,
	.deconfigure = mlxsw_sp_rif_vlan_deconfigure,
	.fid_get = mlxsw_sp_rif_vlan_fid_get,
};

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	return 0;

err_fid_bc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);

	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type = MLXSW_SP_RIF_TYPE_FID,
	.rif_size = sizeof(struct mlxsw_sp_rif),
	.configure = mlxsw_sp_rif_fid_configure,
	.deconfigure = mlxsw_sp_rif_fid_deconfigure,
	.fid_get = mlxsw_sp_rif_fid_fid_get,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
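
/* Initialization order matters: general router state and the RIF table
 * come first, then the nexthop and nexthop group hash tables, LPM
 * trees, virtual routers and the neighbour code. The FIB notifier is
 * registered last, since route events may start flowing immediately.
 * Teardown below is symmetric.
 */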
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}