/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_router.h"
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;
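/* Per-ASIC router state: the RIF and virtual router arrays, the neighbour
 * and nexthop hash tables, the LPM trees, and the delayed works used to
 * poll neighbour activity and to probe unresolved nexthops.
 */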
struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
	struct notifier_block fib_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};
struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
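/* Prefix usage is tracked as a bitmap with one bit per prefix length:
 * sizeof(struct in6_addr) * BITS_PER_BYTE + 1 = 129 bins, covering /0
 * through /128, so the same structure serves both IPv4 and IPv6.
 */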
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
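/* Program the tree structure via the RALST register. The highest used
 * prefix length is taken as the root bin, and every other used prefix
 * length is chained to the one programmed before it.
 */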
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_lpm_tree *lpm_tree)
{
	return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
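/* LPM trees are a limited resource, so they are shared: an existing tree
 * whose prefix usage matches the request is reused and reference counted,
 * and a new tree is only created when no match is found.
 */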
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}

static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
	return 0;
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main and local table into one */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}
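/* Check whether the currently bound LPM tree still fits the requested
 * prefix usage. If not, get (or create) a matching tree and rebind the
 * virtual router to it before releasing the old tree, so that lookups
 * keep working while the switch-over happens.
 */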
static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might still be
		 * good for us if our requirement is a subset of the
		 * prefixes used in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list))
		mlxsw_sp_vr_destroy(vr);
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}
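/* Neighbour (ARP/ND) tracking. Entries are mirrored from the kernel's
 * neighbour tables into the device's host table (RAUHT) and kept in a
 * rhashtable keyed by the struct neighbour pointer.
 */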
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
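/* The RAUHTD register dumps activity records from the device's host table.
 * For each active entry, look up the matching kernel neighbour and feed it
 * an event so it is not aged out while traffic only flows in hardware.
 */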
static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	struct net_device *dev;
	struct neighbour *n;
	struct in6_addr dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
					 (char *) &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&nd_tbl, &dip, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
#else
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
}
#endif
static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}
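/* Heuristic for detecting a truncated dump: if the response is filled to
 * capacity and the last record is either an IPv6 record or a completely
 * full IPv4 record, more records may be pending and another RAUHTD query
 * is needed.
 */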
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	return err;
}
static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_rauhtd_type type;
	char *rauhtd_pl;
	int err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
	if (err)
		goto out;

	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);

out:
	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take the RTNL mutex here to prevent the lists from changing */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}
static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over the nexthop neighbours, find those that are
	 * unresolved and send an ARP request for them. This solves the
	 * chicken-and-egg problem where a nexthop is not offloaded until
	 * its neighbour is resolved, but the neighbour is never resolved
	 * if traffic already flows in HW using a different nexthop.
	 *
	 * Take the RTNL mutex here to prevent the lists from changing.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}
static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static bool mlxsw_sp_neigh_ipv6_ignore(struct neighbour *n)
{
	/* Packets with a link-local destination address are trapped
	 * after LPM lookup and never reach the neighbour table, so
	 * there is no need to program such neighbours to the device.
	 */
	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
	    IPV6_ADDR_LINKLOCAL)
		return true;
	return false;
}

static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl->family == AF_INET) {
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry->key.n))
			return;
		mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	} else {
		WARN_ON_ONCE(1);
	}
}
struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	neigh_release(n);
	kfree(neigh_work);
}
int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node) {
		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
	}
}
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this belongs to
						*/
	struct rhash_head ht_node;
	struct mlxsw_sp_nexthop_key key;
	unsigned char gw_addr[sizeof(struct in6_addr)];
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};

struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};

struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	struct neigh_table *neigh_tbl;
	struct mlxsw_sp_nexthop_group_key key;
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif	nexthops[0].rif
};

static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
};
static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}

static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
				      mlxsw_sp_nexthop_group_ht_params);
}

static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};

static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}

static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}
static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}

static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}
static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  bool reallocate)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update || reallocate) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static bool
mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
				 const struct mlxsw_sp_fib_entry *fib_entry);

static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
						      fib_entry))
			continue;
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}

static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err);

static void
mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
{
	enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
						      fib_entry))
			continue;
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
	}
}
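/* Re-compute the group's adjacency entries after a nexthop state change:
 * count the offloadable nexthops, allocate a fresh KVD linear block of
 * that ECMP size, write the MACs, re-point or mass-update the fib entries
 * using the group, free the old block, and fall back to trapping packets
 * to the kernel if anything fails along the way.
 */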
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int i;
	int err;

	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload != nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}

	/* Offload state within the group changed, so update the flags. */
	mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);

	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}
static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
					    bool removing)
{
	if (!removing && !nh->should_offload)
		nh->should_offload = 1;
	else if (nh->offloaded)
		nh->should_offload = 0;
	nh->update = 1;
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *rif)
{
	if (nh->rif)
		return;

	nh->rif = rif;
	list_add(&nh->rif_list_node, &rif->nexthop_list);
}

static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->rif)
		return;

	list_del(&nh->rif_list_node);
	nh->rif = NULL;
}
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference on the neighbour, ensuring it is not
	 * destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
	if (!n) {
		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
				 nh->rif->dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router->nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}

static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}
static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  struct fib_nh *fib_nh)
{
	struct net_device *dev = fib_nh->nh_dev;
	struct in_device *in_dev;
	struct mlxsw_sp_rif *rif;
	int err;

	nh->nh_grp = nh_grp;
	nh->key.fib_nh = fib_nh;
	memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
	if (err)
		return err;

	if (!dev)
		return 0;

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
	    fib_nh->nh_flags & RTNH_F_LINKDOWN)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}

static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
				    unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *rif;

	if (mlxsw_sp->router->aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (WARN_ON_ONCE(!nh))
		return;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!rif)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, rif);
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}

static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->neigh_tbl = &arp_tbl;

	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	fib_info_hold(fi);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop4_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	fib_info_put(nh_grp->key.fi);
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	fib_info_put(nh_grp->key.fi);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group_key key;
	struct mlxsw_sp_nexthop_group *nh_grp;

	key.fi = fi;
	nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}
static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->tos;
}

static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nh_rif;
	default:
		return false;
	}
}
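/* Find the group nexthop backing a given IPv6 route by matching both the
 * egress netdev and the gateway address.
 */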
static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
		struct rt6_info *rt = mlxsw_sp_rt6->rt;

		if (nh->rif && nh->rif->dev == rt->dst.dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->rt6i_gateway))
			return nh;
	}

	return NULL;
}

static void
mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
		nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		if (nh->offloaded)
			nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
		else
			nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
	}
}
static void
mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
		list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				 list)->rt->rt6i_flags |= RTF_OFFLOAD;
		return;
	}

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
		struct mlxsw_sp_nexthop *nh;

		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
		if (nh && nh->offloaded)
			mlxsw_sp_rt6->rt->rt6i_flags |= RTF_OFFLOAD;
		else
			mlxsw_sp_rt6->rt->rt6i_flags &= ~RTF_OFFLOAD;
	}
}

static void
mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct rt6_info *rt = mlxsw_sp_rt6->rt;

		rt->rt6i_flags &= ~RTF_OFFLOAD;
	}
}

static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_offload_set(fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_offload_set(fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_offload_unset(fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_offload_unset(fib_entry);
		break;
	}
}
static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry))
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else if (!mlxsw_sp_fib_entry_should_offload(fib_entry))
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		return;
	default:
		return;
	}
}
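/* Keep the kernel's offload indication in sync with the result of the
 * last RALUE operation: clear it on delete, and on a successful write
 * set or clear it according to whether the entry is actually offloaded.
 */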
2283 mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
2284 const struct mlxsw_sp_fib_entry *fib_entry,
2285 enum mlxsw_reg_ralue_op op)
2287 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
2288 enum mlxsw_reg_ralxx_protocol proto;
2291 proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;
2293 switch (fib->proto) {
2294 case MLXSW_SP_L3_PROTO_IPV4:
2295 p_dip = (u32 *) fib_entry->fib_node->key.addr;
2296 mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
2297 fib_entry->fib_node->key.prefix_len,
2300 case MLXSW_SP_L3_PROTO_IPV6:
2301 mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
2302 fib_entry->fib_node->key.prefix_len,
2303 fib_entry->fib_node->key.addr);
2308 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
2309 struct mlxsw_sp_fib_entry *fib_entry,
2310 enum mlxsw_reg_ralue_op op)
2312 char ralue_pl[MLXSW_REG_RALUE_LEN];
2313 enum mlxsw_reg_ralue_trap_action trap_action;
2315 u32 adjacency_index = 0;
2318 /* In case the nexthop group adjacency index is valid, use it
2319 * with provided ECMP size. Otherwise, setup trap and pass
2320 * traffic to kernel.
2322 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
2323 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
2324 adjacency_index = fib_entry->nh_group->adj_index;
2325 ecmp_size = fib_entry->nh_group->ecmp_size;
2327 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2328 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2331 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
2332 mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
2333 adjacency_index, ecmp_size);
2334 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2335 }
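/* A remote entry forwards via the adjacency (nexthop) table. When the
 * nexthop group has no valid adjacency index, the entry is written as a
 * trap instead, so the kernel keeps forwarding the traffic in software.
 */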
2337 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
2338 struct mlxsw_sp_fib_entry *fib_entry,
2339 enum mlxsw_reg_ralue_op op)
2341 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
2342 enum mlxsw_reg_ralue_trap_action trap_action;
2343 char ralue_pl[MLXSW_REG_RALUE_LEN];
2344 u16 trap_id = 0;
2345 u16 rif_index = 0;
2347 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
2348 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
2349 rif_index = rif->rif_index;
2350 } else {
2351 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
2352 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
2353 }
2355 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
2356 mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
2357 rif_index);
2358 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2359 }
2361 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
2362 struct mlxsw_sp_fib_entry *fib_entry,
2363 enum mlxsw_reg_ralue_op op)
2365 char ralue_pl[MLXSW_REG_RALUE_LEN];
2367 mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
2368 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
2369 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
2372 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2373 struct mlxsw_sp_fib_entry *fib_entry,
2374 enum mlxsw_reg_ralue_op op)
2376 switch (fib_entry->type) {
2377 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
2378 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
2379 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
2380 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
2381 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
2382 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
2383 }
2384 return -EINVAL;
2385 }
2387 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
2388 struct mlxsw_sp_fib_entry *fib_entry,
2389 enum mlxsw_reg_ralue_op op)
2390 {
2391 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
2393 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);
2394 return err;
2395 }
2398 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
2399 struct mlxsw_sp_fib_entry *fib_entry)
2401 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2402 MLXSW_REG_RALUE_OP_WRITE_WRITE);
2405 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
2406 struct mlxsw_sp_fib_entry *fib_entry)
2408 return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
2409 MLXSW_REG_RALUE_OP_WRITE_DELETE);
2410 }
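/* Update and delete are thin wrappers around the same RALUE operation;
 * a write to an existing {virtual router, prefix} key overwrites it in
 * place, which is what the entry promotion logic below relies on to
 * avoid transient packet loss.
 */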
2412 static int
2413 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
2414 const struct fib_entry_notifier_info *fen_info,
2415 struct mlxsw_sp_fib_entry *fib_entry)
2416 {
2417 struct fib_info *fi = fen_info->fi;
2419 switch (fen_info->type) {
2420 case RTN_BROADCAST: /* fall through */
2421 case RTN_LOCAL:
2422 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2423 return 0;
2424 case RTN_UNREACHABLE: /* fall through */
2425 case RTN_BLACKHOLE: /* fall through */
2426 case RTN_PROHIBIT:
2427 /* Packets hitting these routes need to be trapped, but
2428 * can do so with a lower priority than packets directed
2429 * at the host, so use action type local instead of trap.
2430 */
2431 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
2432 return 0;
2433 case RTN_UNICAST:
2434 if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
2435 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
2436 else
2437 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
2438 return 0;
2439 default:
2440 return -EINVAL;
2441 }
2442 }
2444 static struct mlxsw_sp_fib4_entry *
2445 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
2446 struct mlxsw_sp_fib_node *fib_node,
2447 const struct fib_entry_notifier_info *fen_info)
2449 struct mlxsw_sp_fib4_entry *fib4_entry;
2450 struct mlxsw_sp_fib_entry *fib_entry;
2453 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
2454 if (!fib4_entry)
2455 return ERR_PTR(-ENOMEM);
2456 fib_entry = &fib4_entry->common;
2458 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
2459 if (err)
2460 goto err_fib4_entry_type_set;
2462 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
2463 if (err)
2464 goto err_nexthop4_group_get;
2466 fib4_entry->prio = fen_info->fi->fib_priority;
2467 fib4_entry->tb_id = fen_info->tb_id;
2468 fib4_entry->type = fen_info->type;
2469 fib4_entry->tos = fen_info->tos;
2471 fib_entry->fib_node = fib_node;
2473 return fib4_entry;
2475 err_nexthop4_group_get:
2476 err_fib4_entry_type_set:
2477 kfree(fib4_entry);
2478 return ERR_PTR(err);
2479 }
2481 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2482 struct mlxsw_sp_fib4_entry *fib4_entry)
2484 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
2485 kfree(fib4_entry);
2486 }
2488 static struct mlxsw_sp_fib_node *
2489 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2490 size_t addr_len, unsigned char prefix_len);
2492 static struct mlxsw_sp_fib4_entry *
2493 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
2494 const struct fib_entry_notifier_info *fen_info)
2496 struct mlxsw_sp_fib4_entry *fib4_entry;
2497 struct mlxsw_sp_fib_node *fib_node;
2498 struct mlxsw_sp_fib *fib;
2499 struct mlxsw_sp_vr *vr;
2501 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
2502 if (!vr)
2503 return NULL;
2504 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
2506 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
2507 sizeof(fen_info->dst),
2508 fen_info->dst_len);
2509 if (!fib_node)
2510 return NULL;
2512 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
2513 if (fib4_entry->tb_id == fen_info->tb_id &&
2514 fib4_entry->tos == fen_info->tos &&
2515 fib4_entry->type == fen_info->type &&
2516 fib4_entry->common.nh_group->key.fi == fen_info->fi) {
2517 return fib4_entry;
2518 }
2519 }
2521 return NULL;
2522 }
2524 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
2525 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
2526 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
2527 .key_len = sizeof(struct mlxsw_sp_fib_key),
2528 .automatic_shrinking = true,
2529 };
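/* FIB nodes are hashed on the full mlxsw_sp_fib_key, i.e. the prefix
 * address plus its length, giving one node per {prefix, prefix length}
 * within a table; the per-protocol entries hang off the node's list.
 */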
2531 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
2532 struct mlxsw_sp_fib_node *fib_node)
2534 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
2535 mlxsw_sp_fib_ht_params);
2538 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
2539 struct mlxsw_sp_fib_node *fib_node)
2541 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
2542 mlxsw_sp_fib_ht_params);
2545 static struct mlxsw_sp_fib_node *
2546 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
2547 size_t addr_len, unsigned char prefix_len)
2549 struct mlxsw_sp_fib_key key;
2551 memset(&key, 0, sizeof(key));
2552 memcpy(key.addr, addr, addr_len);
2553 key.prefix_len = prefix_len;
2554 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
2557 static struct mlxsw_sp_fib_node *
2558 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
2559 size_t addr_len, unsigned char prefix_len)
2561 struct mlxsw_sp_fib_node *fib_node;
2563 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
2564 if (!fib_node)
2565 return NULL;
2567 INIT_LIST_HEAD(&fib_node->entry_list);
2568 list_add(&fib_node->list, &fib->node_list);
2569 memcpy(fib_node->key.addr, addr, addr_len);
2570 fib_node->key.prefix_len = prefix_len;
2572 return fib_node;
2573 }
2575 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
2577 list_del(&fib_node->list);
2578 WARN_ON(!list_empty(&fib_node->entry_list));
2579 kfree(fib_node);
2580 }
2583 mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
2584 const struct mlxsw_sp_fib_entry *fib_entry)
2586 return list_first_entry(&fib_node->entry_list,
2587 struct mlxsw_sp_fib_entry, list) == fib_entry;
2590 static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
2592 unsigned char prefix_len = fib_node->key.prefix_len;
2593 struct mlxsw_sp_fib *fib = fib_node->fib;
2595 if (fib->prefix_ref_count[prefix_len]++ == 0)
2596 mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
2599 static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
2601 unsigned char prefix_len = fib_node->key.prefix_len;
2602 struct mlxsw_sp_fib *fib = fib_node->fib;
2604 if (--fib->prefix_ref_count[prefix_len] == 0)
2605 mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
2606 }
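/* The per-prefix-length reference counts drive which prefix lengths the
 * bound LPM tree must describe: the first user of a length sets the bit
 * in the usage mask and the last user clears it.
 */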
2608 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
2609 struct mlxsw_sp_fib_node *fib_node,
2610 struct mlxsw_sp_fib *fib)
2612 struct mlxsw_sp_prefix_usage req_prefix_usage;
2613 struct mlxsw_sp_lpm_tree *lpm_tree;
2616 err = mlxsw_sp_fib_node_insert(fib, fib_node);
2617 if (err)
2618 return err;
2619 fib_node->fib = fib;
2621 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
2622 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
2624 if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2625 err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
2626 &req_prefix_usage);
2627 if (err)
2628 goto err_tree_check;
2629 } else {
2630 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
2631 fib->proto);
2632 if (IS_ERR(lpm_tree))
2633 return PTR_ERR(lpm_tree);
2634 fib->lpm_tree = lpm_tree;
2635 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
2636 if (err)
2637 goto err_tree_bind;
2638 }
2640 mlxsw_sp_fib_node_prefix_inc(fib_node);
2641 return 0;
2643 err_tree_bind:
2645 fib->lpm_tree = NULL;
2646 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2647 err_tree_check:
2648 fib_node->fib = NULL;
2649 mlxsw_sp_fib_node_remove(fib, fib_node);
2650 return err;
2651 }
2653 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
2654 struct mlxsw_sp_fib_node *fib_node)
2656 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
2657 struct mlxsw_sp_fib *fib = fib_node->fib;
2659 mlxsw_sp_fib_node_prefix_dec(fib_node);
2661 if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
2662 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
2663 fib->lpm_tree = NULL;
2664 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
2665 } else {
2666 mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
2667 }
2669 fib_node->fib = NULL;
2670 mlxsw_sp_fib_node_remove(fib, fib_node);
2673 static struct mlxsw_sp_fib_node *
2674 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
2675 size_t addr_len, unsigned char prefix_len,
2676 enum mlxsw_sp_l3proto proto)
2678 struct mlxsw_sp_fib_node *fib_node;
2679 struct mlxsw_sp_fib *fib;
2680 struct mlxsw_sp_vr *vr;
2683 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id);
2684 if (IS_ERR(vr))
2685 return ERR_CAST(vr);
2686 fib = mlxsw_sp_vr_fib(vr, proto);
2688 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
2689 if (fib_node)
2690 return fib_node;
2692 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
2693 if (!fib_node) {
2694 err = -ENOMEM;
2695 goto err_fib_node_create;
2696 }
2698 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
2699 if (err)
2700 goto err_fib_node_init;
2702 return fib_node;
2704 err_fib_node_init:
2705 mlxsw_sp_fib_node_destroy(fib_node);
2706 err_fib_node_create:
2707 mlxsw_sp_vr_put(vr);
2708 return ERR_PTR(err);
2711 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
2712 struct mlxsw_sp_fib_node *fib_node)
2714 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
2716 if (!list_empty(&fib_node->entry_list))
2717 return;
2718 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
2719 mlxsw_sp_fib_node_destroy(fib_node);
2720 mlxsw_sp_vr_put(vr);
2721 }
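/* fib_node_get()/fib_node_put() pair reference counting of the virtual
 * router with lazy node creation: a node exists only while at least one
 * route points at its prefix.
 */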
2723 static struct mlxsw_sp_fib4_entry *
2724 mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
2725 const struct mlxsw_sp_fib4_entry *new4_entry)
2727 struct mlxsw_sp_fib4_entry *fib4_entry;
2729 list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
2730 if (fib4_entry->tb_id > new4_entry->tb_id)
2731 continue;
2732 if (fib4_entry->tb_id != new4_entry->tb_id)
2733 break;
2734 if (fib4_entry->tos > new4_entry->tos)
2735 continue;
2736 if (fib4_entry->prio >= new4_entry->prio ||
2737 fib4_entry->tos < new4_entry->tos)
2738 return fib4_entry;
2739 }
2741 return NULL;
2742 }
2745 mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
2746 struct mlxsw_sp_fib4_entry *new4_entry)
2748 struct mlxsw_sp_fib_node *fib_node;
2750 if (WARN_ON(!fib4_entry))
2751 return -EINVAL;
2753 fib_node = fib4_entry->common.fib_node;
2754 list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
2755 common.list) {
2756 if (fib4_entry->tb_id != new4_entry->tb_id ||
2757 fib4_entry->tos != new4_entry->tos ||
2758 fib4_entry->prio != new4_entry->prio)
2759 break;
2760 }
2762 list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
2763 return 0;
2764 }
2767 mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
2768 bool replace, bool append)
2770 struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
2771 struct mlxsw_sp_fib4_entry *fib4_entry;
2773 fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);
2775 if (append)
2776 return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
2777 if (replace && WARN_ON(!fib4_entry))
2778 return -EINVAL;
2780 /* Insert new entry before replaced one, so that we can later
2781 * remove the second.
2782 */
2783 if (fib4_entry) {
2784 list_add_tail(&new4_entry->common.list,
2785 &fib4_entry->common.list);
2786 } else {
2787 struct mlxsw_sp_fib4_entry *last;
2789 list_for_each_entry(last, &fib_node->entry_list, common.list) {
2790 if (new4_entry->tb_id > last->tb_id)
2791 break;
2792 fib4_entry = last;
2793 }
2795 if (fib4_entry)
2796 list_add(&new4_entry->common.list,
2797 &fib4_entry->common.list);
2798 else
2799 list_add(&new4_entry->common.list,
2800 &fib_node->entry_list);
2801 }
2803 return 0;
2804 }
2807 mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
2809 list_del(&fib4_entry->common.list);
2812 static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
2813 struct mlxsw_sp_fib_entry *fib_entry)
2815 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2817 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2818 return 0;
2820 /* To prevent packet loss, overwrite the previously offloaded
2821 * entry.
2822 */
2823 if (!list_is_singular(&fib_node->entry_list)) {
2824 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2825 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2827 mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
2828 }
2830 return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2831 }
2833 static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
2834 struct mlxsw_sp_fib_entry *fib_entry)
2836 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
2838 if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
2839 return;
2841 /* Promote the next entry by overwriting the deleted entry */
2842 if (!list_is_singular(&fib_node->entry_list)) {
2843 struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
2844 enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
2846 mlxsw_sp_fib_entry_update(mlxsw_sp, n);
2847 mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
2848 return;
2849 }
2851 mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
2852 }
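/* Only the first entry on a node is programmed to the device; adding or
 * deleting it overwrites (or promotes the next entry into) the same key,
 * so the node's prefix never transiently disappears from the LPM table.
 */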
2854 static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
2855 struct mlxsw_sp_fib4_entry *fib4_entry,
2856 bool replace, bool append)
2860 err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
2861 if (err)
2862 return err;
2864 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
2866 goto err_fib_node_entry_add;
2868 return 0;
2870 err_fib_node_entry_add:
2871 mlxsw_sp_fib4_node_list_remove(fib4_entry);
2872 return err;
2873 }
2876 mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
2877 struct mlxsw_sp_fib4_entry *fib4_entry)
2879 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
2880 mlxsw_sp_fib4_node_list_remove(fib4_entry);
2883 static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
2884 struct mlxsw_sp_fib4_entry *fib4_entry,
2885 bool replace)
2886 {
2887 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
2888 struct mlxsw_sp_fib4_entry *replaced;
2890 if (!replace)
2891 return;
2893 /* We inserted the new entry before replaced one */
2894 replaced = list_next_entry(fib4_entry, common.list);
2896 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
2897 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
2898 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
2902 mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
2903 const struct fib_entry_notifier_info *fen_info,
2904 bool replace, bool append)
2906 struct mlxsw_sp_fib4_entry *fib4_entry;
2907 struct mlxsw_sp_fib_node *fib_node;
2910 if (mlxsw_sp->router->aborted)
2911 return 0;
2913 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
2914 &fen_info->dst, sizeof(fen_info->dst),
2915 fen_info->dst_len,
2916 MLXSW_SP_L3_PROTO_IPV4);
2917 if (IS_ERR(fib_node)) {
2918 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
2919 return PTR_ERR(fib_node);
2922 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
2923 if (IS_ERR(fib4_entry)) {
2924 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
2925 err = PTR_ERR(fib4_entry);
2926 goto err_fib4_entry_create;
2929 err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
2930 append);
2931 if (err) {
2932 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
2933 goto err_fib4_node_entry_link;
2934 }
2936 mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);
2938 return 0;
2940 err_fib4_node_entry_link:
2941 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
2942 err_fib4_entry_create:
2943 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
2944 return err;
2945 }
2947 static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
2948 struct fib_entry_notifier_info *fen_info)
2950 struct mlxsw_sp_fib4_entry *fib4_entry;
2951 struct mlxsw_sp_fib_node *fib_node;
2953 if (mlxsw_sp->router->aborted)
2954 return;
2956 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
2957 if (WARN_ON(!fib4_entry))
2958 return;
2959 fib_node = fib4_entry->common.fib_node;
2961 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
2962 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
2963 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
2966 static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
2968 /* Packets with link-local destination IP arriving to the router
2969 * are trapped to the CPU, so no need to program specific routes
2970 * for them.
2971 */
2972 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
2973 return true;
2975 /* Multicast routes aren't supported, so ignore them. Neighbour
2976 * Discovery packets are specifically trapped.
2977 */
2978 if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
2979 return true;
2981 /* Cloned routes are irrelevant in the forwarding path. */
2982 if (rt->rt6i_flags & RTF_CACHE)
2983 return true;
2985 return false;
2986 }
2988 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
2990 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
2992 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
2993 if (!mlxsw_sp_rt6)
2994 return ERR_PTR(-ENOMEM);
2996 /* In case of route replace, replaced route is deleted with
2997 * no notification. Take reference to prevent accessing freed
2998 * memory.
2999 */
3000 mlxsw_sp_rt6->rt = rt;
3001 rt6_hold(rt);
3003 return mlxsw_sp_rt6;
3004 }
3006 #if IS_ENABLED(CONFIG_IPV6)
3007 static void mlxsw_sp_rt6_release(struct rt6_info *rt)
3008 {
3009 rt6_release(rt);
3010 }
3011 #else
3012 static void mlxsw_sp_rt6_release(struct rt6_info *rt)
3013 {
3014 }
3015 #endif
3017 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
3019 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
3020 kfree(mlxsw_sp_rt6);
3021 }
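/* rt6_info objects are reference counted only when IPv6 is built in;
 * the !CONFIG_IPV6 stub above keeps the common code free of ifdefs.
 */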
3023 static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
3025 /* RTF_CACHE routes are ignored */
3026 return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
3029 static struct rt6_info *
3030 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
3032 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
3033 list)->rt;
3034 }
3036 static struct mlxsw_sp_fib6_entry *
3037 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
3038 const struct rt6_info *nrt, bool replace)
3040 struct mlxsw_sp_fib6_entry *fib6_entry;
3042 if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
3043 return NULL;
3045 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
3046 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
3048 /* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
3049 * virtual router.
3050 */
3051 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
3052 continue;
3053 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
3054 break;
3055 if (rt->rt6i_metric < nrt->rt6i_metric)
3056 continue;
3057 if (rt->rt6i_metric == nrt->rt6i_metric &&
3058 mlxsw_sp_fib6_rt_can_mp(rt))
3059 return fib6_entry;
3060 if (rt->rt6i_metric > nrt->rt6i_metric)
3061 break;
3062 }
3064 return NULL;
3065 }
3067 static struct mlxsw_sp_rt6 *
3068 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
3069 const struct rt6_info *rt)
3071 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3073 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3074 if (mlxsw_sp_rt6->rt == rt)
3075 return mlxsw_sp_rt6;
3076 }
3078 return NULL;
3079 }
3081 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
3082 struct mlxsw_sp_nexthop_group *nh_grp,
3083 struct mlxsw_sp_nexthop *nh,
3084 const struct rt6_info *rt)
3086 struct net_device *dev = rt->dst.dev;
3087 struct mlxsw_sp_rif *rif;
3090 nh->nh_grp = nh_grp;
3091 memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));
3093 if (!dev)
3094 return 0;
3096 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3097 if (!rif)
3098 return 0;
3099 mlxsw_sp_nexthop_rif_init(nh, rif);
3101 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3102 if (err)
3103 goto err_nexthop_neigh_init;
3105 return 0;
3107 err_nexthop_neigh_init:
3108 mlxsw_sp_nexthop_rif_fini(nh);
3109 return err;
3110 }
3112 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
3113 struct mlxsw_sp_nexthop *nh)
3115 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3116 mlxsw_sp_nexthop_rif_fini(nh);
3119 static struct mlxsw_sp_nexthop_group *
3120 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
3121 struct mlxsw_sp_fib6_entry *fib6_entry)
3123 struct mlxsw_sp_nexthop_group *nh_grp;
3124 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3125 struct mlxsw_sp_nexthop *nh;
3126 size_t alloc_size;
3127 int i = 0;
3128 int err;
3130 alloc_size = sizeof(*nh_grp) +
3131 fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
3132 nh_grp = kzalloc(alloc_size, GFP_KERNEL);
3133 if (!nh_grp)
3134 return ERR_PTR(-ENOMEM);
3135 INIT_LIST_HEAD(&nh_grp->fib_list);
3136 #if IS_ENABLED(CONFIG_IPV6)
3137 nh_grp->neigh_tbl = &nd_tbl;
3138 #endif
3139 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
3140 struct mlxsw_sp_rt6, list);
3141 nh_grp->gateway = !!(mlxsw_sp_rt6->rt->rt6i_flags & RTF_GATEWAY);
3142 nh_grp->count = fib6_entry->nrt6;
3143 for (i = 0; i < nh_grp->count; i++) {
3144 struct rt6_info *rt = mlxsw_sp_rt6->rt;
3146 nh = &nh_grp->nexthops[i];
3147 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
3148 if (err)
3149 goto err_nexthop6_init;
3150 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
3151 }
3152 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3153 return nh_grp;
3155 err_nexthop6_init:
3156 for (i--; i >= 0; i--) {
3157 nh = &nh_grp->nexthops[i];
3158 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
3159 }
3160 kfree(nh_grp);
3161 return ERR_PTR(err);
3162 }
3165 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
3166 struct mlxsw_sp_nexthop_group *nh_grp)
3168 struct mlxsw_sp_nexthop *nh;
3169 int i = nh_grp->count;
3171 for (i--; i >= 0; i--) {
3172 nh = &nh_grp->nexthops[i];
3173 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
3175 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
3176 WARN_ON(nh_grp->adj_index_valid);
3177 kfree(nh_grp);
3178 }
3180 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
3181 struct mlxsw_sp_fib6_entry *fib6_entry)
3183 struct mlxsw_sp_nexthop_group *nh_grp;
3185 /* For now, don't consolidate nexthop groups */
3186 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
3187 if (IS_ERR(nh_grp))
3188 return PTR_ERR(nh_grp);
3190 list_add_tail(&fib6_entry->common.nexthop_group_node,
3191 &nh_grp->fib_list);
3192 fib6_entry->common.nh_group = nh_grp;
3194 return 0;
3195 }
3197 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
3198 struct mlxsw_sp_fib_entry *fib_entry)
3200 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
3202 list_del(&fib_entry->nexthop_group_node);
3203 if (!list_empty(&nh_grp->fib_list))
3204 return;
3205 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
3206 }
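/* Unlike the IPv4 path, IPv6 nexthop groups are not consolidated across
 * routes yet (see the comment in the group_get() helper above), so every
 * fib6 entry owns a group that dies with its last user.
 */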
3209 mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
3210 struct mlxsw_sp_fib6_entry *fib6_entry)
3212 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
3215 fib6_entry->common.nh_group = NULL;
3216 list_del(&fib6_entry->common.nexthop_group_node);
3218 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
3219 if (err)
3220 goto err_nexthop6_group_get;
3222 /* In case this entry is offloaded, then the adjacency index
3223 * currently associated with it in the device's table is that
3224 * of the old group. Start using the new one instead.
3225 */
3226 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
3227 if (err)
3228 goto err_fib_node_entry_add;
3230 if (list_empty(&old_nh_grp->fib_list))
3231 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
3233 return 0;
3235 err_fib_node_entry_add:
3236 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
3237 err_nexthop6_group_get:
3238 list_add_tail(&fib6_entry->common.nexthop_group_node,
3239 &old_nh_grp->fib_list);
3240 fib6_entry->common.nh_group = old_nh_grp;
3241 return err;
3242 }
3245 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
3246 struct mlxsw_sp_fib6_entry *fib6_entry,
3247 struct rt6_info *rt)
3249 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3252 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
3253 if (IS_ERR(mlxsw_sp_rt6))
3254 return PTR_ERR(mlxsw_sp_rt6);
3256 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
3257 fib6_entry->nrt6++;
3259 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
3260 if (err)
3261 goto err_nexthop6_group_update;
3263 return 0;
3265 err_nexthop6_group_update:
3266 fib6_entry->nrt6--;
3267 list_del(&mlxsw_sp_rt6->list);
3268 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
3269 return err;
3270 }
3273 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
3274 struct mlxsw_sp_fib6_entry *fib6_entry,
3275 struct rt6_info *rt)
3277 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3279 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
3280 if (WARN_ON(!mlxsw_sp_rt6))
3281 return;
3283 fib6_entry->nrt6--;
3284 list_del(&mlxsw_sp_rt6->list);
3285 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
3286 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
3287 }
3289 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp_fib_entry *fib_entry,
3290 const struct rt6_info *rt)
3292 /* Packets hitting RTF_REJECT routes need to be discarded by the
3293 * stack. We can rely on their destination device not having a
3294 * RIF (it's the loopback device) and can thus use action type
3295 * local, which will cause them to be trapped with a lower
3296 * priority than packets that need to be locally received.
3297 */
3298 if (rt->rt6i_flags & RTF_LOCAL)
3299 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
3300 else if (rt->rt6i_flags & RTF_REJECT)
3301 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
3302 else if (rt->rt6i_flags & RTF_GATEWAY)
3303 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
3305 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
3306 }
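/* The mapping mirrors the IPv4 type selection above: host routes trap to
 * the CPU, reject routes use the cheaper local action, and only gateway
 * routes are candidates for a real adjacency entry.
 */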
3309 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
3311 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
3313 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
3314 list) {
3315 fib6_entry->nrt6--;
3316 list_del(&mlxsw_sp_rt6->list);
3317 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
3318 }
3319 }
3321 static struct mlxsw_sp_fib6_entry *
3322 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
3323 struct mlxsw_sp_fib_node *fib_node,
3324 struct rt6_info *rt)
3326 struct mlxsw_sp_fib6_entry *fib6_entry;
3327 struct mlxsw_sp_fib_entry *fib_entry;
3328 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3331 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
3332 if (!fib6_entry)
3333 return ERR_PTR(-ENOMEM);
3334 fib_entry = &fib6_entry->common;
3336 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
3337 if (IS_ERR(mlxsw_sp_rt6)) {
3338 err = PTR_ERR(mlxsw_sp_rt6);
3339 goto err_rt6_create;
3342 mlxsw_sp_fib6_entry_type_set(fib_entry, mlxsw_sp_rt6->rt);
3344 INIT_LIST_HEAD(&fib6_entry->rt6_list);
3345 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
3346 fib6_entry->nrt6 = 1;
3347 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
3348 if (err)
3349 goto err_nexthop6_group_get;
3351 fib_entry->fib_node = fib_node;
3353 return fib6_entry;
3355 err_nexthop6_group_get:
3356 list_del(&mlxsw_sp_rt6->list);
3357 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
3358 err_rt6_create:
3359 kfree(fib6_entry);
3360 return ERR_PTR(err);
3361 }
3363 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
3364 struct mlxsw_sp_fib6_entry *fib6_entry)
3366 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
3367 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
3368 WARN_ON(fib6_entry->nrt6);
3369 kfree(fib6_entry);
3370 }
3372 static struct mlxsw_sp_fib6_entry *
3373 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
3374 const struct rt6_info *nrt, bool replace)
3376 struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
3378 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
3379 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
3381 if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
3382 continue;
3383 if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
3384 break;
3385 if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
3386 if (mlxsw_sp_fib6_rt_can_mp(rt) ==
3387 mlxsw_sp_fib6_rt_can_mp(nrt))
3388 return fib6_entry;
3389 if (mlxsw_sp_fib6_rt_can_mp(nrt))
3390 fallback = fallback ?: fib6_entry;
3391 }
3392 if (rt->rt6i_metric > nrt->rt6i_metric)
3393 return fallback ?: fib6_entry;
3394 }
3396 return fallback;
3397 }
3399 static int
3400 mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
3401 bool replace)
3402 {
3403 struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
3404 struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
3405 struct mlxsw_sp_fib6_entry *fib6_entry;
3407 fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);
3409 if (replace && WARN_ON(!fib6_entry))
3410 return -EINVAL;
3412 if (fib6_entry) {
3413 list_add_tail(&new6_entry->common.list,
3414 &fib6_entry->common.list);
3415 } else {
3416 struct mlxsw_sp_fib6_entry *last;
3418 list_for_each_entry(last, &fib_node->entry_list, common.list) {
3419 struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);
3421 if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
3422 break;
3423 fib6_entry = last;
3424 }
3426 if (fib6_entry)
3427 list_add(&new6_entry->common.list,
3428 &fib6_entry->common.list);
3429 else
3430 list_add(&new6_entry->common.list,
3431 &fib_node->entry_list);
3432 }
3434 return 0;
3435 }
3438 mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
3440 list_del(&fib6_entry->common.list);
3441 }
3443 static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
3444 struct mlxsw_sp_fib6_entry *fib6_entry,
3445 bool replace)
3446 {
3447 int err;
3449 err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
3450 if (err)
3451 return err;
3453 err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
3454 if (err)
3455 goto err_fib_node_entry_add;
3457 return 0;
3459 err_fib_node_entry_add:
3460 mlxsw_sp_fib6_node_list_remove(fib6_entry);
3461 return err;
3462 }
3465 mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
3466 struct mlxsw_sp_fib6_entry *fib6_entry)
3468 mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
3469 mlxsw_sp_fib6_node_list_remove(fib6_entry);
3472 static struct mlxsw_sp_fib6_entry *
3473 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
3474 const struct rt6_info *rt)
3476 struct mlxsw_sp_fib6_entry *fib6_entry;
3477 struct mlxsw_sp_fib_node *fib_node;
3478 struct mlxsw_sp_fib *fib;
3479 struct mlxsw_sp_vr *vr;
3481 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
3482 if (!vr)
3483 return NULL;
3484 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
3486 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
3487 sizeof(rt->rt6i_dst.addr),
3488 rt->rt6i_dst.plen);
3489 if (!fib_node)
3490 return NULL;
3492 list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
3493 struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
3495 if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
3496 rt->rt6i_metric == iter_rt->rt6i_metric &&
3497 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
3498 return fib6_entry;
3499 }
3501 return NULL;
3502 }
3504 static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
3505 struct mlxsw_sp_fib6_entry *fib6_entry,
3506 bool replace)
3507 {
3508 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
3509 struct mlxsw_sp_fib6_entry *replaced;
3511 if (!replace)
3512 return;
3514 replaced = list_next_entry(fib6_entry, common.list);
3516 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
3517 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
3518 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3519 }
3521 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
3522 struct rt6_info *rt, bool replace)
3524 struct mlxsw_sp_fib6_entry *fib6_entry;
3525 struct mlxsw_sp_fib_node *fib_node;
3528 if (mlxsw_sp->router->aborted)
3529 return 0;
3531 if (rt->rt6i_src.plen)
3532 return -EINVAL;
3534 if (mlxsw_sp_fib6_rt_should_ignore(rt))
3535 return 0;
3537 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
3538 &rt->rt6i_dst.addr,
3539 sizeof(rt->rt6i_dst.addr),
3540 rt->rt6i_dst.plen,
3541 MLXSW_SP_L3_PROTO_IPV6);
3542 if (IS_ERR(fib_node))
3543 return PTR_ERR(fib_node);
3545 /* Before creating a new entry, try to append route to an existing
3546 * multipath entry.
3547 */
3548 fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
3549 if (fib6_entry) {
3550 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
3551 if (err)
3552 goto err_fib6_entry_nexthop_add;
3553 return 0;
3554 }
3556 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
3557 if (IS_ERR(fib6_entry)) {
3558 err = PTR_ERR(fib6_entry);
3559 goto err_fib6_entry_create;
3562 err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
3563 if (err)
3564 goto err_fib6_node_entry_link;
3566 mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);
3568 return 0;
3570 err_fib6_node_entry_link:
3571 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
3572 err_fib6_entry_create:
3573 err_fib6_entry_nexthop_add:
3574 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3575 return err;
3576 }
3578 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
3579 struct rt6_info *rt)
3581 struct mlxsw_sp_fib6_entry *fib6_entry;
3582 struct mlxsw_sp_fib_node *fib_node;
3584 if (mlxsw_sp->router->aborted)
3585 return;
3587 if (mlxsw_sp_fib6_rt_should_ignore(rt))
3588 return;
3590 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
3591 if (WARN_ON(!fib6_entry))
3592 return;
3594 /* If route is part of a multipath entry, but not the last one
3595 * removed, then only reduce its nexthop group.
3596 */
3597 if (!list_is_singular(&fib6_entry->rt6_list)) {
3598 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
3599 return;
3600 }
3602 fib_node = fib6_entry->common.fib_node;
3604 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
3605 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
3606 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3607 }
3609 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
3610 enum mlxsw_reg_ralxx_protocol proto,
3611 u8 tree_id)
3612 {
3613 char ralta_pl[MLXSW_REG_RALTA_LEN];
3614 char ralst_pl[MLXSW_REG_RALST_LEN];
3617 mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
3618 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
3619 if (err)
3620 return err;
3622 mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
3623 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
3624 if (err)
3625 return err;
3627 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
3628 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
3629 char raltb_pl[MLXSW_REG_RALTB_LEN];
3630 char ralue_pl[MLXSW_REG_RALUE_LEN];
3632 if (!mlxsw_sp_vr_is_used(vr))
3633 continue;
3635 mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
3636 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
3637 raltb_pl);
3638 if (err)
3639 return err;
3641 mlxsw_reg_ralue_pack(ralue_pl, proto,
3642 MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
3643 mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
3644 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
3645 ralue_pl);
3646 if (err)
3647 return err;
3648 }
3650 return 0;
3651 }
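/* The abort path programs a catch-all: an empty LPM tree bound to every
 * in-use virtual router plus a default-prefix ip2me entry, so all routed
 * packets are punted to the CPU once hardware offload is given up.
 */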
3653 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
3655 enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
3658 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
3659 MLXSW_SP_LPM_TREE_MIN);
3660 if (err)
3661 return err;
3663 proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
3664 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
3665 MLXSW_SP_LPM_TREE_MIN + 1);
3666 }
3668 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
3669 struct mlxsw_sp_fib_node *fib_node)
3671 struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;
3673 list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
3674 common.list) {
3675 bool do_break = &tmp->common.list == &fib_node->entry_list;
3677 mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
3678 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
3679 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3680 /* Break when entry list is empty and node was freed.
3681 * Otherwise, we'll access freed memory in the next
3682 * iteration.
3683 */
3684 if (do_break)
3685 break;
3686 }
3687 }
3689 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
3690 struct mlxsw_sp_fib_node *fib_node)
3692 struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;
3694 list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
3695 common.list) {
3696 bool do_break = &tmp->common.list == &fib_node->entry_list;
3698 mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
3699 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
3700 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
3702 if (do_break)
3703 break;
3704 }
3705 }
3706 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
3707 struct mlxsw_sp_fib_node *fib_node)
3709 switch (fib_node->fib->proto) {
3710 case MLXSW_SP_L3_PROTO_IPV4:
3711 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
3712 break;
3713 case MLXSW_SP_L3_PROTO_IPV6:
3714 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
3715 break;
3716 }
3717 }
3719 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
3720 struct mlxsw_sp_vr *vr,
3721 enum mlxsw_sp_l3proto proto)
3723 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
3724 struct mlxsw_sp_fib_node *fib_node, *tmp;
3726 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
3727 bool do_break = &tmp->list == &fib->node_list;
3729 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
3731 if (do_break)
3732 break;
3733 }
3734 }
3735 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
3739 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
3740 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
3742 if (!mlxsw_sp_vr_is_used(vr))
3743 continue;
3744 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
3746 /* If virtual router was only used for IPv4, then it's no
3747 * longer used.
3748 */
3749 if (!mlxsw_sp_vr_is_used(vr))
3750 continue;
3751 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
3752 }
3753 }
3755 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
3759 if (mlxsw_sp->router->aborted)
3760 return;
3761 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
3762 mlxsw_sp_router_fib_flush(mlxsw_sp);
3763 mlxsw_sp->router->aborted = true;
3764 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
3765 if (err)
3766 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
3767 }
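/* FIB notifications arrive in atomic context, so each event is copied
 * into a work item (with references taken on the objects it points at)
 * and replayed under RTNL from the work functions below.
 */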
3769 struct mlxsw_sp_fib_event_work {
3770 struct work_struct work;
3771 union {
3772 struct fib6_entry_notifier_info fen6_info;
3773 struct fib_entry_notifier_info fen_info;
3774 struct fib_rule_notifier_info fr_info;
3775 struct fib_nh_notifier_info fnh_info;
3776 };
3777 struct mlxsw_sp *mlxsw_sp;
3778 unsigned long event;
3779 };
3781 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
3783 struct mlxsw_sp_fib_event_work *fib_work =
3784 container_of(work, struct mlxsw_sp_fib_event_work, work);
3785 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
3786 struct fib_rule *rule;
3787 bool replace, append;
3790 /* Protect internal structures from changes */
3791 rtnl_lock();
3792 switch (fib_work->event) {
3793 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
3794 case FIB_EVENT_ENTRY_APPEND: /* fall through */
3795 case FIB_EVENT_ENTRY_ADD:
3796 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
3797 append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
3798 err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
3799 replace, append);
3800 if (err)
3801 mlxsw_sp_router_fib_abort(mlxsw_sp);
3802 fib_info_put(fib_work->fen_info.fi);
3803 break;
3804 case FIB_EVENT_ENTRY_DEL:
3805 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
3806 fib_info_put(fib_work->fen_info.fi);
3807 break;
3808 case FIB_EVENT_RULE_ADD: /* fall through */
3809 case FIB_EVENT_RULE_DEL:
3810 rule = fib_work->fr_info.rule;
3811 if (!fib4_rule_default(rule) && !rule->l3mdev)
3812 mlxsw_sp_router_fib_abort(mlxsw_sp);
3813 fib_rule_put(rule);
3814 break;
3815 case FIB_EVENT_NH_ADD: /* fall through */
3816 case FIB_EVENT_NH_DEL:
3817 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
3818 fib_work->fnh_info.fib_nh);
3819 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
3820 break;
3821 }
3822 rtnl_unlock();
3823 kfree(fib_work);
3824 }
3826 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
3828 struct mlxsw_sp_fib_event_work *fib_work =
3829 container_of(work, struct mlxsw_sp_fib_event_work, work);
3830 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
3831 struct fib_rule *rule;
3832 bool replace;
3833 int err;
3835 rtnl_lock();
3836 switch (fib_work->event) {
3837 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
3838 case FIB_EVENT_ENTRY_ADD:
3839 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
3840 err = mlxsw_sp_router_fib6_add(mlxsw_sp,
3841 fib_work->fen6_info.rt, replace);
3842 if (err)
3843 mlxsw_sp_router_fib_abort(mlxsw_sp);
3844 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
3845 break;
3846 case FIB_EVENT_ENTRY_DEL:
3847 mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
3848 mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
3849 break;
3850 case FIB_EVENT_RULE_ADD: /* fall through */
3851 case FIB_EVENT_RULE_DEL:
3852 rule = fib_work->fr_info.rule;
3853 if (!fib6_rule_default(rule) && !rule->l3mdev)
3854 mlxsw_sp_router_fib_abort(mlxsw_sp);
3855 fib_rule_put(rule);
3856 break;
3857 }
3858 rtnl_unlock();
3859 kfree(fib_work);
3860 }
3862 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
3863 struct fib_notifier_info *info)
3865 switch (fib_work->event) {
3866 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
3867 case FIB_EVENT_ENTRY_APPEND: /* fall through */
3868 case FIB_EVENT_ENTRY_ADD: /* fall through */
3869 case FIB_EVENT_ENTRY_DEL:
3870 memcpy(&fib_work->fen_info, info, sizeof(fib_work->fen_info));
3871 /* Take reference on fib_info to prevent it from being
3872 * freed while work is queued. Release it afterwards.
3873 */
3874 fib_info_hold(fib_work->fen_info.fi);
3875 break;
3876 case FIB_EVENT_RULE_ADD: /* fall through */
3877 case FIB_EVENT_RULE_DEL:
3878 memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
3879 fib_rule_get(fib_work->fr_info.rule);
3880 break;
3881 case FIB_EVENT_NH_ADD: /* fall through */
3882 case FIB_EVENT_NH_DEL:
3883 memcpy(&fib_work->fnh_info, info, sizeof(fib_work->fnh_info));
3884 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
3885 break;
3886 }
3887 }
3889 static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
3890 struct fib_notifier_info *info)
3892 switch (fib_work->event) {
3893 case FIB_EVENT_ENTRY_REPLACE: /* fall through */
3894 case FIB_EVENT_ENTRY_ADD: /* fall through */
3895 case FIB_EVENT_ENTRY_DEL:
3896 memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info));
3897 rt6_hold(fib_work->fen6_info.rt);
3898 break;
3899 case FIB_EVENT_RULE_ADD: /* fall through */
3900 case FIB_EVENT_RULE_DEL:
3901 memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
3902 fib_rule_get(fib_work->fr_info.rule);
3903 break;
3904 }
3905 }
3907 /* Called with rcu_read_lock() */
3908 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
3909 unsigned long event, void *ptr)
3911 struct mlxsw_sp_fib_event_work *fib_work;
3912 struct fib_notifier_info *info = ptr;
3913 struct mlxsw_sp_router *router;
3915 if (!net_eq(info->net, &init_net))
3916 return NOTIFY_DONE;
3918 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
3919 if (WARN_ON(!fib_work))
3920 return NOTIFY_BAD;
3922 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
3923 fib_work->mlxsw_sp = router->mlxsw_sp;
3924 fib_work->event = event;
3926 switch (info->family) {
3927 case AF_INET:
3928 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
3929 mlxsw_sp_router_fib4_event(fib_work, info);
3930 break;
3931 case AF_INET6:
3932 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
3933 mlxsw_sp_router_fib6_event(fib_work, info);
3934 break;
3935 }
3937 mlxsw_core_schedule_work(&fib_work->work);
3939 return NOTIFY_DONE;
3940 }
3942 static struct mlxsw_sp_rif *
3943 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
3944 const struct net_device *dev)
3948 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
3949 if (mlxsw_sp->router->rifs[i] &&
3950 mlxsw_sp->router->rifs[i]->dev == dev)
3951 return mlxsw_sp->router->rifs[i];
3953 return NULL;
3954 }
3956 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
3958 char ritr_pl[MLXSW_REG_RITR_LEN];
3961 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
3962 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3963 if (WARN_ON_ONCE(err))
3964 return err;
3966 mlxsw_reg_ritr_enable_set(ritr_pl, false);
3967 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
3970 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
3971 struct mlxsw_sp_rif *rif)
3973 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
3974 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
3975 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
3976 }
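/* Teardown order matters: the RIF is disabled in hardware first, so no
 * new traffic hits it while the nexthops and neighbour entries that were
 * using it are being cleaned up.
 */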
3979 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
3980 unsigned long event)
3982 struct inet6_dev *inet6_dev;
3983 bool addr_list_empty = true;
3984 struct in_device *idev;
3986 switch (event) {
3987 case NETDEV_UP:
3988 return rif == NULL;
3989 case NETDEV_DOWN:
3990 idev = __in_dev_get_rtnl(dev);
3991 if (idev && idev->ifa_list)
3992 addr_list_empty = false;
3994 inet6_dev = __in6_dev_get(dev);
3995 if (addr_list_empty && inet6_dev &&
3996 !list_empty(&inet6_dev->addr_list))
3997 addr_list_empty = false;
3999 if (rif && addr_list_empty &&
4000 !netif_is_l3_slave(rif->dev))
4001 return true;
4002 /* It is possible we already removed the RIF ourselves
4003 * if it was assigned to a netdev that is now a bridge
4004 * or LAG slave.
4005 */
4006 return false;
4007 }
4009 return false;
4010 }
4012 static enum mlxsw_sp_rif_type
4013 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
4014 const struct net_device *dev)
4016 enum mlxsw_sp_fid_type type;
4018 /* RIF type is derived from the type of the underlying FID */
4019 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
4020 type = MLXSW_SP_FID_TYPE_8021Q;
4021 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
4022 type = MLXSW_SP_FID_TYPE_8021Q;
4023 else if (netif_is_bridge_master(dev))
4024 type = MLXSW_SP_FID_TYPE_8021D;
4025 else
4026 type = MLXSW_SP_FID_TYPE_RFID;
4028 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
4031 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
4035 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
4036 if (!mlxsw_sp->router->rifs[i]) {
4037 *p_rif_index = i;
4038 return 0;
4039 }
4040 }
4042 return -ENOBUFS;
4043 }
4045 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
4046 u16 vr_id,
4047 struct net_device *l3_dev)
4049 struct mlxsw_sp_rif *rif;
4051 rif = kzalloc(rif_size, GFP_KERNEL);
4052 if (!rif)
4053 return NULL;
4055 INIT_LIST_HEAD(&rif->nexthop_list);
4056 INIT_LIST_HEAD(&rif->neigh_list);
4057 ether_addr_copy(rif->addr, l3_dev->dev_addr);
4058 rif->mtu = l3_dev->mtu;
4059 rif->vr_id = vr_id;
4060 rif->dev = l3_dev;
4061 rif->rif_index = rif_index;
4063 return rif;
4064 }
4066 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
4069 return mlxsw_sp->router->rifs[rif_index];
4072 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
4074 return rif->rif_index;
4077 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
4079 return rif->dev->ifindex;
4082 static struct mlxsw_sp_rif *
4083 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
4084 const struct mlxsw_sp_rif_params *params)
4086 u32 tb_id = l3mdev_fib_table(params->dev);
4087 const struct mlxsw_sp_rif_ops *ops;
4088 enum mlxsw_sp_rif_type type;
4089 struct mlxsw_sp_rif *rif;
4090 struct mlxsw_sp_fid *fid;
4091 struct mlxsw_sp_vr *vr;
4092 u16 rif_index;
4093 int err;
4095 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
4096 ops = mlxsw_sp->router->rif_ops_arr[type];
4098 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
4099 if (IS_ERR(vr))
4100 return ERR_CAST(vr);
4102 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
4103 if (err)
4104 goto err_rif_index_alloc;
4106 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
4107 if (!rif) {
4108 err = -ENOMEM;
4109 goto err_rif_alloc;
4110 }
4111 rif->mlxsw_sp = mlxsw_sp;
4112 rif->ops = ops;
4114 fid = ops->fid_get(rif);
4115 if (IS_ERR(fid)) {
4116 err = PTR_ERR(fid);
4117 goto err_fid_get;
4118 }
4119 rif->fid = fid;
4121 if (ops->setup)
4122 ops->setup(rif, params);
4124 err = ops->configure(rif);
4125 if (err)
4126 goto err_configure;
4128 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
4129 mlxsw_sp_fid_index(fid), true);
4130 if (err)
4131 goto err_rif_fdb_op;
4133 mlxsw_sp_rif_counters_alloc(rif);
4134 mlxsw_sp_fid_rif_set(fid, rif);
4135 mlxsw_sp->router->rifs[rif_index] = rif;
4136 vr->rif_count++;
4138 return rif;
4140 err_rif_fdb_op:
4141 ops->deconfigure(rif);
4142 err_configure:
4143 mlxsw_sp_fid_put(fid);
4144 err_fid_get:
4145 kfree(rif);
4146 err_rif_alloc:
4147 err_rif_index_alloc:
4148 mlxsw_sp_vr_put(vr);
4149 return ERR_PTR(err);
4150 }
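/* RIF creation is driven through the per-type ops (sub-port, VLAN, FID):
 * the generic path allocates the index, virtual router and FID, while the
 * type-specific configure() callback programs the RITR register.
 */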
4152 void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
4154 const struct mlxsw_sp_rif_ops *ops = rif->ops;
4155 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4156 struct mlxsw_sp_fid *fid = rif->fid;
4157 struct mlxsw_sp_vr *vr;
4159 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
4160 vr = &mlxsw_sp->router->vrs[rif->vr_id];
4162 vr->rif_count--;
4163 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
4164 mlxsw_sp_fid_rif_set(fid, NULL);
4165 mlxsw_sp_rif_counters_free(rif);
4166 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
4167 mlxsw_sp_fid_index(fid), false);
4168 ops->deconfigure(rif);
4169 mlxsw_sp_fid_put(fid);
4170 kfree(rif);
4171 mlxsw_sp_vr_put(vr);
4172 }
4175 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
4176 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
4178 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
4180 params->vid = mlxsw_sp_port_vlan->vid;
4181 params->lag = mlxsw_sp_port->lagged;
4182 if (params->lag)
4183 params->lag_id = mlxsw_sp_port->lag_id;
4184 else
4185 params->system_port = mlxsw_sp_port->local_port;
4186 }
4189 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
4190 struct net_device *l3_dev)
4192 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
4193 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4194 u16 vid = mlxsw_sp_port_vlan->vid;
4195 struct mlxsw_sp_rif *rif;
4196 struct mlxsw_sp_fid *fid;
4199 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
4200 if (!rif) {
4201 struct mlxsw_sp_rif_params params = {
4202 .dev = l3_dev,
4203 };
4205 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
4206 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
4207 if (IS_ERR(rif))
4208 return PTR_ERR(rif);
4209 }
4211 /* FID was already created, just take a reference */
4212 fid = rif->ops->fid_get(rif);
4213 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
4214 if (err)
4215 goto err_fid_port_vid_map;
4217 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
4218 if (err)
4219 goto err_port_vid_learning_set;
4221 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
4222 BR_STATE_FORWARDING);
4223 if (err)
4224 goto err_port_vid_stp_set;
4226 mlxsw_sp_port_vlan->fid = fid;
4228 return 0;
4230 err_port_vid_stp_set:
4231 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4232 err_port_vid_learning_set:
4233 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
4234 err_fid_port_vid_map:
4235 mlxsw_sp_fid_put(fid);
4236 return err;
4237 }
4240 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
4242 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
4243 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
4244 u16 vid = mlxsw_sp_port_vlan->vid;
4246 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
4247 return;
4249 mlxsw_sp_port_vlan->fid = NULL;
4250 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
4251 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4252 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
4253 /* If router port holds the last reference on the rFID, then the
4254 * associated Sub-port RIF will be destroyed.
4255 */
4256 mlxsw_sp_fid_put(fid);
4257 }
4259 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
4260 struct net_device *port_dev,
4261 unsigned long event, u16 vid)
4263 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
4264 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
4266 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
4267 if (WARN_ON(!mlxsw_sp_port_vlan))
4268 return -EINVAL;
4270 switch (event) {
4271 case NETDEV_UP:
4272 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
4273 l3_dev);
4274 case NETDEV_DOWN:
4275 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
4276 break;
4277 }
4279 return 0;
4280 }
4282 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
4283 unsigned long event)
4285 if (netif_is_bridge_port(port_dev) ||
4286 netif_is_lag_port(port_dev) ||
4287 netif_is_ovs_port(port_dev))
4288 return 0;
4290 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
4291 }
4293 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
4294 struct net_device *lag_dev,
4295 unsigned long event, u16 vid)
4297 struct net_device *port_dev;
4298 struct list_head *iter;
4301 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
4302 if (mlxsw_sp_port_dev_check(port_dev)) {
4303 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
4304 port_dev,
4305 event, vid);
4306 if (err)
4307 return err;
4308 }
4309 }
4311 return 0;
4312 }
4314 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
4315 unsigned long event)
4317 if (netif_is_bridge_port(lag_dev))
4318 return 0;
4320 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
4321 }
4323 static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
4324 unsigned long event)
4326 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
4327 struct mlxsw_sp_rif_params params = {
4328 .dev = l3_dev,
4329 };
4330 struct mlxsw_sp_rif *rif;
4332 switch (event) {
4333 case NETDEV_UP:
4334 rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
4335 if (IS_ERR(rif))
4336 return PTR_ERR(rif);
4337 break;
4338 case NETDEV_DOWN:
4339 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
4340 mlxsw_sp_rif_destroy(rif);
4341 break;
4342 }
4344 return 0;
4345 }
4347 static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
4348 unsigned long event)
4350 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4351 u16 vid = vlan_dev_vlan_id(vlan_dev);
4353 if (netif_is_bridge_port(vlan_dev))
4354 return 0;
4356 if (mlxsw_sp_port_dev_check(real_dev))
4357 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
4358 event, vid);
4359 else if (netif_is_lag_master(real_dev))
4360 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
4361 vid);
4362 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
4363 return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);
4365 return 0;
4366 }
4368 static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
4369 unsigned long event)
4371 if (mlxsw_sp_port_dev_check(dev))
4372 return mlxsw_sp_inetaddr_port_event(dev, event);
4373 else if (netif_is_lag_master(dev))
4374 return mlxsw_sp_inetaddr_lag_event(dev, event);
4375 else if (netif_is_bridge_master(dev))
4376 return mlxsw_sp_inetaddr_bridge_event(dev, event);
4377 else if (is_vlan_dev(dev))
4378 return mlxsw_sp_inetaddr_vlan_event(dev, event);
4380 return 0;
4381 }
4383 int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
4384 unsigned long event, void *ptr)
4386 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
4387 struct net_device *dev = ifa->ifa_dev->dev;
4388 struct mlxsw_sp *mlxsw_sp;
4389 struct mlxsw_sp_rif *rif;
4390 int err = 0;
4392 mlxsw_sp = mlxsw_sp_lower_get(dev);
4393 if (!mlxsw_sp)
4394 goto out;
4396 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4397 if (!mlxsw_sp_rif_should_config(rif, dev, event))
4398 goto out;
4400 err = __mlxsw_sp_inetaddr_event(dev, event);
4401 out:
4402 return notifier_from_errno(err);
4403 }
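/* IPv4 address events can be handled synchronously because the inetaddr
 * notifier chain runs under RTNL; the IPv6 variant below has no such
 * guarantee and therefore defers to a work item, mirroring the FIB event
 * handling.
 */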
4405 struct mlxsw_sp_inet6addr_event_work {
4406 struct work_struct work;
4407 struct net_device *dev;
4408 unsigned long event;
4409 };
4411 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
4413 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
4414 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
4415 struct net_device *dev = inet6addr_work->dev;
4416 unsigned long event = inet6addr_work->event;
4417 struct mlxsw_sp *mlxsw_sp;
4418 struct mlxsw_sp_rif *rif;
4420 rtnl_lock();
4421 mlxsw_sp = mlxsw_sp_lower_get(dev);
4422 if (!mlxsw_sp)
4423 goto out;
4425 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4426 if (!mlxsw_sp_rif_should_config(rif, dev, event))
4427 goto out;
4429 __mlxsw_sp_inetaddr_event(dev, event);
4430 out:
4431 rtnl_unlock();
4432 dev_put(dev);
4433 kfree(inet6addr_work);
4434 }
4436 /* Called with rcu_read_lock() */
4437 int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
4438 unsigned long event, void *ptr)
4440 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
4441 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
4442 struct net_device *dev = if6->idev->dev;
4444 if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
4445 return NOTIFY_DONE;
4447 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
4448 if (!inet6addr_work)
4449 return NOTIFY_BAD;
4451 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
4452 inet6addr_work->dev = dev;
4453 inet6addr_work->event = event;
4454 dev_hold(dev);
4455 mlxsw_core_schedule_work(&inet6addr_work->work);
4457 return NOTIFY_DONE;
4458 }
4460 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
4461 const char *mac, int mtu)
4463 char ritr_pl[MLXSW_REG_RITR_LEN];
4466 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
4467 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
4468 if (err)
4469 return err;
4471 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
4472 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
4473 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
4474 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
4477 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
4479 struct mlxsw_sp *mlxsw_sp;
4480 struct mlxsw_sp_rif *rif;
4484 mlxsw_sp = mlxsw_sp_lower_get(dev);
4485 if (!mlxsw_sp)
4486 return 0;
4488 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4489 if (!rif)
4490 return 0;
4491 fid_index = mlxsw_sp_fid_index(rif->fid);
4493 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
4494 if (err)
4495 return err;
4497 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
4498 dev->mtu);
4499 if (err)
4500 goto err_rif_edit;
4502 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
4503 if (err)
4504 goto err_rif_fdb_op;
4506 ether_addr_copy(rif->addr, dev->dev_addr);
4507 rif->mtu = dev->mtu;
4509 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
4511 return 0;
4513 err_rif_fdb_op:
4514 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
4515 err_rif_edit:
4516 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
4517 return err;
4518 }
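/* MAC or MTU changes are applied with a read-modify-write of RITR plus
 * an FDB update for the router MAC; on failure both are rolled back to
 * the values still cached in the RIF.
 */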
4520 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
4521 struct net_device *l3_dev)
4523 struct mlxsw_sp_rif *rif;
4525 /* If netdev is already associated with a RIF, then we need to
4526 * destroy it and create a new one with the new virtual router ID.
4527 */
4528 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
4529 if (rif)
4530 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
4532 return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
4533 }
4535 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
4536 struct net_device *l3_dev)
4538 struct mlxsw_sp_rif *rif;
4540 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
4541 if (!rif)
4542 return;
4543 __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
4544 }
4546 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
4547 struct netdev_notifier_changeupper_info *info)
4549 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
4550 int err = 0;
4552 if (!mlxsw_sp)
4553 return 0;
4555 switch (event) {
4556 case NETDEV_PRECHANGEUPPER:
4557 break;
4558 case NETDEV_CHANGEUPPER:
4559 if (info->linking)
4560 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
4561 else
4562 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
4563 break;
4564 }
4566 return err;
4567 }
4569 static struct mlxsw_sp_rif_subport *
4570 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
4572 return container_of(rif, struct mlxsw_sp_rif_subport, common);
4575 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
4576 const struct mlxsw_sp_rif_params *params)
4578 struct mlxsw_sp_rif_subport *rif_subport;
4580 rif_subport = mlxsw_sp_rif_subport_rif(rif);
4581 rif_subport->vid = params->vid;
4582 rif_subport->lag = params->lag;
4583 if (params->lag)
4584 rif_subport->lag_id = params->lag_id;
4585 else
4586 rif_subport->system_port = params->system_port;
4587 }
4589 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
4591 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4592 struct mlxsw_sp_rif_subport *rif_subport;
4593 char ritr_pl[MLXSW_REG_RITR_LEN];
4595 rif_subport = mlxsw_sp_rif_subport_rif(rif);
4596 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
4597 rif->rif_index, rif->vr_id, rif->dev->mtu,
4598 rif->dev->dev_addr);
4599 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
4600 rif_subport->lag ? rif_subport->lag_id :
4601 rif_subport->system_port,
4602 rif_subport->vid);
4604 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
4605 }
static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_rif_subport_op(rif, true);
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_subport_op(rif, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type		= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size	= sizeof(struct mlxsw_sp_rif_subport),
	.setup		= mlxsw_sp_rif_subport_setup,
	.configure	= mlxsw_sp_rif_subport_configure,
	.deconfigure	= mlxsw_sp_rif_subport_deconfigure,
	.fid_get	= mlxsw_sp_rif_subport_fid_get,
};

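/* VLAN and FID RIFs are both programmed through the RITR register;
 * only the interface type and the VID/FID field differ.
 */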
static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

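/* The "router port" is a virtual port used as the flood table
 * destination towards the router. It is numbered one past the last
 * physical port of the device.
 */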
static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

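/* Configuring a VLAN RIF creates the RITR entry and then opens the
 * FID's MC and BC flood tables towards the router port, so flooded
 * packets also reach the router.
 */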
static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	return 0;

err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);

	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
{
	u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type		= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size	= sizeof(struct mlxsw_sp_rif),
	.configure	= mlxsw_sp_rif_vlan_configure,
	.deconfigure	= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get	= mlxsw_sp_rif_vlan_fid_get,
};

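/* FID RIFs are bound to 802.1D FIDs (VLAN-unaware bridges) and are
 * configured like VLAN RIFs, but with the FID interface type.
 */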
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	return 0;

err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);

	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type		= MLXSW_SP_RIF_TYPE_FID,
	.rif_size	= sizeof(struct mlxsw_sp_rif),
	.configure	= mlxsw_sp_rif_fid_configure,
	.deconfigure	= mlxsw_sp_rif_fid_deconfigure,
	.fid_get	= mlxsw_sp_rif_fid_fid_get,
};

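/* RIF ops, indexed by RIF type. */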
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
};

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

	return 0;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

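/* RGCR is the router general configuration register. It enables IPv4
 * and IPv6 routing in the device and caps the number of router
 * interfaces at the MAX_RIFS resource.
 */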
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;
	return 0;
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

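/* Router initialization: allocate the router structure, enable
 * routing in the device and bring up the RIF, nexthop, LPM tree,
 * virtual router and neighbour infrastructure before registering for
 * FIB notifications. Errors unwind in reverse order.
 */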
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}