/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
 * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_router.h"
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_router {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif **rifs;
	struct mlxsw_sp_vr *vrs;
	struct rhashtable neigh_ht;
	struct rhashtable nexthop_group_ht;
	struct rhashtable nexthop_ht;
	struct {
		struct mlxsw_sp_lpm_tree *trees;
		unsigned int tree_count;
	} lpm;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
	} neighs_update;
	struct delayed_work nexthop_probe_dw;
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
	struct list_head nexthop_neighs_list;
	bool aborted;
	struct notifier_block fib_nb;
	const struct mlxsw_sp_rif_ops **rif_ops_arr;
};

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev;
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};

struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};

struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif);
};
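
/* Each RIF can carry one ingress and one egress packet counter. The
 * helpers below pick the counter index / validity bit matching the
 * requested direction; the counters themselves come from the RIF
 * counter sub-pool and are bound to the RIF via the RITR register.
 */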
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}

static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}

static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}

int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}

void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}

static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
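
/* A FIB tracks which prefix lengths it currently uses as a bitmap with
 * one bit per possible length (0 up to 128 for IPv6). Hardware LPM trees
 * are laid out from this usage, which also lets a tree be shared between
 * virtual routers whose usage is identical.
 */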
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
			     struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	unsigned char prefix;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
		if (!test_bit(prefix, prefix_usage2->b))
			return false;
	}
	return true;
}

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static bool
mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
{
	struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 } };

	return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}

struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
};

struct mlxsw_sp_nexthop_group;

struct mlxsw_sp_fib_node {
	struct list_head entry_list;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry {
	struct list_head list;
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
};

struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
	u32 tb_id;
	u32 prio;
	u8 tos;
	u8 type;
};

struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
	unsigned int nrt6;
};

struct mlxsw_sp_rt6 {
	struct list_head list;
	struct rt6_info *rt;
};

enum mlxsw_sp_l3proto {
	MLXSW_SP_L3_PROTO_IPV4,
	MLXSW_SP_L3_PROTO_IPV6,
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	struct mlxsw_sp_prefix_usage prefix_usage;
};

struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
	enum mlxsw_sp_l3proto proto;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
};

static const struct rhashtable_params mlxsw_sp_fib_ht_params;

static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib;
	int err;

	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	return fib;

err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
{
	WARN_ON(!list_empty(&fib->node_list));
	WARN_ON(fib->lpm_tree);
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	static struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}

static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}

static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}

static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}

static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage))
			goto inc_ref_count;
	}
	lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
					    proto);
	if (IS_ERR(lpm_tree))
		return lpm_tree;

inc_ref_count:
	lpm_tree->ref_count++;
	return lpm_tree;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}

#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */

static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	return 0;
}

static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->router->lpm.trees);
}
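
/* A virtual router (VR) binds one kernel FIB table (tb_id) to a pair of
 * hardware FIB tables, one for IPv4 and one for IPv6. A VR slot is
 * considered in use as long as either FIB exists.
 */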
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}

static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}

static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main and local table into one */
	if (tb_id == RT_TABLE_LOCAL)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}

static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr)
		return ERR_PTR(-EBUSY);
	vr->fib4 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(vr->fib4))
		return ERR_CAST(vr->fib4);
	vr->fib6 = mlxsw_sp_fib_create(vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(vr->fib6)) {
		err = PTR_ERR(vr->fib6);
		goto err_fib6_create;
	}
	vr->tb_id = tb_id;
	return vr;

err_fib6_create:
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
	return ERR_PTR(err);
}

static void mlxsw_sp_vr_destroy(struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_fib_destroy(vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(vr->fib4);
	vr->fib4 = NULL;
}

static int
mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fib *fib,
			   struct mlxsw_sp_prefix_usage *req_prefix_usage)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib->lpm_tree;
	struct mlxsw_sp_lpm_tree *new_tree;
	int err;

	if (mlxsw_sp_prefix_usage_eq(req_prefix_usage, &lpm_tree->prefix_usage))
		return 0;

	new_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
					 fib->proto);
	if (IS_ERR(new_tree)) {
		/* We failed to get a tree according to the required
		 * prefix usage. However, the current tree might still be
		 * good for us if our requirement is a subset of the prefixes
		 * used in the tree.
		 */
		if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
						 &lpm_tree->prefix_usage))
			return 0;
		return PTR_ERR(new_tree);
	}

	/* Prevent packet loss by overwriting existing binding */
	fib->lpm_tree = new_tree;
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	return 0;

err_tree_bind:
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	return err;
}

static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id);
	return vr;
}

static void mlxsw_sp_vr_put(struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list))
		mlxsw_sp_vr_destroy(vr);
}

static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);

static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}
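
/* The kernel neighbour tables are mirrored into the device: each
 * neighbour the router cares about is programmed via the RAUHT register,
 * and periodic RAUHTD activity dumps feed usage back to the kernel so
 * that entries resolved in hardware are not aged out.
 */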
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};

struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
};

static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}

static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}

static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}

static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}

static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}

static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}

static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int ent_index)
{
	struct net_device *dev;
	struct neighbour *n;
	__be32 dipn;
	u32 dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dipn = htonl(dip);
	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&arp_tbl, &dipn, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	struct net_device *dev;
	struct neighbour *n;
	struct in6_addr dip;
	u16 rif;

	mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
					 (char *) &dip);

	if (!mlxsw_sp->router->rifs[rif]) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
		return;
	}

	dev = mlxsw_sp->router->rifs[rif]->dev;
	n = neigh_lookup(&nd_tbl, &dip, dev);
	if (!n) {
		netdev_err(dev, "Failed to find matching neighbour for IP=%pI6c\n",
			   &dip);
		return;
	}

	netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
#else
static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
}
#endif

static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	u8 num_entries;
	int i;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								rec_index);
	/* Hardware starts counting at 0, so add 1. */
	num_entries++;

	/* Each record consists of several neighbour entries. */
	for (i = 0; i < num_entries; i++) {
		int ent_index;

		ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
		mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
						       ent_index);
	}
}

static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
						   char *rauhtd_pl,
						   int rec_index)
{
	/* One record contains one entry. */
	mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
					       rec_index);
}

static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
					      char *rauhtd_pl, int rec_index)
{
	switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
	case MLXSW_REG_RAUHTD_TYPE_IPV4:
		mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	case MLXSW_REG_RAUHTD_TYPE_IPV6:
		mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
						       rec_index);
		break;
	}
}
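
/* The RAUHTD activity dump is paged: one response holds at most
 * MLXSW_REG_RAUHTD_REC_MAX_NUM records. If the last response came back
 * completely full, there may be more activity records pending, so the
 * dump loop issues another query.
 */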
static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
{
	u8 num_rec, last_rec_index, num_entries;

	num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
	last_rec_index = num_rec - 1;

	if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
		return false;
	if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
	    MLXSW_REG_RAUHTD_TYPE_IPV6)
		return true;

	num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
								last_rec_index);
	if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
		return true;
	return false;
}

static int
__mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl,
				       enum mlxsw_reg_rauhtd_type type)
{
	int i, num_rec;
	int err;

	/* Make sure the neighbour's netdev isn't removed in the
	 * process.
	 */
	rtnl_lock();
	do {
		mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
				      rauhtd_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
			break;
		}
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
							  i);
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
	rtnl_unlock();

	return err;
}

static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_rauhtd_type type;
	char *rauhtd_pl;
	int err;

	rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
	if (!rauhtd_pl)
		return -ENOMEM;

	type = MLXSW_REG_RAUHTD_TYPE_IPV4;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
	if (err)
		goto out;

	type = MLXSW_REG_RAUHTD_TYPE_IPV6;
	err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
out:
	kfree(rauhtd_pl);
	return err;
}

static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	/* Take RTNL mutex here to prevent lists from changes */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		/* If this neigh has nexthops, make the kernel think it is
		 * active regardless of the traffic.
		 */
		neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();
}

static void
mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval = mlxsw_sp->router->neighs_update.interval;

	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
{
	struct mlxsw_sp_router *router;
	int err;

	router = container_of(work, struct mlxsw_sp_router,
			      neighs_update.dw.work);
	err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
	if (err)
		dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");

	mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);

	mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
}

static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_router *router;

	router = container_of(work, struct mlxsw_sp_router,
			      nexthop_probe_dw.work);
	/* Iterate over nexthop neighbours, find those who are unresolved and
	 * send arp on them. This solves the chicken-egg problem where
	 * the nexthop wouldn't get offloaded until the neighbour is resolved
	 * but it wouldn't get resolved ever in case traffic is flowing in HW
	 * using a different nexthop.
	 *
	 * Take RTNL mutex here to prevent lists from changes.
	 */
	rtnl_lock();
	list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
			    nexthop_neighs_list_node)
		if (!neigh_entry->connected)
			neigh_event_send(neigh_entry->key.n, NULL);
	rtnl_unlock();

	mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
			       MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing);

static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
{
	return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
			MLXSW_REG_RAUHT_OP_WRITE_DELETE;
}

static void
mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	u32 dip = ntohl(*((__be32 *) n->primary_key));
	char rauht_pl[MLXSW_REG_RAUHT_LEN];

	mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static void
mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_neigh_entry *neigh_entry,
				enum mlxsw_reg_rauht_op op)
{
	struct neighbour *n = neigh_entry->key.n;
	char rauht_pl[MLXSW_REG_RAUHT_LEN];
	const char *dip = n->primary_key;

	mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
			      dip);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
}

static bool mlxsw_sp_neigh_ipv6_ignore(struct neighbour *n)
{
	/* Packets with a link-local destination address are trapped
	 * after LPM lookup and never reach the neighbour table, so
	 * there is no need to program such neighbours to the device.
	 */
	if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
	    IPV6_ADDR_LINKLOCAL)
		return true;
	return false;
}

static void
mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry,
			    bool adding)
{
	if (!adding && !neigh_entry->connected)
		return;
	neigh_entry->connected = adding;
	if (neigh_entry->key.n->tbl->family == AF_INET) {
		mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	} else if (neigh_entry->key.n->tbl->family == AF_INET6) {
		if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry->key.n))
			return;
		mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
						mlxsw_sp_rauht_op(adding));
	} else {
		WARN_ON_ONCE(1);
	}
}

struct mlxsw_sp_neigh_event_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
	struct neighbour *n;
};

static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
{
	struct mlxsw_sp_neigh_event_work *neigh_work =
		container_of(work, struct mlxsw_sp_neigh_event_work, work);
	struct mlxsw_sp *mlxsw_sp = neigh_work->mlxsw_sp;
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n = neigh_work->n;
	unsigned char ha[ETH_ALEN];
	bool entry_connected;
	u8 nud_state, dead;

	/* If these parameters are changed after we release the lock,
	 * then we are guaranteed to receive another event letting us
	 * know about it.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	rtnl_lock();
	entry_connected = nud_state & NUD_VALID && !dead;
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!entry_connected && !neigh_entry)
		goto out;
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry))
			goto out;
	}

	memcpy(neigh_entry->ha, ha, ETH_ALEN);
	mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
	mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

out:
	rtnl_unlock();
	neigh_release(n);
	kfree(neigh_work);
}

int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct mlxsw_sp_neigh_event_work *neigh_work;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp;
	unsigned long interval;
	struct neigh_parms *p;
	struct neighbour *n;

	switch (event) {
	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We don't care about changes in the default table. */
		if (!p->dev || (p->tbl->family != AF_INET &&
				p->tbl->family != AF_INET6))
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use RCU variant to walk the device chain.
		 */
		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
		mlxsw_sp->router->neighs_update.interval = interval;

		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;

		if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
			return NOTIFY_DONE;

		mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
		if (!mlxsw_sp_port)
			return NOTIFY_DONE;

		neigh_work = kzalloc(sizeof(*neigh_work), GFP_ATOMIC);
		if (!neigh_work) {
			mlxsw_sp_port_dev_put(mlxsw_sp_port);
			return NOTIFY_BAD;
		}

		INIT_WORK(&neigh_work->work, mlxsw_sp_router_neigh_event_work);
		neigh_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
		neigh_work->n = n;

		/* Take a reference to ensure the neighbour won't be
		 * destructed until we drop the reference in delayed
		 * work.
		 */
		neigh_clone(n);
		mlxsw_core_schedule_work(&neigh_work->work);
		mlxsw_sp_port_dev_put(mlxsw_sp_port);
		break;
	}

	return NOTIFY_DONE;
}

static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
			      &mlxsw_sp_neigh_ht_params);
	if (err)
		return err;

	/* Initialize the polling interval according to the default
	 * table.
	 */
	mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);

	/* Create the delayed works for the activity_update */
	INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
			  mlxsw_sp_router_neighs_update_work);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
			  mlxsw_sp_router_probe_unresolved_nexthops);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
	mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
	return 0;
}

static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
	cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
	cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
	rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
}

static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;

	list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
				 rif_list_node) {
		mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
	}
}
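
/* A nexthop group mirrors a kernel multipath route. When nexthops in the
 * group resolve, the group is written to the KVD linear area as a block
 * of adjacency entries and routes point at that block; otherwise the
 * routes trap traffic to the CPU.
 */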
struct mlxsw_sp_nexthop_key {
	struct fib_nh *fib_nh;
};

struct mlxsw_sp_nexthop {
	struct list_head neigh_list_node; /* member of neigh entry list */
	struct list_head rif_list_node;
	struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
						* this nexthop belongs to
						*/
	struct rhash_head ht_node;
	struct mlxsw_sp_nexthop_key key;
	unsigned char gw_addr[sizeof(struct in6_addr)];
	struct mlxsw_sp_rif *rif;
	u8 should_offload:1, /* set indicates this neigh is connected and
			      * should be put to KVD linear area of this group.
			      */
	   offloaded:1, /* set in case the neigh is actually put into
			 * KVD linear area of this group.
			 */
	   update:1; /* set indicates that MAC of this neigh should be
		      * updated in HW
		      */
	struct mlxsw_sp_neigh_entry *neigh_entry;
};

struct mlxsw_sp_nexthop_group_key {
	struct fib_info *fi;
};

struct mlxsw_sp_nexthop_group {
	struct rhash_head ht_node;
	struct list_head fib_list; /* list of fib entries that use this group */
	struct neigh_table *neigh_tbl;
	struct mlxsw_sp_nexthop_group_key key;
	u8 adj_index_valid:1,
	   gateway:1; /* routes using the group use a gateway */
	u32 adj_index;
	u16 ecmp_size;
	u16 count;
	struct mlxsw_sp_nexthop nexthops[0];
#define nh_rif nexthops[0].rif
};

static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop_group, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_group_key),
};

static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nexthop_group *nh_grp)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &nh_grp->ht_node,
				      mlxsw_sp_nexthop_group_ht_params);
}

static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
			       &nh_grp->ht_node,
			       mlxsw_sp_nexthop_group_ht_params);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_group_lookup(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nexthop_group_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht, &key,
				      mlxsw_sp_nexthop_group_ht_params);
}

static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_nexthop, key),
	.head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
	.key_len = sizeof(struct mlxsw_sp_nexthop_key),
};

static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
				      &nh->ht_node, mlxsw_sp_nexthop_ht_params);
}

static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop *nh)
{
	rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
			       mlxsw_sp_nexthop_ht_params);
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
			struct mlxsw_sp_nexthop_key key)
{
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
				      mlxsw_sp_nexthop_ht_params);
}

static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
					     const struct mlxsw_sp_fib *fib,
					     u32 adj_index, u16 ecmp_size,
					     u32 new_adj_index,
					     u16 new_ecmp_size)
{
	char raleu_pl[MLXSW_REG_RALEU_LEN];

	mlxsw_reg_raleu_pack(raleu_pl,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     fib->vr->id, adj_index, ecmp_size, new_adj_index,
			     new_ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
}

static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp,
					  u32 old_adj_index, u16 old_ecmp_size)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_fib *fib = NULL;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (fib == fib_entry->fib_node->fib)
			continue;
		fib = fib_entry->fib_node->fib;
		err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
							old_adj_index,
							old_ecmp_size,
							nh_grp->adj_index,
							nh_grp->ecmp_size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	char ratr_pl[MLXSW_REG_RATR_LEN];

	mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
			    true, adj_index, neigh_entry->rif);
	mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  bool reallocate)
{
	u32 adj_index = nh_grp->adj_index; /* base */
	struct mlxsw_sp_nexthop *nh;
	int i;
	int err;

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (!nh->should_offload) {
			nh->offloaded = 0;
			continue;
		}

		if (nh->update || reallocate) {
			err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
							  adj_index, nh);
			if (err)
				return err;
			nh->update = 0;
			nh->offloaded = 1;
		}
		adj_index++;
	}
	return 0;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);

static bool
mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
				 const struct mlxsw_sp_fib_entry *fib_entry);

static int
mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
						      fib_entry))
			continue;
		err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
		if (err)
			return err;
	}
	return 0;
}

static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err);

static void
mlxsw_sp_nexthop_fib_entries_refresh(struct mlxsw_sp_nexthop_group *nh_grp)
{
	enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
	struct mlxsw_sp_fib_entry *fib_entry;

	list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
		if (!mlxsw_sp_fib_node_entry_is_first(fib_entry->fib_node,
						      fib_entry))
			continue;
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
	}
}
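
/* Recompute the hardware state of a nexthop group after membership or
 * resolution changes: allocate a new adjacency block sized to the number
 * of offloadable nexthops, write their MACs, repoint (or mass-update)
 * the FIB entries and free the old block. If nothing can be offloaded,
 * fall back to trapping traffic to the CPU.
 */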
static void
mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	bool offload_change = false;
	u32 adj_index;
	u16 ecmp_size = 0;
	bool old_adj_index_valid;
	u32 old_adj_index;
	u16 old_ecmp_size;
	int i;
	int err;

	if (!nh_grp->gateway) {
		mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];

		if (nh->should_offload != nh->offloaded) {
			offload_change = true;
			if (nh->should_offload)
				nh->update = 1;
		}
		if (nh->should_offload)
			ecmp_size++;
	}
	if (!offload_change) {
		/* Nothing was added or removed, so no need to reallocate. Just
		 * update MAC on existing adjacency indexes.
		 */
		err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
							false);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
			goto set_trap;
		}
		return;
	}
	if (!ecmp_size)
		/* No neigh of this group is connected so we just set
		 * the trap and let everything flow through kernel.
		 */
		goto set_trap;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size, &adj_index);
	if (err) {
		/* We ran out of KVD linear space, just set the
		 * trap and let everything flow through kernel.
		 */
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
		goto set_trap;
	}
	old_adj_index_valid = nh_grp->adj_index_valid;
	old_adj_index = nh_grp->adj_index;
	old_ecmp_size = nh_grp->ecmp_size;
	nh_grp->adj_index_valid = 1;
	nh_grp->adj_index = adj_index;
	nh_grp->ecmp_size = ecmp_size;
	err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
		goto set_trap;
	}

	if (!old_adj_index_valid) {
		/* The trap was set for fib entries, so we have to call
		 * fib entry update to unset it and use adjacency index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
		if (err) {
			dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
			goto set_trap;
		}
		return;
	}

	err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
					     old_adj_index, old_ecmp_size);
	mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
		goto set_trap;
	}

	/* Offload state within the group changed, so update the flags. */
	mlxsw_sp_nexthop_fib_entries_refresh(nh_grp);

	return;

set_trap:
	old_adj_index_valid = nh_grp->adj_index_valid;
	nh_grp->adj_index_valid = 0;
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		nh->offloaded = 0;
	}
	err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
	if (old_adj_index_valid)
		mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
}

static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
					    bool removing)
{
	if (!removing && !nh->should_offload)
		nh->should_offload = 1;
	else if (nh->offloaded)
		nh->should_offload = 0;
	nh->update = 1;
}

static void
mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_neigh_entry *neigh_entry,
			      bool removing)
{
	struct mlxsw_sp_nexthop *nh;

	list_for_each_entry(nh, &neigh_entry->nexthop_list,
			    neigh_list_node) {
		__mlxsw_sp_nexthop_neigh_update(nh, removing);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
				      struct mlxsw_sp_rif *rif)
{
	if (nh->rif)
		return;

	nh->rif = rif;
	list_add(&nh->rif_list_node, &rif->nexthop_list);
}

static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
{
	if (!nh->rif)
		return;

	list_del(&nh->rif_list_node);
	nh->rif = NULL;
}

static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct neighbour *n;
	u8 nud_state, dead;
	int err;

	if (!nh->nh_grp->gateway || nh->neigh_entry)
		return 0;

	/* Take a reference of neigh here ensuring that neigh would
	 * not be destructed before the nexthop entry is finished.
	 * The reference is taken either in neigh_lookup() or
	 * in neigh_create() in case n is not found.
	 */
	n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
	if (!n) {
		n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
				 nh->rif->dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		neigh_event_send(n, NULL);
	}
	neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
	if (!neigh_entry) {
		neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
		if (IS_ERR(neigh_entry)) {
			err = -EINVAL;
			goto err_neigh_entry_create;
		}
	}

	/* If that is the first nexthop connected to that neigh, add to
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_add_tail(&neigh_entry->nexthop_neighs_list_node,
			      &mlxsw_sp->router->nexthop_neighs_list);

	nh->neigh_entry = neigh_entry;
	list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);
	__mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));

	return 0;

err_neigh_entry_create:
	neigh_release(n);
	return err;
}

static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nexthop *nh)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
	struct neighbour *n;

	if (!neigh_entry)
		return;
	n = neigh_entry->key.n;

	__mlxsw_sp_nexthop_neigh_update(nh, true);
	list_del(&nh->neigh_list_node);
	nh->neigh_entry = NULL;

	/* If that is the last nexthop connected to that neigh, remove from
	 * nexthop_neighs_list
	 */
	if (list_empty(&neigh_entry->nexthop_list))
		list_del(&neigh_entry->nexthop_neighs_list_node);

	if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
		mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);

	neigh_release(n);
}
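
/* IPv4 nexthops are keyed by their kernel fib_nh. Init ties a nexthop to
 * its RIF and neighbour entry; fini undoes both. The nexthop event
 * handler below keeps this state in sync with the kernel under RTNL.
 */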
static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  struct fib_nh *fib_nh)
{
	struct net_device *dev = fib_nh->nh_dev;
	struct in_device *in_dev;
	struct mlxsw_sp_rif *rif;
	int err;

	nh->nh_grp = nh_grp;
	nh->key.fib_nh = fib_nh;
	memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
	err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
	if (err)
		return err;

	if (!dev)
		return 0;

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
	    fib_nh->nh_flags & RTNH_F_LINKDOWN)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
	mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
}

static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
				    unsigned long event, struct fib_nh *fib_nh)
{
	struct mlxsw_sp_nexthop_key key;
	struct mlxsw_sp_nexthop *nh;
	struct mlxsw_sp_rif *rif;

	if (mlxsw_sp->router->aborted)
		return;

	key.fib_nh = fib_nh;
	nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
	if (WARN_ON_ONCE(!nh))
		return;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fib_nh->nh_dev);
	if (!rif)
		return;

	switch (event) {
	case FIB_EVENT_NH_ADD:
		mlxsw_sp_nexthop_rif_init(nh, rif);
		mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
		break;
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		break;
	}

	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
}

static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_nexthop *nh, *tmp;

	list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
		mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
		mlxsw_sp_nexthop_rif_fini(nh);
		mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
	}
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	struct fib_nh *fib_nh;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->neigh_tbl = &arp_tbl;

	nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK;
	nh_grp->count = fi->fib_nhs;
	nh_grp->key.fi = fi;
	fib_info_hold(fi);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		fib_nh = &fi->fib_nh[i];
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop_group_insert:
err_nexthop4_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	fib_info_put(nh_grp->key.fi);
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i;

	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	for (i = 0; i < nh_grp->count; i++) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nh_grp->adj_index_valid);
	fib_info_put(nh_grp->key.fi);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group_key key;
	struct mlxsw_sp_nexthop_group *nh_grp;

	key.fi = fi;
	nh_grp = mlxsw_sp_nexthop_group_lookup(mlxsw_sp, key);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}
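
/* Only IPv4 routes with a zero TOS are considered for offload; remote
 * routes additionally require a valid adjacency index and local routes
 * a RIF, otherwise the entry traps to the CPU.
 */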
static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->tos;
}

static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nh_rif;
	default:
		return false;
	}
}

static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
		struct rt6_info *rt = mlxsw_sp_rt6->rt;

		if (nh->rif && nh->rif->dev == rt->dst.dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->rt6i_gateway))
			return nh;
	}

	return NULL;
}

static void
mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
		nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
		return;
	}

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		if (nh->offloaded)
			nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
		else
			nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
	int i;

	for (i = 0; i < nh_grp->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];

		nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
	}
}

static void
mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);

	if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL) {
		list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				 list)->rt->rt6i_flags |= RTF_OFFLOAD;
		return;
	}

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
		struct mlxsw_sp_nexthop *nh;

		nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
		if (nh && nh->offloaded)
			mlxsw_sp_rt6->rt->rt6i_flags |= RTF_OFFLOAD;
		else
			mlxsw_sp_rt6->rt->rt6i_flags &= ~RTF_OFFLOAD;
	}
}

static void
mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		struct rt6_info *rt = mlxsw_sp_rt6->rt;

		rt->rt6i_flags &= ~RTF_OFFLOAD;
	}
}

static void mlxsw_sp_fib_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_offload_set(fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_offload_set(fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_offload_unset(fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_offload_unset(fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_offload_refresh(struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op, int err)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		return mlxsw_sp_fib_entry_offload_unset(fib_entry);
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		if (err)
			return;
		if (mlxsw_sp_fib_entry_should_offload(fib_entry))
			mlxsw_sp_fib_entry_offload_set(fib_entry);
		else
			mlxsw_sp_fib_entry_offload_unset(fib_entry);
		break;
	default:
		break;
	}
}
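
/* FIB entries are programmed via the RALUE register. The key is the
 * (virtual router, prefix) pair; the action is remote (adjacency block),
 * local (RIF) or ip2me trap, depending on the entry type.
 */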
static void
mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
			      const struct mlxsw_sp_fib_entry *fib_entry,
			      enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralxx_protocol proto;
	u32 *p_dip;

	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;

	switch (fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		p_dip = (u32 *) fib_entry->fib_node->key.addr;
		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      *p_dip);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      fib_entry->fib_node->key.addr);
		break;
	}
}

static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = fib_entry->nh_group->adj_index;
		ecmp_size = fib_entry->nh_group->ecmp_size;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
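/* Editorial note: the three helpers above correspond to the three RALUE
 * actions used by this driver - remote (forward via an adjacency group),
 * local (forward via a RIF, or trap at low priority) and ip2me (trap to
 * the CPU). They all share the same sequence, sketched here:
 *
 *	char ralue_pl[MLXSW_REG_RALUE_LEN];
 *
 *	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
 *	... pack one of the remote / local / ip2me actions ...
 *	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
 */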
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);

	mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, err);

	return err;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct fib_info *fi = fen_info->fi;

	switch (fen_info->type) {
	case RTN_BROADCAST: /* fall through */
	case RTN_LOCAL:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_UNREACHABLE: /* fall through */
	case RTN_BLACKHOLE: /* fall through */
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	case RTN_UNICAST:
		if (fi->fib_nh->nh_scope != RT_SCOPE_LINK)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		return 0;
	default:
		return -EINVAL;
	}
}
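/* Editorial example (assumed, not part of the original source): the cases
 * above can be exercised from user space with, e.g.:
 *
 *	ip route add 198.51.100.0/24 via 192.0.2.1
 *		(RTN_UNICAST, gateway nexthop has link scope -> REMOTE)
 *	ip route add 192.0.2.0/24 dev swp1
 *		(RTN_UNICAST, directly connected -> LOCAL)
 *	ip route add blackhole 203.0.113.0/24
 *		(RTN_BLACKHOLE -> LOCAL, low-priority trap)
 */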
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
	if (!fib4_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib4_entry->common;

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop4_group_get;

	fib4_entry->prio = fen_info->fi->fib_priority;
	fib4_entry->tb_id = fen_info->tb_id;
	fib4_entry->type = fen_info->type;
	fib4_entry->tos = fen_info->tos;

	fib_entry->fib_node = fib_node;

	return fib4_entry;

err_nexthop4_group_get:
err_fib4_entry_type_set:
	kfree(fib4_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
	kfree(fib4_entry);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node)
		return NULL;

	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
		if (fib4_entry->tb_id == fen_info->tb_id &&
		    fib4_entry->tos == fen_info->tos &&
		    fib4_entry->type == fen_info->type &&
		    fib4_entry->common.nh_group->key.fi == fen_info->fi) {
			return fib4_entry;
		}
	}

	return NULL;
}

static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};
static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}

static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
	if (!fib_node)
		return NULL;

	INIT_LIST_HEAD(&fib_node->entry_list);
	list_add(&fib_node->list, &fib->node_list);
	memcpy(fib_node->key.addr, addr, addr_len);
	fib_node->key.prefix_len = prefix_len;

	return fib_node;
}

static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	WARN_ON(!list_empty(&fib_node->entry_list));
	kfree(fib_node);
}

static bool
mlxsw_sp_fib_node_entry_is_first(const struct mlxsw_sp_fib_node *fib_node,
				 const struct mlxsw_sp_fib_entry *fib_entry)
{
	return list_first_entry(&fib_node->entry_list,
				struct mlxsw_sp_fib_entry, list) == fib_entry;
}
static void mlxsw_sp_fib_node_prefix_inc(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (fib->prefix_ref_count[prefix_len]++ == 0)
		mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
}

static void mlxsw_sp_fib_node_prefix_dec(struct mlxsw_sp_fib_node *fib_node)
{
	unsigned char prefix_len = fib_node->key.prefix_len;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	if (--fib->prefix_ref_count[prefix_len] == 0)
		mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
}
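/* Editorial note: the pair of helpers above keeps a per-FIB reference count
 * for every prefix length in use and mirrors it into fib->prefix_usage on
 * the 0 <-> 1 transitions only. For example, adding the first /24 node sets
 * bit 24 in the usage map, a second /24 changes nothing, and removing the
 * last one clears the bit again. The usage map in turn decides which LPM
 * tree layout the virtual router needs.
 */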
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &fib->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);

	if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib,
						 &req_prefix_usage);
		if (err)
			goto err_tree_check;
	} else {
		lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
						 fib->proto);
		if (IS_ERR(lpm_tree))
			return PTR_ERR(lpm_tree);
		fib->lpm_tree = lpm_tree;
		err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib);
		if (err)
			goto err_tree_bind;
	}

	mlxsw_sp_fib_node_prefix_inc(fib_node);

	return 0;

err_tree_bind:
	fib->lpm_tree = NULL;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_tree_check:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}

static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_node_prefix_dec(fib_node);

	if (mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) {
		mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
		fib->lpm_tree = NULL;
		mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	} else {
		mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, fib, &fib->prefix_usage);
	}

	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (!list_empty(&fib_node->entry_list))
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(vr);
}
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	list_for_each_entry(fib4_entry, &fib_node->entry_list, common.list) {
		if (fib4_entry->tb_id > new4_entry->tb_id)
			continue;
		if (fib4_entry->tb_id != new4_entry->tb_id)
			break;
		if (fib4_entry->tos > new4_entry->tos)
			continue;
		if (fib4_entry->prio >= new4_entry->prio ||
		    fib4_entry->tos < new4_entry->tos)
			return fib4_entry;
	}

	return NULL;
}

static int
mlxsw_sp_fib4_node_list_append(struct mlxsw_sp_fib4_entry *fib4_entry,
			       struct mlxsw_sp_fib4_entry *new4_entry)
{
	struct mlxsw_sp_fib_node *fib_node;

	if (WARN_ON(!fib4_entry))
		return -EINVAL;

	fib_node = fib4_entry->common.fib_node;
	list_for_each_entry_from(fib4_entry, &fib_node->entry_list,
				 common.list) {
		if (fib4_entry->tb_id != new4_entry->tb_id ||
		    fib4_entry->tos != new4_entry->tos ||
		    fib4_entry->prio != new4_entry->prio)
			break;
	}

	list_add_tail(&new4_entry->common.list, &fib4_entry->common.list);
	return 0;
}

static int
mlxsw_sp_fib4_node_list_insert(struct mlxsw_sp_fib4_entry *new4_entry,
			       bool replace, bool append)
{
	struct mlxsw_sp_fib_node *fib_node = new4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = mlxsw_sp_fib4_node_entry_find(fib_node, new4_entry);

	if (append)
		return mlxsw_sp_fib4_node_list_append(fib4_entry, new4_entry);
	if (replace && WARN_ON(!fib4_entry))
		return -EINVAL;

	/* Insert new entry before replaced one, so that we can later
	 * remove the second.
	 */
	if (fib4_entry) {
		list_add_tail(&new4_entry->common.list,
			      &fib4_entry->common.list);
	} else {
		struct mlxsw_sp_fib4_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			if (new4_entry->tb_id > last->tb_id)
				break;
			fib4_entry = last;
		}

		if (fib4_entry)
			list_add(&new4_entry->common.list,
				 &fib4_entry->common.list);
		else
			list_add(&new4_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}

static void
mlxsw_sp_fib4_node_list_remove(struct mlxsw_sp_fib4_entry *fib4_entry)
{
	list_del(&fib4_entry->common.list);
}
static int mlxsw_sp_fib_node_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return 0;

	/* To prevent packet loss, overwrite the previously offloaded
	 * entry.
	 */
	if (!list_is_singular(&fib_node->entry_list)) {
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);

		mlxsw_sp_fib_entry_offload_refresh(n, op, 0);
	}

	return mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
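/* Editorial note: only the first entry in a node's entry list is programmed
 * to the device; lower-priority duplicates exist in software only. When a
 * new first entry is added, the RALUE write above overwrites the previous
 * one in place, so the prefix never transiently disappears from the LPM
 * table (make-before-break).
 */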
static void mlxsw_sp_fib_node_entry_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	if (!mlxsw_sp_fib_node_entry_is_first(fib_node, fib_entry))
		return;

	/* Promote the next entry by overwriting the deleted entry */
	if (!list_is_singular(&fib_node->entry_list)) {
		struct mlxsw_sp_fib_entry *n = list_next_entry(fib_entry, list);
		enum mlxsw_reg_ralue_op op = MLXSW_REG_RALUE_OP_WRITE_DELETE;

		mlxsw_sp_fib_entry_update(mlxsw_sp, n);
		mlxsw_sp_fib_entry_offload_refresh(fib_entry, op, 0);
		return;
	}

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
}
static int mlxsw_sp_fib4_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib4_entry *fib4_entry,
					 bool replace, bool append)
{
	int err;

	err = mlxsw_sp_fib4_node_list_insert(fib4_entry, replace, append);
	if (err)
		return err;

	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib4_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_fib4_node_list_remove(fib4_entry);
	return err;
}

static void
mlxsw_sp_fib4_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib4_node_list_remove(fib4_entry);
}

static void mlxsw_sp_fib4_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *replaced;

	if (!replace)
		return;

	/* We inserted the new entry before replaced one */
	replaced = list_next_entry(fib4_entry, common.list);

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static int
mlxsw_sp_router_fib4_add(struct mlxsw_sp *mlxsw_sp,
			 const struct fib_entry_notifier_info *fen_info,
			 bool replace, bool append)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	err = mlxsw_sp_fib4_node_entry_link(mlxsw_sp, fib4_entry, replace,
					    append);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib4_node_entry_link;
	}

	mlxsw_sp_fib4_entry_replace(mlxsw_sp, fib4_entry, replace);

	return 0;

err_fib4_node_entry_link:
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (WARN_ON(!fib4_entry))
		return;
	fib_node = fib4_entry->common.fib_node;

	mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct rt6_info *rt)
{
	/* Packets with link-local destination IP arriving to the router
	 * are trapped to the CPU, so no need to program specific routes
	 * for them.
	 */
	if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LINKLOCAL)
		return true;

	/* Multicast routes aren't supported, so ignore them. Neighbour
	 * Discovery packets are specifically trapped.
	 */
	if (ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_MULTICAST)
		return true;

	/* Cloned routes are irrelevant in the forwarding path. */
	if (rt->rt6i_flags & RTF_CACHE)
		return true;

	return false;
}

static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
	if (!mlxsw_sp_rt6)
		return ERR_PTR(-ENOMEM);

	/* In case of route replace, replaced route is deleted with
	 * no notification. Take reference to prevent accessing freed
	 * memory.
	 */
	mlxsw_sp_rt6->rt = rt;
	rt6_hold(rt);

	return mlxsw_sp_rt6;
}

#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct rt6_info *rt)
{
	rt6_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct rt6_info *rt)
{
}
#endif

static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}
static bool mlxsw_sp_fib6_rt_can_mp(const struct rt6_info *rt)
{
	/* RTF_CACHE routes are ignored */
	return (rt->rt6i_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
}
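/* Editorial example (assumed): a route added with
 * "ip -6 route append 2001:db8::/64 via fe80::1 dev swp1" carries
 * RTF_GATEWAY without RTF_ADDRCONF, so it may be merged into an existing
 * multipath entry, whereas RA-learned (RTF_ADDRCONF) and cloned (RTF_CACHE)
 * routes may not.
 */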
static struct rt6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
				 const struct rt6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;

	if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		/* RT6_TABLE_LOCAL and RT6_TABLE_MAIN share the same
		 * virtual router.
		 */
		if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
			continue;
		if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
			break;
		if (rt->rt6i_metric < nrt->rt6i_metric)
			continue;
		if (rt->rt6i_metric == nrt->rt6i_metric &&
		    mlxsw_sp_fib6_rt_can_mp(rt))
			return fib6_entry;
		if (rt->rt6i_metric > nrt->rt6i_metric)
			break;
	}

	return NULL;
}

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		if (mlxsw_sp_rt6->rt == rt)
			return mlxsw_sp_rt6;
	}

	return NULL;
}
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct rt6_info *rt)
{
	struct net_device *dev = rt->dst.dev;
	struct mlxsw_sp_rif *rif;
	int err;

	nh->nh_grp = nh_grp;
	memcpy(&nh->gw_addr, &rt->rt6i_gateway, sizeof(nh->gw_addr));

	if (!dev)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	mlxsw_sp_nexthop_rif_init(nh, rif);

	err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
	if (err)
		goto err_nexthop_neigh_init;

	return 0;

err_nexthop_neigh_init:
	mlxsw_sp_nexthop_rif_fini(nh);
	return err;
}

static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_rif_fini(nh);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	size_t alloc_size;
	int i = 0;
	int err;

	alloc_size = sizeof(*nh_grp) +
		     fib6_entry->nrt6 * sizeof(struct mlxsw_sp_nexthop);
	nh_grp = kzalloc(alloc_size, GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->fib_list);
#if IS_ENABLED(CONFIG_IPV6)
	nh_grp->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nh_grp->gateway = !!(mlxsw_sp_rt6->rt->rt6i_flags & RTF_GATEWAY);
	nh_grp->count = fib6_entry->nrt6;
	for (i = 0; i < nh_grp->count; i++) {
		struct rt6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nh_grp->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	return nh_grp;

err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop *nh;
	int i = nh_grp->count;

	for (i--; i >= 0; i--) {
		nh = &nh_grp->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON(nh_grp->adj_index_valid);
	kfree(nh_grp);
}

static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	/* For now, don't consolidate nexthop groups */
	nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
	if (IS_ERR(nh_grp))
		return PTR_ERR(nh_grp);

	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}

static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
static int
mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	int err;

	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	return err;
}
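/* Editorial note: the update above is deliberately make-before-break. A new
 * nexthop group is created and written to the device while the old group is
 * still alive; the old group is destroyed only once no entry references it,
 * and every failure path re-links the entry to the old group, leaving the
 * device state untouched.
 */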
static int
mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err;

	mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
	if (IS_ERR(mlxsw_sp_rt6))
		return PTR_ERR(mlxsw_sp_rt6);

	list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
	fib6_entry->nrt6++;

	err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_update;

	return 0;

err_nexthop6_group_update:
	fib6_entry->nrt6--;
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	return err;
}

static void
mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry,
				struct rt6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt);
	if (WARN_ON(!mlxsw_sp_rt6))
		return;

	fib6_entry->nrt6--;
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
}

static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp_fib_entry *fib_entry,
					 const struct rt6_info *rt)
{
	/* Packets hitting RTF_REJECT routes need to be discarded by the
	 * stack. We can rely on their destination device not having a
	 * RIF (it's the loopback device) and can thus use action type
	 * local, which will cause them to be trapped with a lower
	 * priority than packets that need to be locally received.
	 */
	if (rt->rt6i_flags & RTF_LOCAL)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	else if (rt->rt6i_flags & RTF_REJECT)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
	else if (rt->rt6i_flags & RTF_GATEWAY)
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
	else
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
}

static void
mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;

	list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
				 list) {
		fib6_entry->nrt6--;
		list_del(&mlxsw_sp_rt6->list);
		mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
	}
}
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   struct rt6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	int err;

	fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
	if (!fib6_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib6_entry->common;

	mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt);
	if (IS_ERR(mlxsw_sp_rt6)) {
		err = PTR_ERR(mlxsw_sp_rt6);
		goto err_rt6_create;
	}

	mlxsw_sp_fib6_entry_type_set(fib_entry, mlxsw_sp_rt6->rt);

	INIT_LIST_HEAD(&fib6_entry->rt6_list);
	list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
	fib6_entry->nrt6 = 1;
	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	fib_entry->fib_node = fib_node;

	return fib6_entry;

err_nexthop6_group_get:
	list_del(&mlxsw_sp_rt6->list);
	mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
err_rt6_create:
	kfree(fib6_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
	WARN_ON(fib6_entry->nrt6);
	kfree(fib6_entry);
}

static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
			      const struct rt6_info *nrt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->rt6i_table->tb6_id > nrt->rt6i_table->tb6_id)
			continue;
		if (rt->rt6i_table->tb6_id != nrt->rt6i_table->tb6_id)
			break;
		if (replace && rt->rt6i_metric == nrt->rt6i_metric) {
			if (mlxsw_sp_fib6_rt_can_mp(rt) ==
			    mlxsw_sp_fib6_rt_can_mp(nrt))
				return fib6_entry;
			if (mlxsw_sp_fib6_rt_can_mp(nrt))
				fallback = fallback ?: fib6_entry;
		}
		if (rt->rt6i_metric > nrt->rt6i_metric)
			return fallback ?: fib6_entry;
	}

	return fallback;
}
static int
mlxsw_sp_fib6_node_list_insert(struct mlxsw_sp_fib6_entry *new6_entry,
			       bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = new6_entry->common.fib_node;
	struct rt6_info *nrt = mlxsw_sp_fib6_entry_rt(new6_entry);
	struct mlxsw_sp_fib6_entry *fib6_entry;

	fib6_entry = mlxsw_sp_fib6_node_entry_find(fib_node, nrt, replace);

	if (replace && WARN_ON(!fib6_entry))
		return -EINVAL;

	if (fib6_entry) {
		list_add_tail(&new6_entry->common.list,
			      &fib6_entry->common.list);
	} else {
		struct mlxsw_sp_fib6_entry *last;

		list_for_each_entry(last, &fib_node->entry_list, common.list) {
			struct rt6_info *rt = mlxsw_sp_fib6_entry_rt(last);

			if (nrt->rt6i_table->tb6_id > rt->rt6i_table->tb6_id)
				break;
			fib6_entry = last;
		}

		if (fib6_entry)
			list_add(&new6_entry->common.list,
				 &fib6_entry->common.list);
		else
			list_add(&new6_entry->common.list,
				 &fib_node->entry_list);
	}

	return 0;
}

static void
mlxsw_sp_fib6_node_list_remove(struct mlxsw_sp_fib6_entry *fib6_entry)
{
	list_del(&fib6_entry->common.list);
}

static int mlxsw_sp_fib6_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib6_entry *fib6_entry,
					 bool replace)
{
	int err;

	err = mlxsw_sp_fib6_node_list_insert(fib6_entry, replace);
	if (err)
		return err;

	err = mlxsw_sp_fib_node_entry_add(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_node_entry_add;

	return 0;

err_fib_node_entry_add:
	mlxsw_sp_fib6_node_list_remove(fib6_entry);
	return err;
}

static void
mlxsw_sp_fib6_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib6_entry *fib6_entry)
{
	mlxsw_sp_fib_node_entry_del(mlxsw_sp, &fib6_entry->common);
	mlxsw_sp_fib6_node_list_remove(fib6_entry);
}
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct rt6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, rt->rt6i_table->tb6_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->rt6i_dst.addr,
					    sizeof(rt->rt6i_dst.addr),
					    rt->rt6i_dst.plen);
	if (!fib_node)
		return NULL;

	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
		struct rt6_info *iter_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);

		if (rt->rt6i_table->tb6_id == iter_rt->rt6i_table->tb6_id &&
		    rt->rt6i_metric == iter_rt->rt6i_metric &&
		    mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
			return fib6_entry;
	}

	return NULL;
}

static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib6_entry *fib6_entry,
					bool replace)
{
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	struct mlxsw_sp_fib6_entry *replaced;

	if (!replace)
		return;

	replaced = list_next_entry(fib6_entry, common.list);

	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, replaced);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, replaced);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
				    struct rt6_info *rt, bool replace)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (mlxsw_sp->router->aborted)
		return 0;

	if (rt->rt6i_src.plen)
		return -EINVAL;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->rt6i_table->tb6_id,
					 &rt->rt6i_dst.addr,
					 sizeof(rt->rt6i_dst.addr),
					 rt->rt6i_dst.plen,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib_node))
		return PTR_ERR(fib_node);

	/* Before creating a new entry, try to append route to an existing
	 * multipath entry.
	 */
	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
	if (fib6_entry) {
		err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
		if (err)
			goto err_fib6_entry_nexthop_add;
		return 0;
	}

	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
	if (IS_ERR(fib6_entry)) {
		err = PTR_ERR(fib6_entry);
		goto err_fib6_entry_create;
	}

	err = mlxsw_sp_fib6_node_entry_link(mlxsw_sp, fib6_entry, replace);
	if (err)
		goto err_fib6_node_entry_link;

	mlxsw_sp_fib6_entry_replace(mlxsw_sp, fib6_entry, replace);

	return 0;

err_fib6_node_entry_link:
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
err_fib6_entry_nexthop_add:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}

static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
				     struct rt6_info *rt)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_fib_node *fib_node;

	if (mlxsw_sp->router->aborted)
		return;

	if (mlxsw_sp_fib6_rt_should_ignore(rt))
		return;

	fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
	if (WARN_ON(!fib6_entry))
		return;

	/* If route is part of a multipath entry, but not the last one
	 * removed, then only reduce its nexthop group.
	 */
	if (!list_is_singular(&fib6_entry->rt6_list)) {
		mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt);
		return;
	}

	fib_node = fib6_entry->common.fib_node;

	mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
					    enum mlxsw_reg_ralxx_protocol proto,
					    u8 tree_id)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];
	char ralst_pl[MLXSW_REG_RALST_LEN];
	int i, err;

	mlxsw_reg_ralta_pack(ralta_pl, true, proto, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
	if (err)
		return err;

	mlxsw_reg_ralst_pack(ralst_pl, 0xff, tree_id);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
	if (err)
		return err;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;

		mlxsw_reg_raltb_pack(raltb_pl, vr->id, proto, tree_id);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
				      raltb_pl);
		if (err)
			return err;

		mlxsw_reg_ralue_pack(ralue_pl, proto,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE, vr->id, 0);
		mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
				      ralue_pl);
		if (err)
			return err;
	}

	return 0;
}

static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
{
	enum mlxsw_reg_ralxx_protocol proto = MLXSW_REG_RALXX_PROTOCOL_IPV4;
	int err;

	err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
					       MLXSW_SP_LPM_TREE_MIN);
	if (err)
		return err;

	proto = MLXSW_REG_RALXX_PROTOCOL_IPV6;
	return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
						MLXSW_SP_LPM_TREE_MIN + 1);
}
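/* Editorial note: the abort path programs a minimal "punt everything"
 * configuration - one empty LPM tree per protocol (hence MIN and MIN + 1),
 * bound to every in-use virtual router, plus a default catch-all route with
 * an ip2me action. Roughly, per virtual router:
 *
 *	IPv4: tree MLXSW_SP_LPM_TREE_MIN,     route 0.0.0.0/0 -> trap to CPU
 *	IPv6: tree MLXSW_SP_LPM_TREE_MIN + 1, route ::/0      -> trap to CPU
 *
 * From that point on the kernel forwards in software.
 */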
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib4_entry *fib4_entry, *tmp;

	list_for_each_entry_safe(fib4_entry, tmp, &fib_node->entry_list,
				 common.list) {
		bool do_break = &tmp->common.list == &fib_node->entry_list;

		mlxsw_sp_fib4_node_entry_unlink(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		/* Break when entry list is empty and node was freed.
		 * Otherwise, we'll access freed memory in the next
		 * iteration.
		 */
		if (do_break)
			break;
	}
}

static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib6_entry *fib6_entry, *tmp;

	list_for_each_entry_safe(fib6_entry, tmp, &fib_node->entry_list,
				 common.list) {
		bool do_break = &tmp->common.list == &fib_node->entry_list;

		mlxsw_sp_fib6_node_entry_unlink(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_node *fib_node)
{
	switch (fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
		break;
	}
}
static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_vr *vr,
				  enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
	struct mlxsw_sp_fib_node *fib_node, *tmp;

	list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
		bool do_break = &tmp->list == &fib->node_list;

		mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
		if (do_break)
			break;
	}
}

static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];

		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);

		/* If virtual router was only used for IPv4, then it's no
		 * longer used.
		 */
		if (!mlxsw_sp_vr_is_used(vr))
			continue;
		mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	}
}

static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	if (mlxsw_sp->router->aborted)
		return;
	dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	mlxsw_sp->router->aborted = true;
	err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
	if (err)
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
}
struct mlxsw_sp_fib_event_work {
	struct work_struct work;
	union {
		struct fib6_entry_notifier_info fen6_info;
		struct fib_entry_notifier_info fen_info;
		struct fib_rule_notifier_info fr_info;
		struct fib_nh_notifier_info fnh_info;
	};
	struct mlxsw_sp *mlxsw_sp;
	unsigned long event;
};
static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace, append;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
		err = mlxsw_sp_router_fib4_add(mlxsw_sp, &fib_work->fen_info,
					       replace, append);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
		fib_info_put(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		if (!fib4_rule_default(rule) && !rule->l3mdev)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
					fib_work->fnh_info.fib_nh);
		fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}

static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
{
	struct mlxsw_sp_fib_event_work *fib_work =
		container_of(work, struct mlxsw_sp_fib_event_work, work);
	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
	struct fib_rule *rule;
	bool replace;
	int err;

	rtnl_lock();
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_ADD:
		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
		err = mlxsw_sp_router_fib6_add(mlxsw_sp,
					       fib_work->fen6_info.rt, replace);
		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
		break;
	case FIB_EVENT_ENTRY_DEL:
		mlxsw_sp_router_fib6_del(mlxsw_sp, fib_work->fen6_info.rt);
		mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		rule = fib_work->fr_info.rule;
		if (!fib6_rule_default(rule) && !rule->l3mdev)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
		fib_rule_put(rule);
		break;
	}
	rtnl_unlock();
	kfree(fib_work);
}
static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
				       struct fib_notifier_info *info)
{
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_APPEND: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen_info, info, sizeof(fib_work->fen_info));
		/* Take reference on fib_info to prevent it from being
		 * freed while work is queued. Release it afterwards.
		 */
		fib_info_hold(fib_work->fen_info.fi);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	case FIB_EVENT_NH_ADD: /* fall through */
	case FIB_EVENT_NH_DEL:
		memcpy(&fib_work->fnh_info, info, sizeof(fib_work->fnh_info));
		fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
		break;
	}
}

static void mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
				       struct fib_notifier_info *info)
{
	switch (fib_work->event) {
	case FIB_EVENT_ENTRY_REPLACE: /* fall through */
	case FIB_EVENT_ENTRY_ADD: /* fall through */
	case FIB_EVENT_ENTRY_DEL:
		memcpy(&fib_work->fen6_info, info, sizeof(fib_work->fen6_info));
		rt6_hold(fib_work->fen6_info.rt);
		break;
	case FIB_EVENT_RULE_ADD: /* fall through */
	case FIB_EVENT_RULE_DEL:
		memcpy(&fib_work->fr_info, info, sizeof(fib_work->fr_info));
		fib_rule_get(fib_work->fr_info.rule);
		break;
	}
}
/* Called with rcu_read_lock() */
static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
{
	struct mlxsw_sp_fib_event_work *fib_work;
	struct fib_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;

	if (!net_eq(info->net, &init_net))
		return NOTIFY_DONE;

	fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
	if (WARN_ON(!fib_work))
		return NOTIFY_BAD;

	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	fib_work->mlxsw_sp = router->mlxsw_sp;
	fib_work->event = event;

	switch (info->family) {
	case AF_INET:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
		mlxsw_sp_router_fib4_event(fib_work, info);
		break;
	case AF_INET6:
		INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
		mlxsw_sp_router_fib6_event(fib_work, info);
		break;
	}

	mlxsw_core_schedule_work(&fib_work->work);

	return NOTIFY_DONE;
}
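/* Editorial note: the notifier above runs in atomic context, so it only
 * copies the notification info, takes the references it needs and defers
 * the rest. The per-family work items then run in process context under
 * RTNL, where the driver's routing structures may be modified safely.
 */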
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		if (mlxsw_sp->router->rifs[i] &&
		    mlxsw_sp->router->rifs[i]->dev == dev)
			return mlxsw_sp->router->rifs[i];

	return NULL;
}

static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (WARN_ON_ONCE(err))
		return err;

	mlxsw_reg_ritr_enable_set(ritr_pl, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
	mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
	mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
static bool
mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
			   unsigned long event)
{
	struct inet6_dev *inet6_dev;
	bool addr_list_empty = true;
	struct in_device *idev;

	switch (event) {
	case NETDEV_UP:
		return rif == NULL;
	case NETDEV_DOWN:
		idev = __in_dev_get_rtnl(dev);
		if (idev && idev->ifa_list)
			addr_list_empty = false;

		inet6_dev = __in6_dev_get(dev);
		if (addr_list_empty && inet6_dev &&
		    !list_empty(&inet6_dev->addr_list))
			addr_list_empty = false;

		if (rif && addr_list_empty &&
		    !netif_is_l3_slave(rif->dev))
			return true;
		/* It is possible we already removed the RIF ourselves
		 * if it was assigned to a netdev that is now a bridge
		 * or LAG slave.
		 */
		return false;
	}

	return false;
}

static enum mlxsw_sp_rif_type
mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
		      const struct net_device *dev)
{
	enum mlxsw_sp_fid_type type;

	/* RIF type is derived from the type of the underlying FID */
	if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
		type = MLXSW_SP_FID_TYPE_8021Q;
	else if (netif_is_bridge_master(dev))
		type = MLXSW_SP_FID_TYPE_8021D;
	else
		type = MLXSW_SP_FID_TYPE_RFID;

	return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
}
static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
		if (!mlxsw_sp->router->rifs[i]) {
			*p_rif_index = i;
			return 0;
		}
	}

	return -ENOBUFS;
}

static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
					       u16 vr_id,
					       struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = kzalloc(rif_size, GFP_KERNEL);
	if (!rif)
		return NULL;

	INIT_LIST_HEAD(&rif->nexthop_list);
	INIT_LIST_HEAD(&rif->neigh_list);
	ether_addr_copy(rif->addr, l3_dev->dev_addr);
	rif->mtu = l3_dev->mtu;
	rif->vr_id = vr_id;
	rif->dev = l3_dev;
	rif->rif_index = rif_index;

	return rif;
}

struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
					   u16 rif_index)
{
	return mlxsw_sp->router->rifs[rif_index];
}

u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
{
	return rif->rif_index;
}

int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
	return rif->dev->ifindex;
}
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params)
{
	u32 tb_id = l3mdev_fib_table(params->dev);
	const struct mlxsw_sp_rif_ops *ops;
	enum mlxsw_sp_rif_type type;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_vr *vr;
	u16 rif_index;
	int err;

	type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
	ops = mlxsw_sp->router->rif_ops_arr[type];

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
	if (err)
		goto err_rif_index_alloc;

	rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
	if (!rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}
	rif->mlxsw_sp = mlxsw_sp;
	rif->ops = ops;

	fid = ops->fid_get(rif);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	rif->fid = fid;

	if (ops->setup)
		ops->setup(rif, params);

	err = ops->configure(rif);
	if (err)
		goto err_configure;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, params->dev->dev_addr,
				  mlxsw_sp_fid_index(fid), true);
	if (err)
		goto err_rif_fdb_op;

	mlxsw_sp_rif_counters_alloc(rif);
	mlxsw_sp_fid_rif_set(fid, rif);
	mlxsw_sp->router->rifs[rif_index] = rif;
	vr->rif_count++;

	return rif;

err_rif_fdb_op:
	ops->deconfigure(rif);
err_configure:
	mlxsw_sp_fid_put(fid);
err_fid_get:
	kfree(rif);
err_rif_alloc:
err_rif_index_alloc:
	mlxsw_sp_vr_put(vr);
	return ERR_PTR(err);
}

void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
	const struct mlxsw_sp_rif_ops *ops = rif->ops;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;
	struct mlxsw_sp_vr *vr;

	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
	vr = &mlxsw_sp->router->vrs[rif->vr_id];

	vr->rif_count--;
	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
	mlxsw_sp_fid_rif_set(fid, NULL);
	mlxsw_sp_rif_counters_free(rif);
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	ops->deconfigure(rif);
	mlxsw_sp_fid_put(fid);
	kfree(rif);
	mlxsw_sp_vr_put(vr);
}
static void
mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
				 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;

	params->vid = mlxsw_sp_port_vlan->vid;
	params->lag = mlxsw_sp_port->lagged;
	if (params->lag)
		params->lag_id = mlxsw_sp_port->lag_id;
	else
		params->system_port = mlxsw_sp_port->local_port;
}

static int
mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct net_device *l3_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_rif *rif;
	struct mlxsw_sp_fid *fid;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif) {
		struct mlxsw_sp_rif_params params = {
			.dev = l3_dev,
		};

		mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
	}

	/* FID was already created, just take a reference */
	fid = rif->ops->fid_get(rif);
	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					BR_STATE_FORWARDING);
	if (err)
		goto err_port_vid_stp_set;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vid_learning_set:
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
err_fid_port_vid_map:
	mlxsw_sp_fid_put(fid);
	return err;
}

void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 vid = mlxsw_sp_port_vlan->vid;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
		return;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	/* If router port holds the last reference on the rFID, then the
	 * associated Sub-port RIF will be destroyed.
	 */
	mlxsw_sp_fid_put(fid);
}
static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
					     struct net_device *port_dev,
					     unsigned long event, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	switch (event) {
	case NETDEV_UP:
		return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
						      l3_dev);
	case NETDEV_DOWN:
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
		break;
	}

	return 0;
}

static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
					unsigned long event)
{
	if (netif_is_bridge_port(port_dev) ||
	    netif_is_lag_port(port_dev) ||
	    netif_is_ovs_port(port_dev))
		return 0;

	return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1);
}

static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
					 struct net_device *lag_dev,
					 unsigned long event, u16 vid)
{
	struct net_device *port_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
		if (mlxsw_sp_port_dev_check(port_dev)) {
			err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
								port_dev,
								event, vid);
			if (err)
				return err;
		}
	}

	return 0;
}

static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
				       unsigned long event)
{
	if (netif_is_bridge_port(lag_dev))
		return 0;

	return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
}

static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
					  unsigned long event)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	struct mlxsw_sp_rif_params params = {
		.dev = l3_dev,
	};
	struct mlxsw_sp_rif *rif;

	switch (event) {
	case NETDEV_UP:
		rif = mlxsw_sp_rif_create(mlxsw_sp, &params);
		if (IS_ERR(rif))
			return PTR_ERR(rif);
		break;
	case NETDEV_DOWN:
		rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
		mlxsw_sp_rif_destroy(rif);
		break;
	}

	return 0;
}
static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
					unsigned long event)
{
	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
	u16 vid = vlan_dev_vlan_id(vlan_dev);

	if (netif_is_bridge_port(vlan_dev))
		return 0;

	if (mlxsw_sp_port_dev_check(real_dev))
		return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
							 event, vid);
	else if (netif_is_lag_master(real_dev))
		return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
						     vid);
	else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
		return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event);

	return 0;
}

static int __mlxsw_sp_inetaddr_event(struct net_device *dev,
				     unsigned long event)
{
	if (mlxsw_sp_port_dev_check(dev))
		return mlxsw_sp_inetaddr_port_event(dev, event);
	else if (netif_is_lag_master(dev))
		return mlxsw_sp_inetaddr_lag_event(dev, event);
	else if (netif_is_bridge_master(dev))
		return mlxsw_sp_inetaddr_bridge_event(dev, event);
	else if (is_vlan_dev(dev))
		return mlxsw_sp_inetaddr_vlan_event(dev, event);
	else
		return 0;
}

int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	int err = 0;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	err = __mlxsw_sp_inetaddr_event(dev, event);
out:
	return notifier_from_errno(err);
}
struct mlxsw_sp_inet6addr_event_work {
	struct work_struct work;
	struct net_device *dev;
	unsigned long event;
};

static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
{
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
		container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
	struct net_device *dev = inet6addr_work->dev;
	unsigned long event = inet6addr_work->event;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	rtnl_lock();
	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		goto out;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!mlxsw_sp_rif_should_config(rif, dev, event))
		goto out;

	__mlxsw_sp_inetaddr_event(dev, event);
out:
	rtnl_unlock();
	dev_put(dev);
	kfree(inet6addr_work);
}

/* Called with rcu_read_lock() */
int mlxsw_sp_inet6addr_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
	struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
	struct net_device *dev = if6->idev->dev;

	if (!mlxsw_sp_port_dev_lower_find_rcu(dev))
		return NOTIFY_DONE;

	inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
	if (!inet6addr_work)
		return NOTIFY_BAD;

	INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
	inet6addr_work->dev = dev;
	inet6addr_work->event = event;
	dev_hold(dev);
	mlxsw_core_schedule_work(&inet6addr_work->work);

	return NOTIFY_DONE;
}
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
			     const char *mac, int mtu)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	int err;

	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
	mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
	mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
{
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;
	u16 fid_index;
	int err;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;
	fid_index = mlxsw_sp_fid_index(rif->fid);

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
	if (err)
		return err;

	err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
				dev->mtu);
	if (err)
		goto err_rif_edit;

	err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
	if (err)
		goto err_rif_fdb_op;

	ether_addr_copy(rif->addr, dev->dev_addr);
	rif->mtu = dev->mtu;

	netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);

	return 0;

err_rif_fdb_op:
	mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
err_rif_edit:
	mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
	return err;
}
static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);

	return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN);
}

int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
				 struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	if (!mlxsw_sp)
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		return 0;
	case NETDEV_CHANGEUPPER:
		if (info->linking)
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev);
		else
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		break;
	}

	return err;
}
4568 static struct mlxsw_sp_rif_subport *
4569 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
4571 return container_of(rif, struct mlxsw_sp_rif_subport, common);
4574 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
4575 const struct mlxsw_sp_rif_params *params)
4577 struct mlxsw_sp_rif_subport *rif_subport;
4579 rif_subport = mlxsw_sp_rif_subport_rif(rif);
4580 rif_subport->vid = params->vid;
4581 rif_subport->lag = params->lag;
4583 rif_subport->lag_id = params->lag_id;
4585 rif_subport->system_port = params->system_port;
4588 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
4590 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
4591 struct mlxsw_sp_rif_subport *rif_subport;
4592 char ritr_pl[MLXSW_REG_RITR_LEN];
4594 rif_subport = mlxsw_sp_rif_subport_rif(rif);
4595 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
4596 rif->rif_index, rif->vr_id, rif->dev->mtu,
4597 rif->dev->dev_addr);
4598 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
4599 rif_subport->lag ? rif_subport->lag_id :
4600 rif_subport->system_port,
4603 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_rif_subport_op(rif, true);
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_subport_op(rif, false);
}
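/* Sub-port RIFs are not backed by a bridge FID; each instead uses a
 * dedicated rFID derived from its RIF index.
 */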
static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};
static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
				    enum mlxsw_reg_ritr_if_type type,
				    u16 vid_fid, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu, rif->dev->dev_addr);
	mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
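/* The "router port" is a fictitious port one past the highest physical
 * port number. Packets flooded to it are delivered to the router, which
 * is how multicast and broadcast traffic in a FID reaches a RIF.
 */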
static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
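/* Configuring a VLAN RIF enables it in the device and then opens MC and
 * BC flooding in its FID towards the router port; the error path unwinds
 * these steps in reverse order.
 */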
static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	return 0;

err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
	return err;
}
static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);

	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_VLAN_IF, vid, false);
}
static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif)
{
	u16 vid = is_vlan_dev(rif->dev) ? vlan_dev_vlan_id(rif->dev) : 1;

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
};
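/* FID RIFs back VLAN-unaware (802.1D) bridges. The FID is looked up by
 * the bridge netdev's ifindex, whereas VLAN RIFs are keyed by VLAN ID.
 */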
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	int err;

	err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
				       true);
	if (err)
		return err;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	return 0;

err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
	return err;
}
static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);

	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
}
static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}
static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
};
static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
};
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp_rif_ops_arr;

	return 0;
}
static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	kfree(mlxsw_sp->router->rifs);
}
static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}
static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
	if (err)
		return err;

	return 0;
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
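/* Router initialization: the sub-systems below are brought up in
 * dependency order. The error path, like mlxsw_sp_router_fini() further
 * down, tears them down in exactly the reverse order.
 */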
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(&mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush);
	if (err)
		goto err_register_fib_notifier;

	return 0;

err_register_fib_notifier:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	kfree(mlxsw_sp->router);
	return err;
}
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_fib_notifier(&mlxsw_sp->router->fib_nb);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	kfree(mlxsw_sp->router);
}