1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
/* Upper bound on STEs in one rule chain: match STEs plus action STEs */
6 #define DR_RULE_MAX_STE_CHAIN (DR_RULE_MAX_STES + DR_ACTION_MAX_STES)
/* Links one action to a rule; rules keep these on rule_actions_list.
 * The action's refcount is incremented while the member exists
 * (see dr_rule_add_action_members/dr_rule_remove_action_members).
 */
8 struct mlx5dr_rule_action_member {
9 struct mlx5dr_action *action;
10 struct list_head list;
/* Append new_last_ste at the tail of miss_list: redirect the current
 * tail's miss address to the new entry and queue the updated tail on
 * send_list for a HW write. Allocates a send_info for that write
 * (failure path elided in this extract).
 */
13 static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
14 struct list_head *miss_list,
15 struct list_head *send_list)
17 struct mlx5dr_ste_send_info *ste_info_last;
18 struct mlx5dr_ste *last_ste;
20 /* The new entry will be inserted after the last */
21 last_ste = list_entry(miss_list->prev, struct mlx5dr_ste, miss_list_node);
24 ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
28 mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
29 mlx5dr_ste_get_icm_addr(new_last_ste));
30 list_add_tail(&new_last_ste->miss_list_node, miss_list);
32 mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED,
34 ste_info_last, send_list, true);
/* Allocate a single-entry "don't care" hash table to host one colliding
 * STE. hw_ste's miss address is pointed at the matcher's end anchor, and
 * a reference is taken on the new table. Returns the table's only STE
 * (NULL-return error path elided in this extract).
 */
39 static struct mlx5dr_ste *
40 dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
41 struct mlx5dr_matcher_rx_tx *nic_matcher,
44 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
45 struct mlx5dr_ste_htbl *new_htbl;
46 struct mlx5dr_ste *ste;
48 /* Create new table for miss entry */
49 new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
51 MLX5DR_STE_LU_TYPE_DONT_CARE,
54 mlx5dr_dbg(dmn, "Failed allocating collision table\n");
58 /* One and only entry, never grows */
59 ste = new_htbl->ste_arr;
60 mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
61 mlx5dr_htbl_get(new_htbl);
/* Create a collision STE that mirrors orig_ste: same chain location and
 * shared miss list. Also creates the STE's next-hop table; on that
 * failure the STE is freed (cleanup label elided in this extract).
 */
66 static struct mlx5dr_ste *
67 dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
68 struct mlx5dr_matcher_rx_tx *nic_matcher,
70 struct mlx5dr_ste *orig_ste)
72 struct mlx5dr_ste *ste;
74 ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
76 mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
80 ste->ste_chain_location = orig_ste->ste_chain_location;
82 /* In collision entry, all members share the same miss_list_head */
83 ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
86 if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
88 mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
95 mlx5dr_ste_free(ste, matcher, nic_matcher);
/* Detach one pending send_info from its update list, post the write to
 * HW, then mirror the reduced-size data into the STE's shadow hw_ste
 * copy so SW state matches HW.
 */
100 dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
101 struct mlx5dr_domain *dmn)
105 list_del(&ste_info->send_list);
106 ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
107 ste_info->size, ste_info->offset);
110 /* Copy data to ste, only reduced size, the last 16B (mask)
111 * is already written to the hw.
113 memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
/* Flush all queued STE writes to HW. Two traversal orders are visible:
 * reverse and forward; the selector between them (presumably a bool
 * parameter on the elided line) is not visible here — TODO confirm.
 * Order matters so that pointed-to entries are written before pointers
 * to them (see comment at the rehash call site).
 */
120 static int dr_rule_send_update_list(struct list_head *send_ste_list,
121 struct mlx5dr_domain *dmn,
124 struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
128 list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
129 send_ste_list, send_list) {
130 ret = dr_rule_handle_one_ste_in_update_list(ste_info,
136 list_for_each_entry_safe(ste_info, tmp_ste_info,
137 send_ste_list, send_list) {
138 ret = dr_rule_handle_one_ste_in_update_list(ste_info,
/* Linear search of miss_list for an STE whose tag matches hw_ste.
 * Return statements are elided in this extract; presumably returns the
 * matching STE or NULL — TODO confirm against full source.
 */
148 static struct mlx5dr_ste *
149 dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
151 struct mlx5dr_ste *ste;
153 if (list_empty(miss_list))
156 /* Check if hw_ste is present in the list */
157 list_for_each_entry(ste, miss_list, miss_list_node) {
158 if (mlx5dr_ste_equal_tag(ste->hw_ste, hw_ste))
/* During rehash, place hw_ste that collided with col_ste in the new
 * table: create a one-entry collision table sharing col_ste's miss
 * list and append the new STE to that list (queueing the previous
 * tail's update on update_list). Frees new_ste on append failure.
 */
165 static struct mlx5dr_ste *
166 dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
167 struct mlx5dr_matcher_rx_tx *nic_matcher,
168 struct list_head *update_list,
169 struct mlx5dr_ste *col_ste,
172 struct mlx5dr_ste *new_ste;
175 new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
179 /* In collision entry, all members share the same miss_list_head */
180 new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
182 /* Update the previous from the list */
183 ret = dr_rule_append_to_miss_list(new_ste,
184 mlx5dr_ste_get_miss_list(col_ste),
187 mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
194 mlx5dr_ste_free(new_ste, matcher, nic_matcher);
/* Transfer cur_ste's control state to new_ste during rehash: next-hop
 * table, chain location, refcount, rule-member back-pointers and the
 * rule_list membership, so the new STE fully stands in for the old one.
 */
198 static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
199 struct mlx5dr_matcher_rx_tx *nic_matcher,
200 struct mlx5dr_ste *cur_ste,
201 struct mlx5dr_ste *new_ste)
203 new_ste->next_htbl = cur_ste->next_htbl;
204 new_ste->ste_chain_location = cur_ste->ste_chain_location;
/* Only non-last STEs have a next table whose back-pointer must follow */
206 if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location))
207 new_ste->next_htbl->pointing_ste = new_ste;
209 /* We need to copy the refcount since this ste
210 * may have been traversed several times
212 refcount_set(&new_ste->refcount, refcount_read(&cur_ste->refcount));
214 /* Link old STEs rule_mem list to the new ste */
215 mlx5dr_rule_update_rule_member(cur_ste, new_ste);
216 INIT_LIST_HEAD(&new_ste->rule_list);
217 list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list);
/* Copy one STE from the old table into new_htbl during rehash:
 * rebuild the full hw_ste (matcher mask + old control/tag + anchor
 * miss address), hash it into new_htbl, and either occupy a free slot
 * or fall back to collision handling. Collision entries are written
 * via update_list (full DR_STE_SIZE); control state is then migrated.
 */
220 static struct mlx5dr_ste *
221 dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
222 struct mlx5dr_matcher_rx_tx *nic_matcher,
223 struct mlx5dr_ste *cur_ste,
224 struct mlx5dr_ste_htbl *new_htbl,
225 struct list_head *update_list)
227 struct mlx5dr_ste_send_info *ste_info;
228 bool use_update_list = false;
229 u8 hw_ste[DR_STE_SIZE] = {};
230 struct mlx5dr_ste *new_ste;
234 /* Copy STE mask from the matcher */
235 sb_idx = cur_ste->ste_chain_location - 1;
236 mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
238 /* Copy STE control and tag */
239 memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
240 mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
242 new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
243 new_ste = &new_htbl->ste_arr[new_idx];
245 if (mlx5dr_ste_not_used_ste(new_ste)) {
/* Free slot: take table ref and start a fresh miss list */
246 mlx5dr_htbl_get(new_htbl);
247 list_add_tail(&new_ste->miss_list_node,
248 mlx5dr_ste_get_miss_list(new_ste));
/* Slot taken: chain into the occupant's miss list instead */
250 new_ste = dr_rule_rehash_handle_collision(matcher,
256 mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
260 new_htbl->ctrl.num_of_collisions++;
261 use_update_list = true;
264 memcpy(new_ste->hw_ste, hw_ste, DR_STE_SIZE_REDUCED);
266 new_htbl->ctrl.num_of_valid_entries++;
268 if (use_update_list) {
269 ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
273 mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
278 dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);
283 mlx5dr_ste_free(new_ste, matcher, nic_matcher);
/* Migrate every STE on one old-table miss list into new_htbl: copy
 * each STE, unlink it from the old list, and drop the old table ref.
 * A copy failure mid-list is unrecoverable — hence the fatal error log.
 */
287 static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
288 struct mlx5dr_matcher_rx_tx *nic_matcher,
289 struct list_head *cur_miss_list,
290 struct mlx5dr_ste_htbl *new_htbl,
291 struct list_head *update_list)
293 struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;
295 if (list_empty(cur_miss_list))
298 list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
299 new_ste = dr_rule_rehash_copy_ste(matcher,
307 list_del(&cur_ste->miss_list_node);
308 mlx5dr_htbl_put(cur_ste->htbl);
313 mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
/* Walk every entry of cur_htbl and migrate each used entry's miss list
 * into new_htbl. Empty entries are skipped; an invalid (zero) entry
 * count is rejected up front.
 */
318 static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
319 struct mlx5dr_matcher_rx_tx *nic_matcher,
320 struct mlx5dr_ste_htbl *cur_htbl,
321 struct mlx5dr_ste_htbl *new_htbl,
322 struct list_head *update_list)
324 struct mlx5dr_ste *cur_ste;
329 cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk_size);
331 if (cur_entries < 1) {
332 mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
336 for (i = 0; i < cur_entries; i++) {
337 cur_ste = &cur_htbl->ste_arr[i];
338 if (mlx5dr_ste_not_used_ste(cur_ste)) /* Empty, nothing to copy */
341 err = dr_rule_rehash_copy_miss_list(matcher,
343 mlx5dr_ste_get_miss_list(cur_ste),
/* Grow cur_htbl into a freshly allocated table of new_size:
 *   1. allocate new table and format its miss-to-anchor STE,
 *   2. copy all entries over (dr_rule_rehash_copy_htbl),
 *   3. write the new table and its pending entry updates to HW,
 *   4. re-point the previous hop (anchor or pointing STE) at the
 *      new table and queue that final update.
 * On failure, pending send_infos and the new table are cleaned up
 * (labels elided in this extract). Returns the new table.
 */
354 static struct mlx5dr_ste_htbl *
355 dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
356 struct mlx5dr_rule_rx_tx *nic_rule,
357 struct mlx5dr_ste_htbl *cur_htbl,
359 struct list_head *update_list,
360 enum mlx5dr_icm_chunk_size new_size)
362 struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
363 struct mlx5dr_matcher *matcher = rule->matcher;
364 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
365 struct mlx5dr_matcher_rx_tx *nic_matcher;
366 struct mlx5dr_ste_send_info *ste_info;
367 struct mlx5dr_htbl_connect_info info;
368 struct mlx5dr_domain_rx_tx *nic_dmn;
369 u8 formatted_ste[DR_STE_SIZE] = {};
370 LIST_HEAD(rehash_table_send_list);
371 struct mlx5dr_ste *ste_to_update;
372 struct mlx5dr_ste_htbl *new_htbl;
375 nic_matcher = nic_rule->nic_matcher;
376 nic_dmn = nic_matcher->nic_tbl->nic_dmn;
378 ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
382 new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
385 cur_htbl->byte_mask);
387 mlx5dr_err(dmn, "Failed to allocate new hash table\n");
391 /* Write new table to HW */
392 info.type = CONNECT_MISS;
393 info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
394 mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
/* New table inherits the old table's upstream pointer */
400 new_htbl->pointing_ste = cur_htbl->pointing_ste;
401 new_htbl->pointing_ste->next_htbl = new_htbl;
402 err = dr_rule_rehash_copy_htbl(matcher,
406 &rehash_table_send_list);
410 if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
411 nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
412 mlx5dr_err(dmn, "Failed writing table to HW\n");
416 /* Writing to the hw is done in regular order of rehash_table_send_list,
417 * in order to have the origin data written before the miss address of
418 * collision entries, if exists.
420 if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
421 mlx5dr_err(dmn, "Failed updating table to HW\n");
425 /* Connect previous hash table to current */
426 if (ste_location == 1) {
427 /* The previous table is an anchor, anchors size is always one STE */
428 struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;
430 /* On matcher s_anchor we keep an extra refcount */
431 mlx5dr_htbl_get(new_htbl);
432 mlx5dr_htbl_put(cur_htbl);
434 nic_matcher->s_htbl = new_htbl;
436 /* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
437 * (48B len) which works only on first 32B
439 mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
440 new_htbl->chunk->icm_addr,
441 new_htbl->chunk->num_of_entries);
443 ste_to_update = &prev_htbl->ste_arr[0];
445 mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
447 ste_to_update = cur_htbl->pointing_ste;
/* Queue the re-pointed upstream STE on the caller's update list */
450 mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED,
451 0, ste_to_update->hw_ste, ste_info,
457 /* Clean all ste_info's from the new table */
458 list_for_each_entry_safe(del_ste_info, tmp_ste_info,
459 &rehash_table_send_list, send_list) {
460 list_del(&del_ste_info->send_list);
465 mlx5dr_ste_htbl_free(new_htbl);
468 mlx5dr_info(dmn, "Failed creating rehash table\n");
/* Grow cur_htbl one chunk size up, capped at the domain's maximum SW
 * ICM size. Returns NULL (skip rehash) when already at the cap,
 * otherwise delegates to dr_rule_rehash_htbl.
 */
472 static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
473 struct mlx5dr_rule_rx_tx *nic_rule,
474 struct mlx5dr_ste_htbl *cur_htbl,
476 struct list_head *update_list)
478 struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
479 enum mlx5dr_icm_chunk_size new_size;
481 new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk_size);
482 new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
484 if (new_size == cur_htbl->chunk_size)
485 return NULL; /* Skip rehash, we already at the max size */
487 return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
488 update_list, new_size);
/* Hash-slot collision on rule insertion: build a collision entry for
 * hw_ste, append it to the slot's miss list, queue both writes on
 * send_list and update the table's collision/valid counters. Frees
 * the new STE if the miss-list append fails.
 */
491 static struct mlx5dr_ste *
492 dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
493 struct mlx5dr_matcher_rx_tx *nic_matcher,
494 struct mlx5dr_ste *ste,
496 struct list_head *miss_list,
497 struct list_head *send_list)
499 struct mlx5dr_ste_send_info *ste_info;
500 struct mlx5dr_ste *new_ste;
502 ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
506 new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
510 if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
511 mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
515 mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
516 ste_info, send_list, false);
518 ste->htbl->ctrl.num_of_collisions++;
519 ste->htbl->ctrl.num_of_valid_entries++;
524 mlx5dr_ste_free(new_ste, matcher, nic_matcher);
/* Unlink and release every action member of the rule, dropping the
 * reference taken on each action at add time.
 */
530 static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
532 struct mlx5dr_rule_action_member *action_mem;
533 struct mlx5dr_rule_action_member *tmp;
535 list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
536 list_del(&action_mem->list);
537 refcount_dec(&action_mem->action->refcount);
/* Attach each of the given actions to the rule, taking a reference per
 * action. On allocation failure all members added so far are rolled
 * back via dr_rule_remove_action_members.
 */
542 static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
544 struct mlx5dr_action *actions[])
546 struct mlx5dr_rule_action_member *action_mem;
549 for (i = 0; i < num_actions; i++) {
550 action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
552 goto free_action_members;
554 action_mem->action = actions[i];
555 INIT_LIST_HEAD(&action_mem->list);
556 list_add_tail(&action_mem->list, &rule->rule_actions_list);
557 refcount_inc(&action_mem->action->refcount);
563 dr_rule_remove_action_members(rule);
567 /* While the pointer of ste is no longer valid, like while moving ste to be
568 * the first in the miss_list, and to be in the origin table,
569 * all rule-members that are attached to this ste should update their ste member
*
* Re-points every rule member on ste's rule_list at new_ste.
*/
572 void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste,
573 struct mlx5dr_ste *new_ste)
575 struct mlx5dr_rule_member *rule_mem;
577 if (!list_empty(&ste->rule_list))
578 list_for_each_entry(rule_mem, &ste->rule_list, use_ste_list)
579 rule_mem->ste = new_ste;
/* Tear down the nic_rule's member list: unlink each member from both
 * the rule's list and its STE's use list, and drop the STE reference.
 */
582 static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
583 struct mlx5dr_rule_rx_tx *nic_rule)
585 struct mlx5dr_rule_member *rule_mem;
586 struct mlx5dr_rule_member *tmp_mem;
588 if (list_empty(&nic_rule->rule_members_list))
590 list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) {
591 list_del(&rule_mem->list);
592 list_del(&rule_mem->use_ste_list);
593 mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher);
/* Decide whether htbl should be rehashed to a bigger size: never once
 * at the domain's max chunk size; otherwise when both the collision
 * count and the non-colliding valid-entry count reach the table's
 * increase threshold. (Return statements elided in this extract.)
 */
598 static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
599 struct mlx5dr_domain *dmn,
600 struct mlx5dr_domain_rx_tx *nic_dmn)
602 struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
604 if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size)
610 if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
611 (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
/* Record ste as a member of nic_rule: allocate a member linking the
 * rule's member list with the STE's rule_list.
 */
617 static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule,
618 struct mlx5dr_ste *ste)
620 struct mlx5dr_rule_member *rule_mem;
622 rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL);
627 list_add_tail(&rule_mem->list, &nic_rule->rule_members_list);
629 list_add_tail(&rule_mem->use_ste_list, &ste->rule_list);
/* Append the extra action-only STEs (indices num_of_builders..
 * new_hw_ste_arr_sz-1 of hw_ste_arr) after the last match STE: each
 * gets its own one-entry table, is linked from the previous STE's hit
 * address, registered as a rule member and queued for HW write.
 * No-op when the actions fit in the match STEs. Unwind labels are
 * elided in this extract.
 */
634 static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
635 struct mlx5dr_rule_rx_tx *nic_rule,
636 struct list_head *send_ste_list,
637 struct mlx5dr_ste *last_ste,
639 u32 new_hw_ste_arr_sz)
641 struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
642 struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
643 u8 num_of_builders = nic_matcher->num_of_builders;
644 struct mlx5dr_matcher *matcher = rule->matcher;
645 u8 *curr_hw_ste, *prev_hw_ste;
646 struct mlx5dr_ste *action_ste;
650 * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste
651 * 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added
652 * to support the action.
654 if (num_of_builders == new_hw_ste_arr_sz)
657 for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
658 curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
659 prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
660 action_ste = dr_rule_create_collision_htbl(matcher,
666 mlx5dr_ste_get(action_ste);
668 /* While free ste we go over the miss list, so add this ste to the list */
669 list_add_tail(&action_ste->miss_list_node,
670 mlx5dr_ste_get_miss_list(action_ste));
672 ste_info_arr[k] = kzalloc(sizeof(*ste_info_arr[k]),
674 if (!ste_info_arr[k])
677 /* Point current ste to the new action */
678 mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
679 ret = dr_rule_add_member(nic_rule, action_ste);
681 mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
684 mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
687 send_ste_list, false);
693 kfree(ste_info_arr[k]);
695 mlx5dr_ste_put(action_ste, matcher, nic_matcher);
/* Occupy a previously unused hash slot with ste: take a table ref,
 * start its miss list, point its miss at the end anchor, create the
 * next-hop table and queue the full-size HW write. On failure the
 * slot setup is rolled back (labels partly elided in this extract).
 */
699 static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
700 struct mlx5dr_matcher_rx_tx *nic_matcher,
701 struct mlx5dr_ste_htbl *cur_htbl,
702 struct mlx5dr_ste *ste,
705 struct list_head *miss_list,
706 struct list_head *send_list)
708 struct mlx5dr_ste_send_info *ste_info;
710 /* Take ref on table, only on first time this ste is used */
711 mlx5dr_htbl_get(cur_htbl);
713 /* new entry -> new branch */
714 list_add_tail(&ste->miss_list_node, miss_list);
716 mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
718 ste->ste_chain_location = ste_location;
720 ste_info = kzalloc(sizeof(*ste_info), GFP_KERNEL);
722 goto clean_ste_setting;
724 if (mlx5dr_ste_create_next_htbl(matcher,
729 mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
733 cur_htbl->ctrl.num_of_valid_entries++;
735 mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
736 ste_info, send_list, false);
743 list_del_init(&ste->miss_list_node);
744 mlx5dr_htbl_put(cur_htbl);
/* Insert hw_ste into cur_htbl for one chain position:
 *   - empty slot  -> dr_rule_handle_empty_entry,
 *   - tag already in miss list -> duplicate-rule abort when it is the
 *     last STE in the chain,
 *   - table too full -> rehash (retry loop structure partly elided),
 *   - otherwise -> add a collision entry.
 * *put_htbl receives a table whose extra ref the caller must drop
 * (released in dr_rule_create_rule per the comment below).
 */
749 static struct mlx5dr_ste *
750 dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
751 struct mlx5dr_rule_rx_tx *nic_rule,
752 struct list_head *send_ste_list,
753 struct mlx5dr_ste_htbl *cur_htbl,
756 struct mlx5dr_ste_htbl **put_htbl)
758 struct mlx5dr_matcher *matcher = rule->matcher;
759 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
760 struct mlx5dr_matcher_rx_tx *nic_matcher;
761 struct mlx5dr_domain_rx_tx *nic_dmn;
762 struct mlx5dr_ste_htbl *new_htbl;
763 struct mlx5dr_ste *matched_ste;
764 struct list_head *miss_list;
765 bool skip_rehash = false;
766 struct mlx5dr_ste *ste;
769 nic_matcher = nic_rule->nic_matcher;
770 nic_dmn = nic_matcher->nic_tbl->nic_dmn;
773 index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
774 miss_list = &cur_htbl->chunk->miss_list[index];
775 ste = &cur_htbl->ste_arr[index];
777 if (mlx5dr_ste_not_used_ste(ste)) {
778 if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
784 /* Hash table index in use, check if this ste is in the miss list */
785 matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
787 /* If it is last STE in the chain, and has the same tag
788 * it means that all the previous stes are the same,
789 * if so, this rule is duplicated.
791 if (mlx5dr_ste_is_last_in_rule(nic_matcher,
792 matched_ste->ste_chain_location)) {
793 mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
799 if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
800 /* Hash table index in use, try to resize of the hash */
803 /* Hold the table till we update.
804 * Release in dr_rule_create_rule()
806 *put_htbl = cur_htbl;
807 mlx5dr_htbl_get(cur_htbl);
809 new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
810 ste_location, send_ste_list);
812 mlx5dr_htbl_put(cur_htbl);
813 mlx5dr_info(dmn, "failed creating rehash table, htbl-log_size: %d\n",
814 cur_htbl->chunk_size);
820 /* Hash table index in use, add another collision (miss) */
821 ste = dr_rule_handle_collision(matcher,
828 mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
/* Byte-wise check over [s_idx, e_idx) that value sets no bit outside
 * mask; logs when a stray bit is found. (Return statements elided in
 * this extract.)
 */
837 static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
838 u32 s_idx, u32 e_idx)
842 for (i = s_idx; i < e_idx; i++) {
843 if (value[i] & ~mask[i]) {
844 pr_info("Rule parameters contains a value not specified by mask\n");
/* Validate the caller's match value against the matcher's mask:
 * sanity-check the value size, copy it into *param, then for every
 * enabled criteria section (outer/misc/inner/misc2/misc3) verify no
 * value bit falls outside the mask. Each section's span is clamped to
 * value_size via min().
 */
851 static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
852 struct mlx5dr_match_parameters *value,
853 struct mlx5dr_match_param *param)
855 u8 match_criteria = matcher->match_criteria;
856 size_t value_size = value->match_sz;
857 u8 *mask_p = (u8 *)&matcher->mask;
858 u8 *param_p = (u8 *)param;
862 (value_size > sizeof(struct mlx5dr_match_param) ||
863 (value_size % sizeof(u32)))) {
864 mlx5dr_dbg(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
868 mlx5dr_ste_copy_param(matcher->match_criteria, param, value);
870 if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
871 s_idx = offsetof(struct mlx5dr_match_param, outer);
872 e_idx = min(s_idx + sizeof(param->outer), value_size);
874 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
875 mlx5dr_dbg(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
880 if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
881 s_idx = offsetof(struct mlx5dr_match_param, misc);
882 e_idx = min(s_idx + sizeof(param->misc), value_size);
884 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
885 mlx5dr_dbg(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
890 if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
891 s_idx = offsetof(struct mlx5dr_match_param, inner);
892 e_idx = min(s_idx + sizeof(param->inner), value_size);
894 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
895 mlx5dr_dbg(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
900 if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
901 s_idx = offsetof(struct mlx5dr_match_param, misc2);
902 e_idx = min(s_idx + sizeof(param->misc2), value_size);
904 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
905 mlx5dr_dbg(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
910 if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
911 s_idx = offsetof(struct mlx5dr_match_param, misc3);
912 e_idx = min(s_idx + sizeof(param->misc3), value_size);
914 if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
915 mlx5dr_dbg(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
/* Destroy one direction (RX or TX) of a rule by releasing its members */
922 static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
923 struct mlx5dr_rule_rx_tx *nic_rule)
925 dr_rule_clean_rule_members(rule, nic_rule);
/* FDB rules have both RX and TX halves; destroy each */
929 static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
931 dr_rule_destroy_rule_nic(rule, &rule->rx);
932 dr_rule_destroy_rule_nic(rule, &rule->tx);
/* Tear down a rule per its domain type (NIC RX / NIC TX / FDB), then
 * release the rule's action members.
 */
936 static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
938 struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
941 case MLX5DR_DOMAIN_TYPE_NIC_RX:
942 dr_rule_destroy_rule_nic(rule, &rule->rx);
944 case MLX5DR_DOMAIN_TYPE_NIC_TX:
945 dr_rule_destroy_rule_nic(rule, &rule->tx);
947 case MLX5DR_DOMAIN_TYPE_FDB:
948 dr_rule_destroy_rule_fdb(rule);
954 dr_rule_remove_action_members(rule);
/* True when either header layer indicates IPv6, by IP version field or
 * by ethertype.
 */
959 static bool dr_rule_is_ipv6(struct mlx5dr_match_param *param)
961 return (param->outer.ip_version == 6 ||
962 param->inner.ip_version == 6 ||
963 param->outer.ethertype == ETH_P_IPV6 ||
964 param->inner.ethertype == ETH_P_IPV6);
/* Decide whether a rule is irrelevant for this direction and can be
 * skipped. Only applies to FDB domains: a rule matching on source
 * port (or on metadata reg C0, which can encode the source vport) is
 * skipped on RX unless sourced from the wire, and on TX when sourced
 * from the wire. (Return statements elided in this extract.)
 */
967 static bool dr_rule_skip(enum mlx5dr_domain_type domain,
968 enum mlx5dr_ste_entry_type ste_type,
969 struct mlx5dr_match_param *mask,
970 struct mlx5dr_match_param *value)
972 if (domain != MLX5DR_DOMAIN_TYPE_FDB)
975 if (mask->misc.source_port) {
976 if (ste_type == MLX5DR_STE_TYPE_RX)
977 if (value->misc.source_port != WIRE_PORT)
980 if (ste_type == MLX5DR_STE_TYPE_TX)
981 if (value->misc.source_port == WIRE_PORT)
985 /* Metadata C can be used to describe the source vport */
986 if (mask->misc2.metadata_reg_c_0) {
987 if (ste_type == MLX5DR_STE_TYPE_RX)
988 if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) != WIRE_PORT)
991 if (ste_type == MLX5DR_STE_TYPE_TX)
992 if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) == WIRE_PORT)
/* Create one direction of a rule:
 *   1. skip if irrelevant for this direction (dr_rule_skip),
 *   2. select STE builders and build the tag array, then fill in the
 *      action values (may grow the array to new_hw_ste_arr_sz),
 *   3. walk the matcher's table chain inserting one STE per builder
 *      (dr_rule_handle_ste_branch), registering each as a rule member,
 *   4. append action-only STEs and flush all pending HW writes.
 * On failure, members and queued send_infos are cleaned up (some
 * labels elided in this extract).
 */
999 dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
1000 struct mlx5dr_rule_rx_tx *nic_rule,
1001 struct mlx5dr_match_param *param,
1003 struct mlx5dr_action *actions[])
1005 struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
1006 struct mlx5dr_matcher *matcher = rule->matcher;
1007 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
1008 struct mlx5dr_matcher_rx_tx *nic_matcher;
1009 struct mlx5dr_domain_rx_tx *nic_dmn;
1010 struct mlx5dr_ste_htbl *htbl = NULL;
1011 struct mlx5dr_ste_htbl *cur_htbl;
1012 struct mlx5dr_ste *ste = NULL;
1013 LIST_HEAD(send_ste_list);
1014 u8 *hw_ste_arr = NULL;
1015 u32 new_hw_ste_arr_sz;
1018 nic_matcher = nic_rule->nic_matcher;
1019 nic_dmn = nic_matcher->nic_tbl->nic_dmn;
1021 INIT_LIST_HEAD(&nic_rule->rule_members_list);
1023 if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
1026 ret = mlx5dr_matcher_select_builders(matcher,
1028 dr_rule_is_ipv6(param));
1032 hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
1038 /* Set the tag values inside the ste array */
1039 ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
1043 /* Set the actions values/addresses inside the ste array */
1044 ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
1045 num_actions, hw_ste_arr,
1046 &new_hw_ste_arr_sz);
1050 cur_htbl = nic_matcher->s_htbl;
1052 /* Go over the array of STEs, and build dr_ste accordingly.
1053 * The loop is over only the builders which are equal or less to the
1054 * number of stes, in case we have actions that lives in other stes.
1056 for (i = 0; i < nic_matcher->num_of_builders; i++) {
1057 /* Calculate CRC and keep new ste entry */
1058 u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);
1060 ste = dr_rule_handle_ste_branch(rule,
1068 mlx5dr_err(dmn, "Failed creating next branch\n");
1073 cur_htbl = ste->next_htbl;
1075 /* Keep all STEs in the rule struct */
1076 ret = dr_rule_add_member(nic_rule, ste);
1078 mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i);
1082 mlx5dr_ste_get(ste);
1085 /* Connect actions */
1086 ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
1087 ste, hw_ste_arr, new_hw_ste_arr_sz);
1089 mlx5dr_dbg(dmn, "Failed apply actions\n");
1092 ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
1094 mlx5dr_err(dmn, "Failed sending ste!\n");
/* Drop the extra ref taken by dr_rule_handle_ste_branch on rehash */
1099 mlx5dr_htbl_put(htbl);
1104 mlx5dr_ste_put(ste, matcher, nic_matcher);
1106 dr_rule_clean_rule_members(rule, nic_rule);
1107 /* Clean all ste_info's */
1108 list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
1109 list_del(&ste_info->send_list);
/* Create the RX then TX halves of an FDB rule. The match param is
 * copied first because the RX insertion consumes it; TX uses the copy.
 * TX failure unwinds the RX half.
 */
1119 dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
1120 struct mlx5dr_match_param *param,
1122 struct mlx5dr_action *actions[])
1124 struct mlx5dr_match_param copy_param = {};
1127 /* Copy match_param since they will be consumed during the first
1128 * nic_rule insertion.
1130 memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));
1132 ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
1133 num_actions, actions);
1137 ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
1138 num_actions, actions);
1140 goto destroy_rule_nic_rx;
1144 destroy_rule_nic_rx:
1145 dr_rule_destroy_rule_nic(rule, &rule->rx);
/* Allocate and build a rule: verify the match value against the
 * matcher mask, attach the actions, then dispatch on domain type to
 * create the NIC RX, NIC TX or both (FDB) halves. On failure the
 * action members are removed and the rule is freed (tail elided).
 */
1149 static struct mlx5dr_rule *
1150 dr_rule_create_rule(struct mlx5dr_matcher *matcher,
1151 struct mlx5dr_match_parameters *value,
1153 struct mlx5dr_action *actions[])
1155 struct mlx5dr_domain *dmn = matcher->tbl->dmn;
1156 struct mlx5dr_match_param param = {};
1157 struct mlx5dr_rule *rule;
1160 if (!dr_rule_verify(matcher, value, &param))
1163 rule = kzalloc(sizeof(*rule), GFP_KERNEL);
1167 rule->matcher = matcher;
1168 INIT_LIST_HEAD(&rule->rule_actions_list);
1170 ret = dr_rule_add_action_members(rule, num_actions, actions);
1174 switch (dmn->type) {
1175 case MLX5DR_DOMAIN_TYPE_NIC_RX:
1176 rule->rx.nic_matcher = &matcher->rx;
1177 ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
1178 num_actions, actions);
1180 case MLX5DR_DOMAIN_TYPE_NIC_TX:
1181 rule->tx.nic_matcher = &matcher->tx;
1182 ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
1183 num_actions, actions);
1185 case MLX5DR_DOMAIN_TYPE_FDB:
1186 rule->rx.nic_matcher = &matcher->rx;
1187 rule->tx.nic_matcher = &matcher->tx;
1188 ret = dr_rule_create_rule_fdb(rule, &param,
1189 num_actions, actions);
1197 goto remove_action_members;
1201 remove_action_members:
1202 dr_rule_remove_action_members(rule);
1205 mlx5dr_info(dmn, "Failed creating rule\n");
/* Public entry point: create a rule under the domain mutex, holding a
 * matcher reference for the rule's lifetime (dropped on failure).
 */
1209 struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
1210 struct mlx5dr_match_parameters *value,
1212 struct mlx5dr_action *actions[])
1214 struct mlx5dr_rule *rule;
1216 mutex_lock(&matcher->tbl->dmn->mutex);
1217 refcount_inc(&matcher->refcount);
1219 rule = dr_rule_create_rule(matcher, value, num_actions, actions);
/* Creation failed (presumably !rule on the elided condition):
 * release the matcher ref taken above.
 */
1221 refcount_dec(&matcher->refcount);
1223 mutex_unlock(&matcher->tbl->dmn->mutex);
/* Public entry point: destroy a rule under the domain mutex and drop
 * the matcher reference taken at creation.
 */
1228 int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
1230 struct mlx5dr_matcher *matcher = rule->matcher;
1231 struct mlx5dr_table *tbl = rule->matcher->tbl;
1234 mutex_lock(&tbl->dmn->mutex);
1236 ret = dr_rule_destroy_rule(rule);
1238 mutex_unlock(&tbl->dmn->mutex);
1241 refcount_dec(&matcher->refcount);