mlxsw: spectrum: Add tos to the ipv4 acl block
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_acl_tcam.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
3  * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. Neither the names of the copyright holders nor the names of its
15  *    contributors may be used to endorse or promote products derived from
16  *    this software without specific prior written permission.
17  *
18  * Alternatively, this software may be distributed under the terms of the
19  * GNU General Public License ("GPL") version 2 as published by the Free
20  * Software Foundation.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32  * POSSIBILITY OF SUCH DAMAGE.
33  */
34
35 #include <linux/kernel.h>
36 #include <linux/slab.h>
37 #include <linux/errno.h>
38 #include <linux/bitops.h>
39 #include <linux/list.h>
40 #include <linux/rhashtable.h>
41 #include <linux/netdevice.h>
42 #include <linux/parman.h>
43
44 #include "reg.h"
45 #include "core.h"
46 #include "resources.h"
47 #include "spectrum.h"
48 #include "core_acl_flex_keys.h"
49
/* Per-device ACL TCAM state: bitmaps tracking which region and group IDs
 * are in use, plus the limits read from device resources at init time.
 */
struct mlxsw_sp_acl_tcam {
	unsigned long *used_regions; /* bit array */
	unsigned int max_regions;
	unsigned long *used_groups;  /* bit array */
	unsigned int max_groups;
	unsigned int max_group_size; /* max regions per group */
};
57
/* Initialize the TCAM bookkeeping: read region/group limits from device
 * resources and allocate the used-ID bitmaps. Returns 0 or -ENOMEM.
 */
static int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;
	u64 max_tcam_regions;
	u64 max_regions;
	u64 max_groups;
	size_t alloc_size;
	int err;

	max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					      ACL_MAX_TCAM_REGIONS);
	max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

	/* Use 1:1 mapping between ACL region and TCAM region */
	if (max_tcam_regions < max_regions)
		max_regions = max_tcam_regions;

	alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
	tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_regions)
		return -ENOMEM;
	tcam->max_regions = max_regions;

	max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
	alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
	tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
	if (!tcam->used_groups) {
		err = -ENOMEM;
		goto err_alloc_used_groups;
	}
	tcam->max_groups = max_groups;
	tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						 ACL_MAX_GROUP_SIZE);
	return 0;

err_alloc_used_groups:
	kfree(tcam->used_regions);
	return err;
}
97
/* Release the used-ID bitmaps allocated by mlxsw_sp_acl_tcam_init(). */
static void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
{
	struct mlxsw_sp_acl_tcam *tcam = priv;

	kfree(tcam->used_groups);
	kfree(tcam->used_regions);
}
105
106 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
107                                            u16 *p_id)
108 {
109         u16 id;
110
111         id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
112         if (id < tcam->max_regions) {
113                 __set_bit(id, tcam->used_regions);
114                 *p_id = id;
115                 return 0;
116         }
117         return -ENOBUFS;
118 }
119
/* Return a region ID to the free pool. */
static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	__clear_bit(id, tcam->used_regions);
}
125
126 static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
127                                           u16 *p_id)
128 {
129         u16 id;
130
131         id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
132         if (id < tcam->max_groups) {
133                 __set_bit(id, tcam->used_groups);
134                 *p_id = id;
135                 return 0;
136         }
137         return -ENOBUFS;
138 }
139
/* Return a group ID to the free pool. */
static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	__clear_bit(id, tcam->used_groups);
}
145
/* A predefined set of flexible-key elements a region may be created with.
 * Groups carry an array of patterns to pick from when a new region is needed.
 */
struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};
150
/* An ACL group: an ordered list of regions bound to a port direction.
 * Chunks are looked up by priority through chunk_ht.
 */
struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct list_head region_list; /* sorted by ascending chunk priority */
	unsigned int region_count;
	struct rhashtable chunk_ht; /* priority -> chunk */
	struct {
		u16 local_port;
		bool ingress;
	} bound; /* valid after mlxsw_sp_acl_tcam_group_bind() */
	struct mlxsw_sp_acl_tcam_group_ops *ops;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
};
165
/* A TCAM region: hardware table holding entries encoded with one key_info.
 * Entry placement inside the region is managed by parman; a catch-all
 * (lowest precedence) entry is kept so misses continue to the next region.
 */
struct mlxsw_sp_acl_tcam_region {
	struct list_head list; /* Member of a TCAM group */
	struct list_head chunk_list; /* List of chunks under this region */
	struct parman *parman;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_acl_tcam_group *group;
	u16 id; /* ACL ID and region ID - they are same */
	char tcam_region_info[MLXSW_REG_PXXX_TCAM_REGION_INFO_LEN];
	struct mlxsw_afk_key_info *key_info;
	struct {
		struct parman_prio parman_prio;
		struct parman_item parman_item;
		struct mlxsw_sp_acl_rule_info *rulei;
	} catchall;
};
181
/* A chunk groups all entries of one priority inside a region; refcounted
 * so it lives as long as any entry of that priority exists.
 */
struct mlxsw_sp_acl_tcam_chunk {
	struct list_head list; /* Member of a TCAM region */
	struct rhash_head ht_node; /* Member of a chunk HT */
	unsigned int priority; /* Priority within the region and group */
	struct parman_prio parman_prio;
	struct mlxsw_sp_acl_tcam_group *group;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned int ref_count;
};
191
/* A single TCAM entry: its slot within the region (parman_item) and the
 * priority chunk it belongs to.
 */
struct mlxsw_sp_acl_tcam_entry {
	struct parman_item parman_item;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
};
196
/* Hash chunks by their priority value for fast lookup within a group. */
static const struct rhashtable_params mlxsw_sp_acl_tcam_chunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_chunk, ht_node),
	.automatic_shrinking = true,
};
203
/* Push the group's current ordered region list to the device via the
 * PAGT register. The list order defines hardware lookup precedence.
 */
static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list)
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++, region->id);
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
217
/* Create a new (initially empty) group: allocate a group ID, program the
 * empty region list to hardware and set up the priority->chunk hashtable.
 * On failure everything acquired so far is unwound.
 */
static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group,
			    const struct mlxsw_sp_acl_tcam_pattern *patterns,
			    unsigned int patterns_count)
{
	int err;

	group->tcam = tcam;
	group->patterns = patterns;
	group->patterns_count = patterns_count;
	INIT_LIST_HEAD(&group->region_list);
	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	err = rhashtable_init(&group->chunk_ht,
			      &mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

	/* No device-side teardown needed for a failed/empty group update;
	 * releasing the group ID is sufficient for both error paths.
	 */
err_rhashtable_init:
err_group_update:
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	return err;
}
251
/* Destroy a group. All regions must have been detached beforehand;
 * the WARN_ON flags a caller that leaked regions.
 */
static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	rhashtable_destroy(&group->chunk_ht);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}
261
/* Bind the group to a port's ingress or egress ACL lookup via the PPBT
 * register. Returns -EINVAL if dev is not an mlxsw port netdevice.
 */
static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct net_device *dev, bool ingress)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	if (!mlxsw_sp_port_dev_check(dev))
		return -EINVAL;

	mlxsw_sp_port = netdev_priv(dev);
	/* Remember the binding so unbind can replay it. */
	group->bound.local_port = mlxsw_sp_port->local_port;
	group->bound.ingress = ingress;
	mlxsw_reg_ppbt_pack(ppbt_pl,
			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
						   MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, group->bound.local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
283
/* Undo mlxsw_sp_acl_tcam_group_bind() using the saved binding info.
 * Best-effort: the write status is intentionally ignored on teardown.
 */
static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl,
			    group->bound.ingress ? MLXSW_REG_PXBT_E_IACL :
						   MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, group->bound.local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}
297
298 static unsigned int
299 mlxsw_sp_acl_tcam_region_prio(struct mlxsw_sp_acl_tcam_region *region)
300 {
301         struct mlxsw_sp_acl_tcam_chunk *chunk;
302
303         if (list_empty(&region->chunk_list))
304                 return 0;
305         /* As a priority of a region, return priority of the first chunk */
306         chunk = list_first_entry(&region->chunk_list, typeof(*chunk), list);
307         return chunk->priority;
308 }
309
310 static unsigned int
311 mlxsw_sp_acl_tcam_region_max_prio(struct mlxsw_sp_acl_tcam_region *region)
312 {
313         struct mlxsw_sp_acl_tcam_chunk *chunk;
314
315         if (list_empty(&region->chunk_list))
316                 return 0;
317         chunk = list_last_entry(&region->chunk_list, typeof(*chunk), list);
318         return chunk->priority;
319 }
320
321 static void
322 mlxsw_sp_acl_tcam_group_list_add(struct mlxsw_sp_acl_tcam_group *group,
323                                  struct mlxsw_sp_acl_tcam_region *region)
324 {
325         struct mlxsw_sp_acl_tcam_region *region2;
326         struct list_head *pos;
327
328         /* Position the region inside the list according to priority */
329         list_for_each(pos, &group->region_list) {
330                 region2 = list_entry(pos, typeof(*region2), list);
331                 if (mlxsw_sp_acl_tcam_region_prio(region2) >
332                     mlxsw_sp_acl_tcam_region_prio(region))
333                         break;
334         }
335         list_add_tail(&region->list, pos);
336         group->region_count++;
337 }
338
/* Remove a region from the group's software list. */
static void
mlxsw_sp_acl_tcam_group_list_del(struct mlxsw_sp_acl_tcam_group *group,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	group->region_count--;
	list_del(&region->list);
}
346
/* Attach a region to a group: add it to the sorted list and sync the new
 * region list to hardware. Fails with -ENOBUFS when the group already
 * holds the maximum number of regions.
 */
static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	int err;

	if (group->region_count == group->tcam->max_group_size)
		return -ENOBUFS;

	mlxsw_sp_acl_tcam_group_list_add(group, region);

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;
	region->group = group;

	return 0;

err_group_update:
	/* Roll back the list change and re-sync hardware to the old list. */
	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	return err;
}
371
/* Detach a region from its group and sync the shortened list to hardware. */
static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mlxsw_sp_acl_tcam_group_list_del(group, region);
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
}
381
/* Find an existing region in the group that can host a chunk of the given
 * priority and element usage. Returns NULL when a new region must be
 * created. *p_need_split is set (only when a region is returned) to
 * indicate the found region would have to be split because the requested
 * elements do not fit its key.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_group_region_find(struct mlxsw_sp_acl_tcam_group *group,
				    unsigned int priority,
				    struct mlxsw_afk_element_usage *elusage,
				    bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_region *region, *region2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &group->region_list) {
		region = list_entry(pos, typeof(*region), list);

		/* First, check if the requested priority does not rather belong
		 * under some of the next regions.
		 */
		if (pos->next != &group->region_list) { /* not last */
			region2 = list_entry(pos->next, typeof(*region2), list);
			if (priority >= mlxsw_sp_acl_tcam_region_prio(region2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(region->key_info, elusage);

		/* If requested element usage would not fit and the priority
		 * is lower than the currently inspected region we cannot
		 * use this region, so return NULL to indicate new region has
		 * to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_region_prio(region))
			return NULL;

		/* If requested element usage would not fit and the priority
		 * is higher than the currently inspected region we cannot
		 * use this region. There is still some hope that the next
		 * region would be the fit. So let it be processed and
		 * eventually break at the check right above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_region_max_prio(region))
			continue;

		/* Indicate if the region needs to be split in order to add
		 * the requested priority. Split is needed when requested
		 * element usage won't fit into the found region.
		 */
		*p_need_split = !issubset;
		return region;
	}
	return NULL; /* New region has to be created. */
}
434
435 static void
436 mlxsw_sp_acl_tcam_group_use_patterns(struct mlxsw_sp_acl_tcam_group *group,
437                                      struct mlxsw_afk_element_usage *elusage,
438                                      struct mlxsw_afk_element_usage *out)
439 {
440         const struct mlxsw_sp_acl_tcam_pattern *pattern;
441         int i;
442
443         for (i = 0; i < group->patterns_count; i++) {
444                 pattern = &group->patterns[i];
445                 mlxsw_afk_element_usage_fill(out, pattern->elements,
446                                              pattern->elements_count);
447                 if (mlxsw_afk_element_usage_subset(elusage, out))
448                         return;
449         }
450         memcpy(out, elusage, sizeof(*out));
451 }
452
/* Initial entry count of a newly allocated region, and the step by which
 * parman grows it when more entries are needed.
 */
#define MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT 16
#define MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP 16
455
/* Allocate the region in hardware via the PTAR register, packing the key
 * block encodings derived from the region's key_info. On success the
 * firmware-assigned tcam_region_info is unpacked back into the region.
 */
static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}
482
/* Free the hardware region previously allocated by region_alloc(). */
static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
493
/* Resize the hardware region to new_size entries via PTAR. Note the size
 * is a u16 here; the parman resize callback bounds-checks against the
 * device rule limit before calling.
 */
static int
mlxsw_sp_acl_tcam_region_resize(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region,
				u16 new_size)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_RESIZE,
			    new_size, region->id, region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}
505
/* Enable the region for lookups via the PACL register. */
static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
516
/* Disable the region for lookups via PACL. Best-effort on teardown. */
static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}
527
/* Write one TCAM entry at the given offset in the region via PTCE2:
 * encode the rule's key/mask from key_info and copy in the first action
 * set (further action sets live in the KVD linear area).
 */
static int
mlxsw_sp_acl_tcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset,
				      struct mlxsw_sp_acl_rule_info *rulei)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	char *act_set;
	char *mask;
	char *key;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
	mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
	mlxsw_afk_encode(region->key_info, &rulei->values, key, mask);

	/* Only the first action set belongs here, the rest is in KVD */
	act_set = mlxsw_afa_block_first_set(rulei->act_block);
	mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
551
/* Invalidate the TCAM entry at the given offset (valid bit cleared). */
static void
mlxsw_sp_acl_tcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int offset)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];

	mlxsw_reg_ptce2_pack(ptce2_pl, false, MLXSW_REG_PTCE2_OP_WRITE_WRITE,
			     region->tcam_region_info, offset);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
}
563
/* Query (and clear) the activity bit of the entry at the given offset.
 * Sets *activity to whether the entry was hit since the last query.
 */
static int
mlxsw_sp_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_region *region,
					    unsigned int offset,
					    bool *activity)
{
	char ptce2_pl[MLXSW_REG_PTCE2_LEN];
	int err;

	mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_QUERY_CLEAR_ON_READ,
			     region->tcam_region_info, offset);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
	if (err)
		return err;
	*activity = mlxsw_reg_ptce2_a_get(ptce2_pl);
	return 0;
}
581
/* The catch-all entry uses the maximum priority value so it sorts last
 * (lowest precedence) within its region.
 */
#define MLXSW_SP_ACL_TCAM_CATCHALL_PRIO (~0U)

584 static int
585 mlxsw_sp_acl_tcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp,
586                                       struct mlxsw_sp_acl_tcam_region *region)
587 {
588         struct parman_prio *parman_prio = &region->catchall.parman_prio;
589         struct parman_item *parman_item = &region->catchall.parman_item;
590         struct mlxsw_sp_acl_rule_info *rulei;
591         int err;
592
593         parman_prio_init(region->parman, parman_prio,
594                          MLXSW_SP_ACL_TCAM_CATCHALL_PRIO);
595         err = parman_item_add(region->parman, parman_prio, parman_item);
596         if (err)
597                 goto err_parman_item_add;
598
599         rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
600         if (IS_ERR(rulei)) {
601                 err = PTR_ERR(rulei);
602                 goto err_rulei_create;
603         }
604
605         mlxsw_sp_acl_rulei_act_continue(rulei);
606         err = mlxsw_sp_acl_rulei_commit(rulei);
607         if (err)
608                 goto err_rulei_commit;
609
610         err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
611                                                     parman_item->index, rulei);
612         region->catchall.rulei = rulei;
613         if (err)
614                 goto err_rule_insert;
615
616         return 0;
617
618 err_rule_insert:
619 err_rulei_commit:
620         mlxsw_sp_acl_rulei_destroy(rulei);
621 err_rulei_create:
622         parman_item_remove(region->parman, parman_prio, parman_item);
623 err_parman_item_add:
624         parman_prio_fini(parman_prio);
625         return err;
626 }
627
/* Remove the region's catch-all entry and release its rule info and
 * parman resources (reverse of catchall_add).
 */
static void
mlxsw_sp_acl_tcam_region_catchall_del(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct parman_prio *parman_prio = &region->catchall.parman_prio;
	struct parman_item *parman_item = &region->catchall.parman_item;
	struct mlxsw_sp_acl_rule_info *rulei = region->catchall.rulei;

	mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
					      parman_item->index);
	mlxsw_sp_acl_rulei_destroy(rulei);
	parman_item_remove(region->parman, parman_prio, parman_item);
	parman_prio_fini(parman_prio);
}
642
/* Move a span of entries within the same region via the PRCR register
 * (used by parman when reshuffling entries between priorities).
 */
static void
mlxsw_sp_acl_tcam_region_move(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region,
			      u16 src_offset, u16 dst_offset, u16 size)
{
	char prcr_pl[MLXSW_REG_PRCR_LEN];

	mlxsw_reg_prcr_pack(prcr_pl, MLXSW_REG_PRCR_OP_MOVE,
			    region->tcam_region_info, src_offset,
			    region->tcam_region_info, dst_offset, size);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(prcr), prcr_pl);
}
655
/* parman callback: grow the region to new_count entries, bounded by the
 * device's maximum rule count.
 * NOTE(review): new_count is narrowed to u16 by region_resize(); the
 * ACL_MAX_TCAM_RULES check presumably keeps it in range — confirm.
 */
static int mlxsw_sp_acl_tcam_region_parman_resize(void *priv,
						  unsigned long new_count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;
	u64 max_tcam_rules;

	max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
	if (new_count > max_tcam_rules)
		return -EINVAL;
	return mlxsw_sp_acl_tcam_region_resize(mlxsw_sp, region, new_count);
}
668
/* parman callback: relocate count entries inside the region. */
static void mlxsw_sp_acl_tcam_region_parman_move(void *priv,
						 unsigned long from_index,
						 unsigned long to_index,
						 unsigned long count)
{
	struct mlxsw_sp_acl_tcam_region *region = priv;
	struct mlxsw_sp *mlxsw_sp = region->mlxsw_sp;

	mlxsw_sp_acl_tcam_region_move(mlxsw_sp, region,
				      from_index, to_index, count);
}
680
/* Priority array manager ops: linear-sort placement, resize in fixed steps. */
static const struct parman_ops mlxsw_sp_acl_tcam_region_parman_ops = {
	.base_count	= MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
	.resize_step	= MLXSW_SP_ACL_TCAM_REGION_RESIZE_STEP,
	.resize		= mlxsw_sp_acl_tcam_region_parman_resize,
	.move		= mlxsw_sp_acl_tcam_region_parman_move,
	.algo		= PARMAN_ALGO_TYPE_LSORT,
};
688
/* Create a region for the given element usage: set up the parman, obtain
 * a matching key_info, allocate and enable the hardware region, and
 * install the catch-all entry. Returns the region or ERR_PTR; all steps
 * are unwound in reverse order on failure.
 */
static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&region->chunk_list);
	region->mlxsw_sp = mlxsw_sp;

	region->parman = parman_create(&mlxsw_sp_acl_tcam_region_parman_ops,
				       region);
	if (!region->parman) {
		err = -ENOMEM;
		goto err_parman_create;
	}

	region->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(region->key_info)) {
		err = PTR_ERR(region->key_info);
		goto err_key_info_get;
	}

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = mlxsw_sp_acl_tcam_region_catchall_add(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_catchall_add;

	return region;

err_tcam_region_catchall_add:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	mlxsw_afk_key_info_put(region->key_info);
err_key_info_get:
	parman_destroy(region->parman);
err_parman_create:
	kfree(region);
	return ERR_PTR(err);
}
749
/* Destroy a region: reverse of mlxsw_sp_acl_tcam_region_create().
 * NOTE(review): this reaches the tcam via region->group->tcam, so the
 * region must already be attached to a group — verify all callers satisfy
 * that (in particular error paths before attach succeeds).
 */
static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	mlxsw_sp_acl_tcam_region_catchall_del(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(region->group->tcam, region->id);
	mlxsw_afk_key_info_put(region->key_info);
	parman_destroy(region->parman);
	kfree(region);
}
762
/* Associate a chunk with a region of the group: reuse an existing region
 * whose key covers the requested elements at the right priority window,
 * or create a fresh one (using the group's patterns) and attach it.
 * Returns -EOPNOTSUPP when the chunk would require splitting an existing
 * region, which is not implemented.
 */
static int
mlxsw_sp_acl_tcam_chunk_assoc(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_group *group,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage,
			      struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	struct mlxsw_sp_acl_tcam_region *region;
	bool region_created = false;
	bool need_split;
	int err;

	region = mlxsw_sp_acl_tcam_group_region_find(group, priority, elusage,
						     &need_split);
	if (region && need_split) {
		/* According to priority, the chunk should belong to an
		 * existing region. However, this chunk needs elements
		 * that region does not contain. We need to split the existing
		 * region into two and create a new region for this chunk
		 * in between. This is not supported now.
		 */
		return -EOPNOTSUPP;
	}
	if (!region) {
		struct mlxsw_afk_element_usage region_elusage;

		mlxsw_sp_acl_tcam_group_use_patterns(group, elusage,
						     &region_elusage);
		region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, group->tcam,
							 &region_elusage);
		if (IS_ERR(region))
			return PTR_ERR(region);
		region_created = true;
	}

	chunk->region = region;
	list_add_tail(&chunk->list, &region->chunk_list);

	if (!region_created)
		return 0;

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, group, region);
	if (err)
		goto err_group_region_attach;

	return 0;

err_group_region_attach:
	/* NOTE(review): if attach failed before setting region->group,
	 * region_destroy() dereferences region->group->tcam — looks like a
	 * possible NULL dereference on this path; confirm upstream.
	 */
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
	return err;
}
814
815 static void
816 mlxsw_sp_acl_tcam_chunk_deassoc(struct mlxsw_sp *mlxsw_sp,
817                                 struct mlxsw_sp_acl_tcam_chunk *chunk)
818 {
819         struct mlxsw_sp_acl_tcam_region *region = chunk->region;
820
821         list_del(&chunk->list);
822         if (list_empty(&region->chunk_list)) {
823                 mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, region);
824                 mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, region);
825         }
826 }
827
/* Create a chunk for the given priority: allocate it, associate it with
 * a region (creating one if needed), register its parman priority and
 * index it in the group's chunk hashtable.
 *
 * Returns the new chunk with ref_count initialized to 1, or ERR_PTR()
 * on failure; the error path unwinds in reverse order of setup.
 */
static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       unsigned int priority,
			       struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	int err;

	/* The catch-all priority is reserved for internal use. */
	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->priority = priority;
	chunk->group = group;
	chunk->ref_count = 1;

	err = mlxsw_sp_acl_tcam_chunk_assoc(mlxsw_sp, group, priority,
					    elusage, chunk);
	if (err)
		goto err_chunk_assoc;

	/* chunk->region is valid only after a successful assoc above. */
	parman_prio_init(chunk->region->parman, &chunk->parman_prio, priority);

	err = rhashtable_insert_fast(&group->chunk_ht, &chunk->ht_node,
				     mlxsw_sp_acl_tcam_chunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return chunk;

err_rhashtable_insert:
	parman_prio_fini(&chunk->parman_prio);
	mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
err_chunk_assoc:
	kfree(chunk);
	return ERR_PTR(err);
}
868
869 static void
870 mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
871                                 struct mlxsw_sp_acl_tcam_chunk *chunk)
872 {
873         struct mlxsw_sp_acl_tcam_group *group = chunk->group;
874
875         rhashtable_remove_fast(&group->chunk_ht, &chunk->ht_node,
876                                mlxsw_sp_acl_tcam_chunk_ht_params);
877         parman_prio_fini(&chunk->parman_prio);
878         mlxsw_sp_acl_tcam_chunk_deassoc(mlxsw_sp, chunk);
879         kfree(chunk);
880 }
881
882 static struct mlxsw_sp_acl_tcam_chunk *
883 mlxsw_sp_acl_tcam_chunk_get(struct mlxsw_sp *mlxsw_sp,
884                             struct mlxsw_sp_acl_tcam_group *group,
885                             unsigned int priority,
886                             struct mlxsw_afk_element_usage *elusage)
887 {
888         struct mlxsw_sp_acl_tcam_chunk *chunk;
889
890         chunk = rhashtable_lookup_fast(&group->chunk_ht, &priority,
891                                        mlxsw_sp_acl_tcam_chunk_ht_params);
892         if (chunk) {
893                 if (WARN_ON(!mlxsw_afk_key_info_subset(chunk->region->key_info,
894                                                        elusage)))
895                         return ERR_PTR(-EINVAL);
896                 chunk->ref_count++;
897                 return chunk;
898         }
899         return mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, group,
900                                               priority, elusage);
901 }
902
903 static void mlxsw_sp_acl_tcam_chunk_put(struct mlxsw_sp *mlxsw_sp,
904                                         struct mlxsw_sp_acl_tcam_chunk *chunk)
905 {
906         if (--chunk->ref_count)
907                 return;
908         mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, chunk);
909 }
910
/* Add a rule entry to the TCAM: take a reference on the chunk matching
 * the rule's priority (creating it if needed), allocate a slot for the
 * entry via the region's parman instance, then program the entry into
 * the hardware region at that slot.
 *
 * Returns 0 on success or a negative errno; on failure the parman slot
 * and the chunk reference are released again.
 */
static int mlxsw_sp_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_group *group,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, group, rulei->priority,
					    &rulei->values.elusage);
	if (IS_ERR(chunk))
		return PTR_ERR(chunk);

	region = chunk->region;
	err = parman_item_add(region->parman, &chunk->parman_prio,
			      &entry->parman_item);
	if (err)
		goto err_parman_item_add;

	/* parman assigned the index the entry occupies within the region. */
	err = mlxsw_sp_acl_tcam_region_entry_insert(mlxsw_sp, region,
						    entry->parman_item.index,
						    rulei);
	if (err)
		goto err_rule_insert;
	entry->chunk = chunk;

	return 0;

err_rule_insert:
	parman_item_remove(region->parman, &chunk->parman_prio,
			   &entry->parman_item);
err_parman_item_add:
	mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
	return err;
}
947
948 static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
949                                         struct mlxsw_sp_acl_tcam_entry *entry)
950 {
951         struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
952         struct mlxsw_sp_acl_tcam_region *region = chunk->region;
953
954         mlxsw_sp_acl_tcam_region_entry_remove(mlxsw_sp, region,
955                                               entry->parman_item.index);
956         parman_item_remove(region->parman, &chunk->parman_prio,
957                            &entry->parman_item);
958         mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, chunk);
959 }
960
961 static int
962 mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
963                                      struct mlxsw_sp_acl_tcam_entry *entry,
964                                      bool *activity)
965 {
966         struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk;
967         struct mlxsw_sp_acl_tcam_region *region = chunk->region;
968
969         return mlxsw_sp_acl_tcam_region_entry_activity_get(mlxsw_sp, region,
970                                                            entry->parman_item.index,
971                                                            activity);
972 }
973
/* Key elements a region created for IPv4 flows may need to match on.
 * Includes L2 (MACs, VLAN), L3 (addresses, proto, TTL, and the ToS
 * byte split into ECN and DSCP elements) and L4 (ports, TCP flags).
 */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
	MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
	MLXSW_AFK_ELEMENT_DMAC,
	MLXSW_AFK_ELEMENT_SMAC,
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP4,
	MLXSW_AFK_ELEMENT_DST_IP4,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
	MLXSW_AFK_ELEMENT_VID,
	MLXSW_AFK_ELEMENT_PCP,
	MLXSW_AFK_ELEMENT_TCP_FLAGS,
	MLXSW_AFK_ELEMENT_IP_TTL_,
	MLXSW_AFK_ELEMENT_IP_ECN,
	MLXSW_AFK_ELEMENT_IP_DSCP,
};
991
/* Key elements a region created for IPv6 flows may need to match on.
 * IPv6 addresses are split into HI/LO 64-bit element halves.
 */
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
	MLXSW_AFK_ELEMENT_ETHERTYPE,
	MLXSW_AFK_ELEMENT_IP_PROTO,
	MLXSW_AFK_ELEMENT_SRC_IP6_HI,
	MLXSW_AFK_ELEMENT_SRC_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_IP6_HI,
	MLXSW_AFK_ELEMENT_DST_IP6_LO,
	MLXSW_AFK_ELEMENT_DST_L4_PORT,
	MLXSW_AFK_ELEMENT_SRC_L4_PORT,
};
1002
/* Predefined element patterns used when creating regions; a new region's
 * key is widened to the first pattern covering the requested elements
 * (see mlxsw_sp_acl_tcam_chunk_assoc()).
 */
static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv4,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
	},
	{
		.elements = mlxsw_sp_acl_tcam_pattern_ipv6,
		.elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
	},
};

/* Number of entries in mlxsw_sp_acl_tcam_patterns. */
#define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
	ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
1016
/* Private ruleset state for the flower profile: one TCAM group. */
struct mlxsw_sp_acl_tcam_flower_ruleset {
	struct mlxsw_sp_acl_tcam_group group;
};

/* Private rule state for the flower profile: one TCAM entry. */
struct mlxsw_sp_acl_tcam_flower_rule {
	struct mlxsw_sp_acl_tcam_entry entry;
};
1024
1025 static int
1026 mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1027                                      void *priv, void *ruleset_priv)
1028 {
1029         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1030         struct mlxsw_sp_acl_tcam *tcam = priv;
1031
1032         return mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group,
1033                                            mlxsw_sp_acl_tcam_patterns,
1034                                            MLXSW_SP_ACL_TCAM_PATTERNS_COUNT);
1035 }
1036
1037 static void
1038 mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
1039                                      void *ruleset_priv)
1040 {
1041         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1042
1043         mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group);
1044 }
1045
1046 static int
1047 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
1048                                       void *ruleset_priv,
1049                                       struct net_device *dev, bool ingress)
1050 {
1051         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1052
1053         return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->group,
1054                                             dev, ingress);
1055 }
1056
1057 static void
1058 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
1059                                         void *ruleset_priv)
1060 {
1061         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1062
1063         mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->group);
1064 }
1065
1066 static int
1067 mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
1068                                   void *ruleset_priv, void *rule_priv,
1069                                   struct mlxsw_sp_acl_rule_info *rulei)
1070 {
1071         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1072         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1073
1074         return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group,
1075                                            &rule->entry, rulei);
1076 }
1077
1078 static void
1079 mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1080 {
1081         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1082
1083         mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry);
1084 }
1085
1086 static int
1087 mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1088                                            void *rule_priv, bool *activity)
1089 {
1090         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1091
1092         return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry,
1093                                                     activity);
1094 }
1095
/* Profile ops implementing the flower classifier on top of the generic
 * TCAM group/chunk/entry code above.
 */
static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
	.ruleset_priv_size	= sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
	.ruleset_add		= mlxsw_sp_acl_tcam_flower_ruleset_add,
	.ruleset_del		= mlxsw_sp_acl_tcam_flower_ruleset_del,
	.ruleset_bind		= mlxsw_sp_acl_tcam_flower_ruleset_bind,
	.ruleset_unbind		= mlxsw_sp_acl_tcam_flower_ruleset_unbind,
	.rule_priv_size		= sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
	.rule_add		= mlxsw_sp_acl_tcam_flower_rule_add,
	.rule_del		= mlxsw_sp_acl_tcam_flower_rule_del,
	.rule_activity_get	= mlxsw_sp_acl_tcam_flower_rule_activity_get,
};
1107
/* Per-profile ops table, indexed by enum mlxsw_sp_acl_profile. */
static const struct mlxsw_sp_acl_profile_ops *
mlxsw_sp_acl_tcam_profile_ops_arr[] = {
	[MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
};
1112
1113 static const struct mlxsw_sp_acl_profile_ops *
1114 mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
1115                               enum mlxsw_sp_acl_profile profile)
1116 {
1117         const struct mlxsw_sp_acl_profile_ops *ops;
1118
1119         if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
1120                 return NULL;
1121         ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
1122         if (WARN_ON(!ops))
1123                 return NULL;
1124         return ops;
1125 }
1126
/* Top-level ACL ops exported by the TCAM backend; init/fini are defined
 * earlier in this file.
 */
const struct mlxsw_sp_acl_ops mlxsw_sp_acl_tcam_ops = {
	.priv_size		= sizeof(struct mlxsw_sp_acl_tcam),
	.init			= mlxsw_sp_acl_tcam_init,
	.fini			= mlxsw_sp_acl_tcam_fini,
	.profile_ops		= mlxsw_sp_acl_tcam_profile_ops,
};