// SPDX-License-Identifier: GPL-2.0
/*
 * RSS and Classifier helpers for Marvell PPv2 Network Controller
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */
11 #include "mvpp2_cls.h"
12 #include "mvpp2_prs.h"
14 #define MVPP2_DEF_FLOW(_type, _id, _opts, _ri, _ri_mask) \
18 .supported_hash_opts = _opts, \
25 static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
26 /* TCP over IPv4 flows, Not fragmented, no vlan tag */
27 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
28 MVPP22_CLS_HEK_IP4_5T,
29 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
31 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
33 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
34 MVPP22_CLS_HEK_IP4_5T,
35 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
37 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
39 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
40 MVPP22_CLS_HEK_IP4_5T,
41 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
43 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
45 /* TCP over IPv4 flows, Not fragmented, with vlan tag */
46 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
47 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
48 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
51 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
52 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
53 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
56 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
57 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
58 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
61 /* TCP over IPv4 flows, fragmented, no vlan tag */
62 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
63 MVPP22_CLS_HEK_IP4_2T,
64 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
66 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
68 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
69 MVPP22_CLS_HEK_IP4_2T,
70 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
72 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
74 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
75 MVPP22_CLS_HEK_IP4_2T,
76 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
78 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
80 /* TCP over IPv4 flows, fragmented, with vlan tag */
81 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
82 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
83 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
86 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
87 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
88 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
91 MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
92 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
93 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
96 /* UDP over IPv4 flows, Not fragmented, no vlan tag */
97 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
98 MVPP22_CLS_HEK_IP4_5T,
99 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
101 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
103 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
104 MVPP22_CLS_HEK_IP4_5T,
105 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
107 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
109 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
110 MVPP22_CLS_HEK_IP4_5T,
111 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
113 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
115 /* UDP over IPv4 flows, Not fragmented, with vlan tag */
116 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
117 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
118 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
121 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
122 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
123 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
126 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
127 MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
128 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
131 /* UDP over IPv4 flows, fragmented, no vlan tag */
132 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
133 MVPP22_CLS_HEK_IP4_2T,
134 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
136 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
138 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
139 MVPP22_CLS_HEK_IP4_2T,
140 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
142 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
144 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
145 MVPP22_CLS_HEK_IP4_2T,
146 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
148 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
150 /* UDP over IPv4 flows, fragmented, with vlan tag */
151 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
152 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
153 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
156 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
157 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
158 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
161 MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
162 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
163 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
166 /* TCP over IPv6 flows, not fragmented, no vlan tag */
167 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
168 MVPP22_CLS_HEK_IP6_5T,
169 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
171 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
173 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
174 MVPP22_CLS_HEK_IP6_5T,
175 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
177 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
179 /* TCP over IPv6 flows, not fragmented, with vlan tag */
180 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
181 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
182 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
185 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
186 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
187 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
190 /* TCP over IPv6 flows, fragmented, no vlan tag */
191 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
192 MVPP22_CLS_HEK_IP6_2T,
193 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
194 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
195 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
197 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
198 MVPP22_CLS_HEK_IP6_2T,
199 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
200 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
201 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
203 /* TCP over IPv6 flows, fragmented, with vlan tag */
204 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
205 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
206 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
210 MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
211 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
212 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
216 /* UDP over IPv6 flows, not fragmented, no vlan tag */
217 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
218 MVPP22_CLS_HEK_IP6_5T,
219 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
221 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
223 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
224 MVPP22_CLS_HEK_IP6_5T,
225 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
227 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
229 /* UDP over IPv6 flows, not fragmented, with vlan tag */
230 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
231 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
232 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
235 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
236 MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
237 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
240 /* UDP over IPv6 flows, fragmented, no vlan tag */
241 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
242 MVPP22_CLS_HEK_IP6_2T,
243 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
244 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
245 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
247 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
248 MVPP22_CLS_HEK_IP6_2T,
249 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
250 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
251 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
253 /* UDP over IPv6 flows, fragmented, with vlan tag */
254 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
255 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
256 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
260 MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
261 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
262 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
266 /* IPv4 flows, no vlan tag */
267 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
268 MVPP22_CLS_HEK_IP4_2T,
269 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
270 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
271 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
272 MVPP22_CLS_HEK_IP4_2T,
273 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
274 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
275 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
276 MVPP22_CLS_HEK_IP4_2T,
277 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
278 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
280 /* IPv4 flows, with vlan tag */
281 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
282 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
284 MVPP2_PRS_RI_L3_PROTO_MASK),
285 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
286 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
287 MVPP2_PRS_RI_L3_IP4_OPT,
288 MVPP2_PRS_RI_L3_PROTO_MASK),
289 MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
290 MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
291 MVPP2_PRS_RI_L3_IP4_OTHER,
292 MVPP2_PRS_RI_L3_PROTO_MASK),
294 /* IPv6 flows, no vlan tag */
295 MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
296 MVPP22_CLS_HEK_IP6_2T,
297 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
298 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
299 MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
300 MVPP22_CLS_HEK_IP6_2T,
301 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
302 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
304 /* IPv6 flows, with vlan tag */
305 MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
306 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
308 MVPP2_PRS_RI_L3_PROTO_MASK),
309 MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
310 MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
312 MVPP2_PRS_RI_L3_PROTO_MASK),
314 /* Non IP flow, no vlan tag */
315 MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
317 MVPP2_PRS_RI_VLAN_NONE,
318 MVPP2_PRS_RI_VLAN_MASK),
319 /* Non IP flow, with vlan tag */
320 MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
321 MVPP22_CLS_HEK_OPT_VLAN,
325 static void mvpp2_cls_flow_read(struct mvpp2 *priv, int index,
326 struct mvpp2_cls_flow_entry *fe)
329 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, index);
330 fe->data[0] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL0_REG);
331 fe->data[1] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL1_REG);
332 fe->data[2] = mvpp2_read(priv, MVPP2_CLS_FLOW_TBL2_REG);
335 /* Update classification flow table registers */
336 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
337 struct mvpp2_cls_flow_entry *fe)
339 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
340 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
341 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
342 mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
345 /* Update classification lookup table register */
346 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
347 struct mvpp2_cls_lookup_entry *le)
351 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
352 mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
353 mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
356 /* Operations on flow entry */
357 static int mvpp2_cls_flow_hek_num_get(struct mvpp2_cls_flow_entry *fe)
359 return fe->data[1] & MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
362 static void mvpp2_cls_flow_hek_num_set(struct mvpp2_cls_flow_entry *fe,
365 fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK;
366 fe->data[1] |= MVPP2_CLS_FLOW_TBL1_N_FIELDS(num_of_fields);
369 static int mvpp2_cls_flow_hek_get(struct mvpp2_cls_flow_entry *fe,
372 return (fe->data[2] >> MVPP2_CLS_FLOW_TBL2_FLD_OFFS(field_index)) &
373 MVPP2_CLS_FLOW_TBL2_FLD_MASK;
376 static void mvpp2_cls_flow_hek_set(struct mvpp2_cls_flow_entry *fe,
377 int field_index, int field_id)
379 fe->data[2] &= ~MVPP2_CLS_FLOW_TBL2_FLD(field_index,
380 MVPP2_CLS_FLOW_TBL2_FLD_MASK);
381 fe->data[2] |= MVPP2_CLS_FLOW_TBL2_FLD(field_index, field_id);
384 static void mvpp2_cls_flow_eng_set(struct mvpp2_cls_flow_entry *fe,
387 fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_ENG(MVPP2_CLS_FLOW_TBL0_ENG_MASK);
388 fe->data[0] |= MVPP2_CLS_FLOW_TBL0_ENG(engine);
391 static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
395 fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
397 fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
400 static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
402 fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
403 fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
406 static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
409 fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_LAST;
410 fe->data[0] |= !!is_last;
413 static void mvpp2_cls_flow_pri_set(struct mvpp2_cls_flow_entry *fe, int prio)
415 fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_PRIO(MVPP2_CLS_FLOW_TBL1_PRIO_MASK);
416 fe->data[1] |= MVPP2_CLS_FLOW_TBL1_PRIO(prio);
419 static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
422 fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
425 /* Initialize the parser entry for the given flow */
426 static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
427 struct mvpp2_cls_flow *flow)
429 mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
430 flow->prs_ri.ri_mask);
433 /* Initialize the Lookup Id table entry for the given flow */
434 static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
435 struct mvpp2_cls_flow *flow)
437 struct mvpp2_cls_lookup_entry le;
440 le.lkpid = flow->flow_id;
442 /* The default RxQ for this port is set in the C2 lookup */
445 /* We point on the first lookup in the sequence for the flow, that is
448 le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
450 /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
451 le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
453 mvpp2_cls_lookup_write(priv, &le);
456 /* Initialize the flow table entries for the given flow */
457 static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
459 struct mvpp2_cls_flow_entry fe;
463 memset(&fe, 0, sizeof(fe));
464 fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
466 mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
467 mvpp2_cls_flow_port_id_sel(&fe, true);
468 mvpp2_cls_flow_last_set(&fe, 0);
469 mvpp2_cls_flow_pri_set(&fe, 0);
470 mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
473 for (i = 0; i < MVPP2_MAX_PORTS; i++)
474 mvpp2_cls_flow_port_add(&fe, BIT(i));
476 mvpp2_cls_flow_write(priv, &fe);
479 for (i = 0; i < MVPP2_MAX_PORTS; i++) {
480 memset(&fe, 0, sizeof(fe));
481 fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
483 mvpp2_cls_flow_port_id_sel(&fe, true);
484 mvpp2_cls_flow_pri_set(&fe, i + 1);
485 mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
486 mvpp2_cls_flow_port_add(&fe, BIT(i));
488 mvpp2_cls_flow_write(priv, &fe);
491 /* Update the last entry */
492 mvpp2_cls_flow_last_set(&fe, 1);
493 mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
495 mvpp2_cls_flow_write(priv, &fe);
498 /* Adds a field to the Header Extracted Key generation parameters*/
499 static int mvpp2_flow_add_hek_field(struct mvpp2_cls_flow_entry *fe,
502 int nb_fields = mvpp2_cls_flow_hek_num_get(fe);
504 if (nb_fields == MVPP2_FLOW_N_FIELDS)
507 mvpp2_cls_flow_hek_set(fe, nb_fields, field_id);
509 mvpp2_cls_flow_hek_num_set(fe, nb_fields + 1);
514 static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
515 unsigned long hash_opts)
520 /* Clear old fields */
521 mvpp2_cls_flow_hek_num_set(fe, 0);
524 for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
526 case MVPP22_CLS_HEK_OPT_VLAN:
527 field_id = MVPP22_CLS_FIELD_VLAN;
529 case MVPP22_CLS_HEK_OPT_IP4SA:
530 field_id = MVPP22_CLS_FIELD_IP4SA;
532 case MVPP22_CLS_HEK_OPT_IP4DA:
533 field_id = MVPP22_CLS_FIELD_IP4DA;
535 case MVPP22_CLS_HEK_OPT_IP6SA:
536 field_id = MVPP22_CLS_FIELD_IP6SA;
538 case MVPP22_CLS_HEK_OPT_IP6DA:
539 field_id = MVPP22_CLS_FIELD_IP6DA;
541 case MVPP22_CLS_HEK_OPT_L4SIP:
542 field_id = MVPP22_CLS_FIELD_L4SIP;
544 case MVPP22_CLS_HEK_OPT_L4DIP:
545 field_id = MVPP22_CLS_FIELD_L4DIP;
550 if (mvpp2_flow_add_hek_field(fe, field_id))
557 static struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
559 if (flow >= MVPP2_N_FLOWS)
562 return &cls_flows[flow];
565 /* Set the hash generation options for the given traffic flow.
566 * One traffic flow (in the ethtool sense) has multiple classification flows,
567 * to handle specific cases such as fragmentation, or the presence of a
570 * Each of these individual flows has different constraints, for example we
571 * can't hash fragmented packets on L4 data (else we would risk having packet
572 * re-ordering), so each classification flows masks the options with their
576 static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
579 struct mvpp2_cls_flow_entry fe;
580 struct mvpp2_cls_flow *flow;
581 int i, engine, flow_index;
584 for (i = 0; i < MVPP2_N_FLOWS; i++) {
585 flow = mvpp2_cls_flow_get(i);
589 if (flow->flow_type != flow_type)
592 flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
595 mvpp2_cls_flow_read(port->priv, flow_index, &fe);
597 hash_opts = flow->supported_hash_opts & requested_opts;
599 /* Use C3HB engine to access L4 infos. This adds L4 infos to the
602 if (hash_opts & MVPP22_CLS_HEK_L4_OPTS)
603 engine = MVPP22_CLS_ENGINE_C3HB;
605 engine = MVPP22_CLS_ENGINE_C3HA;
607 if (mvpp2_flow_set_hek_fields(&fe, hash_opts))
610 mvpp2_cls_flow_eng_set(&fe, engine);
612 mvpp2_cls_flow_write(port->priv, &fe);
618 u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
621 int n_fields, i, field;
623 n_fields = mvpp2_cls_flow_hek_num_get(fe);
625 for (i = 0; i < n_fields; i++) {
626 field = mvpp2_cls_flow_hek_get(fe, i);
629 case MVPP22_CLS_FIELD_MAC_DA:
630 hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
632 case MVPP22_CLS_FIELD_VLAN:
633 hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
635 case MVPP22_CLS_FIELD_L3_PROTO:
636 hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
638 case MVPP22_CLS_FIELD_IP4SA:
639 hash_opts |= MVPP22_CLS_HEK_OPT_IP4SA;
641 case MVPP22_CLS_FIELD_IP4DA:
642 hash_opts |= MVPP22_CLS_HEK_OPT_IP4DA;
644 case MVPP22_CLS_FIELD_IP6SA:
645 hash_opts |= MVPP22_CLS_HEK_OPT_IP6SA;
647 case MVPP22_CLS_FIELD_IP6DA:
648 hash_opts |= MVPP22_CLS_HEK_OPT_IP6DA;
650 case MVPP22_CLS_FIELD_L4SIP:
651 hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
653 case MVPP22_CLS_FIELD_L4DIP:
654 hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
663 /* Returns the hash opts for this flow. There are several classifier flows
664 * for one traffic flow, this returns an aggregation of all configurations.
666 static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
668 struct mvpp2_cls_flow_entry fe;
669 struct mvpp2_cls_flow *flow;
673 for (i = 0; i < MVPP2_N_FLOWS; i++) {
674 flow = mvpp2_cls_flow_get(i);
678 if (flow->flow_type != flow_type)
681 flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
684 mvpp2_cls_flow_read(port->priv, flow_index, &fe);
686 hash_opts |= mvpp2_flow_get_hek_fields(&fe);
692 static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
694 struct mvpp2_cls_flow *flow;
697 for (i = 0; i < MVPP2_N_FLOWS; i++) {
698 flow = mvpp2_cls_flow_get(i);
702 mvpp2_cls_flow_prs_init(priv, flow);
703 mvpp2_cls_flow_lkp_init(priv, flow);
704 mvpp2_cls_flow_init(priv, flow);
708 static void mvpp2_cls_c2_write(struct mvpp2 *priv,
709 struct mvpp2_cls_c2_entry *c2)
711 mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
714 mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
715 mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
716 mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
717 mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
718 mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
720 mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
722 mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
723 mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
724 mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
725 mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
728 static void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
729 struct mvpp2_cls_c2_entry *c2)
731 mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
735 c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
736 c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
737 c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
738 c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
739 c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
741 c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
743 c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
744 c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
745 c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
746 c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
749 static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
751 struct mvpp2_cls_c2_entry c2;
754 memset(&c2, 0, sizeof(c2));
756 c2.index = MVPP22_CLS_C2_RSS_ENTRY(port->id);
758 pmap = BIT(port->id);
759 c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
760 c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
762 /* Update RSS status after matching this entry */
763 c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
765 /* Mark packet as "forwarded to software", needed for RSS */
766 c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
768 /* Configure the default rx queue : Update Queue Low and Queue High, but
769 * don't lock, since the rx queue selection might be overridden by RSS
771 c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD) |
772 MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD);
774 qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
775 ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
777 c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
778 MVPP22_CLS_C2_ATTR0_QLOW(ql);
780 mvpp2_cls_c2_write(port->priv, &c2);
783 /* Classifier default initialization */
784 void mvpp2_cls_init(struct mvpp2 *priv)
786 struct mvpp2_cls_lookup_entry le;
787 struct mvpp2_cls_flow_entry fe;
790 /* Enable classifier */
791 mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
793 /* Clear classifier flow table */
794 memset(&fe.data, 0, sizeof(fe.data));
795 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
797 mvpp2_cls_flow_write(priv, &fe);
800 /* Clear classifier lookup table */
802 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
805 mvpp2_cls_lookup_write(priv, &le);
808 mvpp2_cls_lookup_write(priv, &le);
811 mvpp2_cls_port_init_flows(priv);
814 void mvpp2_cls_port_config(struct mvpp2_port *port)
816 struct mvpp2_cls_lookup_entry le;
819 /* Set way for the port */
820 val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
821 val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
822 mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
824 /* Pick the entry to be accessed in lookup ID decoding table
825 * according to the way and lkpid.
831 /* Set initial CPU queue for receiving packets */
832 le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
833 le.data |= port->first_rxq;
835 /* Disable classification engines */
836 le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
838 /* Update lookup ID table entry */
839 mvpp2_cls_lookup_write(port->priv, &le);
841 mvpp2_port_c2_cls_init(port);
844 static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
846 struct mvpp2_cls_c2_entry c2;
848 mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
850 c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
852 mvpp2_cls_c2_write(port->priv, &c2);
855 static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
857 struct mvpp2_cls_c2_entry c2;
859 mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
861 c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
863 mvpp2_cls_c2_write(port->priv, &c2);
866 void mvpp22_rss_enable(struct mvpp2_port *port)
868 mvpp2_rss_port_c2_enable(port);
871 void mvpp22_rss_disable(struct mvpp2_port *port)
873 mvpp2_rss_port_c2_disable(port);
876 /* Set CPU queue number for oversize packets */
877 void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
881 mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
882 port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
884 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
885 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
887 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
888 val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
889 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
892 static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
894 int nrxqs, cpu, cpus = num_possible_cpus();
896 /* Number of RXQs per CPU */
897 nrxqs = port->nrxqs / cpus;
899 /* CPU that will handle this rx queue */
902 if (!cpu_online(cpu))
903 return port->first_rxq;
905 /* Indirection to better distribute the paquets on the CPUs when
906 * configuring the RSS queues.
908 return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
911 void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
913 struct mvpp2 *priv = port->priv;
916 for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
917 u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
918 MVPP22_RSS_INDEX_TABLE_ENTRY(i);
919 mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
921 mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
922 mvpp22_rxfh_indir(port, port->indir[i]));
926 int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
930 switch (info->flow_type) {
935 if (info->data & RXH_L4_B_0_1)
936 hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
937 if (info->data & RXH_L4_B_2_3)
938 hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
942 if (info->data & RXH_L2DA)
943 hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
944 if (info->data & RXH_VLAN)
945 hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
946 if (info->data & RXH_L3_PROTO)
947 hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
948 if (info->data & RXH_IP_SRC)
949 hash_opts |= (MVPP22_CLS_HEK_OPT_IP4SA |
950 MVPP22_CLS_HEK_OPT_IP6SA);
951 if (info->data & RXH_IP_DST)
952 hash_opts |= (MVPP22_CLS_HEK_OPT_IP4DA |
953 MVPP22_CLS_HEK_OPT_IP6DA);
955 default: return -EOPNOTSUPP;
958 return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
961 int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
963 unsigned long hash_opts;
966 hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
969 for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
971 case MVPP22_CLS_HEK_OPT_MAC_DA:
972 info->data |= RXH_L2DA;
974 case MVPP22_CLS_HEK_OPT_VLAN:
975 info->data |= RXH_VLAN;
977 case MVPP22_CLS_HEK_OPT_L3_PROTO:
978 info->data |= RXH_L3_PROTO;
980 case MVPP22_CLS_HEK_OPT_IP4SA:
981 case MVPP22_CLS_HEK_OPT_IP6SA:
982 info->data |= RXH_IP_SRC;
984 case MVPP22_CLS_HEK_OPT_IP4DA:
985 case MVPP22_CLS_HEK_OPT_IP6DA:
986 info->data |= RXH_IP_DST;
988 case MVPP22_CLS_HEK_OPT_L4SIP:
989 info->data |= RXH_L4_B_0_1;
991 case MVPP22_CLS_HEK_OPT_L4DIP:
992 info->data |= RXH_L4_B_2_3;
1001 void mvpp22_rss_port_init(struct mvpp2_port *port)
1003 struct mvpp2 *priv = port->priv;
1006 /* Set the table width: replace the whole classifier Rx queue number
1007 * with the ones configured in RSS table entries.
1009 mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
1010 mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
1012 /* The default RxQ is used as a key to select the RSS table to use.
1013 * We use one RSS table per port.
1015 mvpp2_write(priv, MVPP22_RSS_INDEX,
1016 MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
1017 mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
1018 MVPP22_RSS_TABLE_POINTER(port->id));
1020 /* Configure the first table to evenly distribute the packets across
1021 * real Rx Queues. The table entries map a hash to a port Rx Queue.
1023 for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
1024 port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
1026 mvpp22_rss_fill_table(port, port->id);
1028 /* Configure default flows */
1029 mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
1030 mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
1031 mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
1032 mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
1033 mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
1034 mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);