2023-12-06 08f87f769b595151be1afeff53e144f543faa614
kernel/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -22,302 +22,302 @@
2222 } \
2323 }
2424
25
-static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
25
+static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
2626 /* TCP over IPv4 flows, Not fragmented, no vlan tag */
27
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
27
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
2828 MVPP22_CLS_HEK_IP4_5T,
2929 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
3030 MVPP2_PRS_RI_L4_TCP,
3131 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
3232
33
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
33
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
3434 MVPP22_CLS_HEK_IP4_5T,
3535 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
3636 MVPP2_PRS_RI_L4_TCP,
3737 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
3838
39
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
39
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_UNTAG,
4040 MVPP22_CLS_HEK_IP4_5T,
4141 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
4242 MVPP2_PRS_RI_L4_TCP,
4343 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
4444
4545 /* TCP over IPv4 flows, Not fragmented, with vlan tag */
46
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
47
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
46
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
47
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
4848 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
4949 MVPP2_PRS_IP_MASK),
5050
51
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
52
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
51
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
52
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
5353 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
5454 MVPP2_PRS_IP_MASK),
5555
56
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_TAG,
57
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
56
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_NF_TAG,
57
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
5858 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
5959 MVPP2_PRS_IP_MASK),
6060
6161 /* TCP over IPv4 flows, fragmented, no vlan tag */
62
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
62
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
6363 MVPP22_CLS_HEK_IP4_2T,
6464 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
6565 MVPP2_PRS_RI_L4_TCP,
6666 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
6767
68
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
68
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
6969 MVPP22_CLS_HEK_IP4_2T,
7070 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
7171 MVPP2_PRS_RI_L4_TCP,
7272 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
7373
74
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
74
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_UNTAG,
7575 MVPP22_CLS_HEK_IP4_2T,
7676 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
7777 MVPP2_PRS_RI_L4_TCP,
7878 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
7979
8080 /* TCP over IPv4 flows, fragmented, with vlan tag */
81
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
82
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
81
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
82
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
8383 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_TCP,
8484 MVPP2_PRS_IP_MASK),
8585
86
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
87
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
86
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
87
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
8888 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_TCP,
8989 MVPP2_PRS_IP_MASK),
9090
91
- MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_FRAG_TAG,
92
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
91
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP4, MVPP2_FL_IP4_TCP_FRAG_TAG,
92
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
9393 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_TCP,
9494 MVPP2_PRS_IP_MASK),
9595
9696 /* UDP over IPv4 flows, Not fragmented, no vlan tag */
97
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
97
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
9898 MVPP22_CLS_HEK_IP4_5T,
9999 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
100100 MVPP2_PRS_RI_L4_UDP,
101101 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
102102
103
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
103
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
104104 MVPP22_CLS_HEK_IP4_5T,
105105 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
106106 MVPP2_PRS_RI_L4_UDP,
107107 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
108108
109
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_UNTAG,
109
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_UNTAG,
110110 MVPP22_CLS_HEK_IP4_5T,
111111 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
112112 MVPP2_PRS_RI_L4_UDP,
113113 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
114114
115115 /* UDP over IPv4 flows, Not fragmented, with vlan tag */
116
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
117
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
116
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
117
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
118118 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
119119 MVPP2_PRS_IP_MASK),
120120
121
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
122
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
121
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
122
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
123123 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
124124 MVPP2_PRS_IP_MASK),
125125
126
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_NF_TAG,
127
- MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_OPT_VLAN,
126
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_NF_TAG,
127
+ MVPP22_CLS_HEK_IP4_5T | MVPP22_CLS_HEK_TAGGED,
128128 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
129129 MVPP2_PRS_IP_MASK),
130130
131131 /* UDP over IPv4 flows, fragmented, no vlan tag */
132
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
132
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
133133 MVPP22_CLS_HEK_IP4_2T,
134134 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4 |
135135 MVPP2_PRS_RI_L4_UDP,
136136 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
137137
138
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
138
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
139139 MVPP22_CLS_HEK_IP4_2T,
140140 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT |
141141 MVPP2_PRS_RI_L4_UDP,
142142 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
143143
144
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
144
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_UNTAG,
145145 MVPP22_CLS_HEK_IP4_2T,
146146 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER |
147147 MVPP2_PRS_RI_L4_UDP,
148148 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
149149
150150 /* UDP over IPv4 flows, fragmented, with vlan tag */
151
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
152
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
151
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
152
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
153153 MVPP2_PRS_RI_L3_IP4 | MVPP2_PRS_RI_L4_UDP,
154154 MVPP2_PRS_IP_MASK),
155155
156
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
157
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
156
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
157
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
158158 MVPP2_PRS_RI_L3_IP4_OPT | MVPP2_PRS_RI_L4_UDP,
159159 MVPP2_PRS_IP_MASK),
160160
161
- MVPP2_DEF_FLOW(UDP_V4_FLOW, MVPP2_FL_IP4_UDP_FRAG_TAG,
162
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
161
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP4, MVPP2_FL_IP4_UDP_FRAG_TAG,
162
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
163163 MVPP2_PRS_RI_L3_IP4_OTHER | MVPP2_PRS_RI_L4_UDP,
164164 MVPP2_PRS_IP_MASK),
165165
166166 /* TCP over IPv6 flows, not fragmented, no vlan tag */
167
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
167
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
168168 MVPP22_CLS_HEK_IP6_5T,
169169 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
170170 MVPP2_PRS_RI_L4_TCP,
171171 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
172172
173
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_UNTAG,
173
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_UNTAG,
174174 MVPP22_CLS_HEK_IP6_5T,
175175 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
176176 MVPP2_PRS_RI_L4_TCP,
177177 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
178178
179179 /* TCP over IPv6 flows, not fragmented, with vlan tag */
180
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
181
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
180
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
181
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
182182 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_TCP,
183183 MVPP2_PRS_IP_MASK),
184184
185
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_NF_TAG,
186
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
185
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_NF_TAG,
186
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
187187 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_TCP,
188188 MVPP2_PRS_IP_MASK),
189189
190190 /* TCP over IPv6 flows, fragmented, no vlan tag */
191
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
191
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
192192 MVPP22_CLS_HEK_IP6_2T,
193193 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
194194 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
195195 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
196196
197
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
197
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_UNTAG,
198198 MVPP22_CLS_HEK_IP6_2T,
199199 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
200200 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_TCP,
201201 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
202202
203203 /* TCP over IPv6 flows, fragmented, with vlan tag */
204
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
205
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
204
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
205
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
206206 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
207207 MVPP2_PRS_RI_L4_TCP,
208208 MVPP2_PRS_IP_MASK),
209209
210
- MVPP2_DEF_FLOW(TCP_V6_FLOW, MVPP2_FL_IP6_TCP_FRAG_TAG,
211
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
210
+ MVPP2_DEF_FLOW(MVPP22_FLOW_TCP6, MVPP2_FL_IP6_TCP_FRAG_TAG,
211
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
212212 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
213213 MVPP2_PRS_RI_L4_TCP,
214214 MVPP2_PRS_IP_MASK),
215215
216216 /* UDP over IPv6 flows, not fragmented, no vlan tag */
217
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
217
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
218218 MVPP22_CLS_HEK_IP6_5T,
219219 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
220220 MVPP2_PRS_RI_L4_UDP,
221221 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
222222
223
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_UNTAG,
223
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_UNTAG,
224224 MVPP22_CLS_HEK_IP6_5T,
225225 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
226226 MVPP2_PRS_RI_L4_UDP,
227227 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
228228
229229 /* UDP over IPv6 flows, not fragmented, with vlan tag */
230
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
231
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
230
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
231
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
232232 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_L4_UDP,
233233 MVPP2_PRS_IP_MASK),
234234
235
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_NF_TAG,
236
- MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_OPT_VLAN,
235
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_NF_TAG,
236
+ MVPP22_CLS_HEK_IP6_5T | MVPP22_CLS_HEK_TAGGED,
237237 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_L4_UDP,
238238 MVPP2_PRS_IP_MASK),
239239
240240 /* UDP over IPv6 flows, fragmented, no vlan tag */
241
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
241
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
242242 MVPP22_CLS_HEK_IP6_2T,
243243 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6 |
244244 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
245245 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
246246
247
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
247
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_UNTAG,
248248 MVPP22_CLS_HEK_IP6_2T,
249249 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6_EXT |
250250 MVPP2_PRS_RI_IP_FRAG_TRUE | MVPP2_PRS_RI_L4_UDP,
251251 MVPP2_PRS_IP_MASK | MVPP2_PRS_RI_VLAN_MASK),
252252
253253 /* UDP over IPv6 flows, fragmented, with vlan tag */
254
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
255
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
254
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
255
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
256256 MVPP2_PRS_RI_L3_IP6 | MVPP2_PRS_RI_IP_FRAG_TRUE |
257257 MVPP2_PRS_RI_L4_UDP,
258258 MVPP2_PRS_IP_MASK),
259259
260
- MVPP2_DEF_FLOW(UDP_V6_FLOW, MVPP2_FL_IP6_UDP_FRAG_TAG,
261
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
260
+ MVPP2_DEF_FLOW(MVPP22_FLOW_UDP6, MVPP2_FL_IP6_UDP_FRAG_TAG,
261
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
262262 MVPP2_PRS_RI_L3_IP6_EXT | MVPP2_PRS_RI_IP_FRAG_TRUE |
263263 MVPP2_PRS_RI_L4_UDP,
264264 MVPP2_PRS_IP_MASK),
265265
266266 /* IPv4 flows, no vlan tag */
267
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
267
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
268268 MVPP22_CLS_HEK_IP4_2T,
269269 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4,
270270 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
271
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
271
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
272272 MVPP22_CLS_HEK_IP4_2T,
273273 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OPT,
274274 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
275
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_UNTAG,
275
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_UNTAG,
276276 MVPP22_CLS_HEK_IP4_2T,
277277 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP4_OTHER,
278278 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
279279
280280 /* IPv4 flows, with vlan tag */
281
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
282
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
281
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
282
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
283283 MVPP2_PRS_RI_L3_IP4,
284284 MVPP2_PRS_RI_L3_PROTO_MASK),
285
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
286
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
285
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
286
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
287287 MVPP2_PRS_RI_L3_IP4_OPT,
288288 MVPP2_PRS_RI_L3_PROTO_MASK),
289
- MVPP2_DEF_FLOW(IPV4_FLOW, MVPP2_FL_IP4_TAG,
290
- MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_OPT_VLAN,
289
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP4, MVPP2_FL_IP4_TAG,
290
+ MVPP22_CLS_HEK_IP4_2T | MVPP22_CLS_HEK_TAGGED,
291291 MVPP2_PRS_RI_L3_IP4_OTHER,
292292 MVPP2_PRS_RI_L3_PROTO_MASK),
293293
294294 /* IPv6 flows, no vlan tag */
295
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
295
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
296296 MVPP22_CLS_HEK_IP6_2T,
297297 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
298298 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
299
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_UNTAG,
299
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_UNTAG,
300300 MVPP22_CLS_HEK_IP6_2T,
301301 MVPP2_PRS_RI_VLAN_NONE | MVPP2_PRS_RI_L3_IP6,
302302 MVPP2_PRS_RI_VLAN_MASK | MVPP2_PRS_RI_L3_PROTO_MASK),
303303
304304 /* IPv6 flows, with vlan tag */
305
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
306
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
305
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
306
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
307307 MVPP2_PRS_RI_L3_IP6,
308308 MVPP2_PRS_RI_L3_PROTO_MASK),
309
- MVPP2_DEF_FLOW(IPV6_FLOW, MVPP2_FL_IP6_TAG,
310
- MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_OPT_VLAN,
309
+ MVPP2_DEF_FLOW(MVPP22_FLOW_IP6, MVPP2_FL_IP6_TAG,
310
+ MVPP22_CLS_HEK_IP6_2T | MVPP22_CLS_HEK_TAGGED,
311311 MVPP2_PRS_RI_L3_IP6,
312312 MVPP2_PRS_RI_L3_PROTO_MASK),
313313
314314 /* Non IP flow, no vlan tag */
315
- MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_UNTAG,
315
+ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_UNTAG,
316316 0,
317317 MVPP2_PRS_RI_VLAN_NONE,
318318 MVPP2_PRS_RI_VLAN_MASK),
319319 /* Non IP flow, with vlan tag */
320
- MVPP2_DEF_FLOW(ETHER_FLOW, MVPP2_FL_NON_IP_TAG,
320
+ MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
321321 MVPP22_CLS_HEK_OPT_VLAN,
322322 0, 0),
323323 };
@@ -344,9 +344,9 @@
344344 struct mvpp2_cls_flow_entry *fe)
345345 {
346346 mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
347
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
348
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
349
- mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
347
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
348
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
349
+ mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
350350 }
351351
352352 u32 mvpp2_cls_lookup_hits(struct mvpp2 *priv, int index)
@@ -429,12 +429,6 @@
429429 fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
430430 }
431431
432
-static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
433
-{
434
- fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
435
- fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
436
-}
437
-
438432 static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
439433 bool is_last)
440434 {
@@ -454,9 +448,22 @@
454448 fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
455449 }
456450
451
+static void mvpp2_cls_flow_port_remove(struct mvpp2_cls_flow_entry *fe,
452
+ u32 port)
453
+{
454
+ fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
455
+}
456
+
457
+static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
458
+ u8 lu_type)
459
+{
460
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
461
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
462
+}
463
+
457464 /* Initialize the parser entry for the given flow */
458465 static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
459
- struct mvpp2_cls_flow *flow)
466
+ const struct mvpp2_cls_flow *flow)
460467 {
461468 mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
462469 flow->prs_ri.ri_mask);
@@ -464,7 +471,7 @@
464471
465472 /* Initialize the Lookup Id table entry for the given flow */
466473 static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
467
- struct mvpp2_cls_flow *flow)
474
+ const struct mvpp2_cls_flow *flow)
468475 {
469476 struct mvpp2_cls_lookup_entry le;
470477
@@ -477,7 +484,7 @@
477484 /* We point on the first lookup in the sequence for the flow, that is
478485 * the C2 lookup.
479486 */
480
- le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
487
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
481488
482489 /* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
483490 le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
@@ -485,21 +492,113 @@
485492 mvpp2_cls_lookup_write(priv, &le);
486493 }
487494
495
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
496
+ struct mvpp2_cls_c2_entry *c2)
497
+{
498
+ u32 val;
499
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
500
+
501
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
502
+ if (c2->valid)
503
+ val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
504
+ else
505
+ val |= MVPP22_CLS_C2_TCAM_INV_BIT;
506
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
507
+
508
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
509
+
510
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
511
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
512
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
513
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
514
+
515
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
516
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
517
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
518
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
519
+ /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
520
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
521
+}
522
+
523
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
524
+ struct mvpp2_cls_c2_entry *c2)
525
+{
526
+ u32 val;
527
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
528
+
529
+ c2->index = index;
530
+
531
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
532
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
533
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
534
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
535
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
536
+
537
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
538
+
539
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
540
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
541
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
542
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
543
+
544
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
545
+ c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
546
+}
547
+
548
+static int mvpp2_cls_ethtool_flow_to_type(int flow_type)
549
+{
550
+ switch (flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS)) {
551
+ case ETHER_FLOW:
552
+ return MVPP22_FLOW_ETHERNET;
553
+ case TCP_V4_FLOW:
554
+ return MVPP22_FLOW_TCP4;
555
+ case TCP_V6_FLOW:
556
+ return MVPP22_FLOW_TCP6;
557
+ case UDP_V4_FLOW:
558
+ return MVPP22_FLOW_UDP4;
559
+ case UDP_V6_FLOW:
560
+ return MVPP22_FLOW_UDP6;
561
+ case IPV4_FLOW:
562
+ return MVPP22_FLOW_IP4;
563
+ case IPV6_FLOW:
564
+ return MVPP22_FLOW_IP6;
565
+ default:
566
+ return -EOPNOTSUPP;
567
+ }
568
+}
569
+
570
+static int mvpp2_cls_c2_port_flow_index(struct mvpp2_port *port, int loc)
571
+{
572
+ return MVPP22_CLS_C2_RFS_LOC(port->id, loc);
573
+}
574
+
488575 /* Initialize the flow table entries for the given flow */
489
-static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
576
+static void mvpp2_cls_flow_init(struct mvpp2 *priv,
577
+ const struct mvpp2_cls_flow *flow)
490578 {
491579 struct mvpp2_cls_flow_entry fe;
492
- int i;
580
+ int i, pri = 0;
493581
494
- /* C2 lookup */
495
- memset(&fe, 0, sizeof(fe));
496
- fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
582
+ /* Assign default values to all entries in the flow */
583
+ for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
584
+ i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
585
+ memset(&fe, 0, sizeof(fe));
586
+ fe.index = i;
587
+ mvpp2_cls_flow_pri_set(&fe, pri++);
588
+
589
+ if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
590
+ mvpp2_cls_flow_last_set(&fe, 1);
591
+
592
+ mvpp2_cls_flow_write(priv, &fe);
593
+ }
594
+
595
+ /* RSS config C2 lookup */
596
+ mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
597
+ &fe);
497598
498599 mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
499600 mvpp2_cls_flow_port_id_sel(&fe, true);
500
- mvpp2_cls_flow_last_set(&fe, 0);
501
- mvpp2_cls_flow_pri_set(&fe, 0);
502
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
601
+ mvpp2_cls_flow_lu_type_set(&fe, MVPP22_CLS_LU_TYPE_ALL);
503602
504603 /* Add all ports */
505604 for (i = 0; i < MVPP2_MAX_PORTS; i++)
@@ -509,22 +608,19 @@
509608
510609 /* C3Hx lookups */
511610 for (i = 0; i < MVPP2_MAX_PORTS; i++) {
512
- memset(&fe, 0, sizeof(fe));
513
- fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
611
+ mvpp2_cls_flow_read(priv,
612
+ MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
613
+ &fe);
514614
615
+ /* Set a default engine. Will be overwritten when setting the
616
+ * real HEK parameters
617
+ */
618
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
515619 mvpp2_cls_flow_port_id_sel(&fe, true);
516
- mvpp2_cls_flow_pri_set(&fe, i + 1);
517
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
518620 mvpp2_cls_flow_port_add(&fe, BIT(i));
519621
520622 mvpp2_cls_flow_write(priv, &fe);
521623 }
522
-
523
- /* Update the last entry */
524
- mvpp2_cls_flow_last_set(&fe, 1);
525
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
526
-
527
- mvpp2_cls_flow_write(priv, &fe);
528624 }
529625
530626 /* Adds a field to the Header Extracted Key generation parameters*/
@@ -555,8 +651,14 @@
555651
556652 for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
557653 switch (BIT(i)) {
654
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
655
+ field_id = MVPP22_CLS_FIELD_MAC_DA;
656
+ break;
558657 case MVPP22_CLS_HEK_OPT_VLAN:
559658 field_id = MVPP22_CLS_FIELD_VLAN;
659
+ break;
660
+ case MVPP22_CLS_HEK_OPT_VLAN_PRI:
661
+ field_id = MVPP22_CLS_FIELD_VLAN_PRI;
560662 break;
561663 case MVPP22_CLS_HEK_OPT_IP4SA:
562664 field_id = MVPP22_CLS_FIELD_IP4SA;
@@ -586,9 +688,33 @@
586688 return 0;
587689 }
588690
589
-struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
691
+/* Returns the size, in bits, of the corresponding HEK field */
692
+static int mvpp2_cls_hek_field_size(u32 field)
590693 {
591
- if (flow >= MVPP2_N_FLOWS)
694
+ switch (field) {
695
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
696
+ return 48;
697
+ case MVPP22_CLS_HEK_OPT_VLAN:
698
+ return 12;
699
+ case MVPP22_CLS_HEK_OPT_VLAN_PRI:
700
+ return 3;
701
+ case MVPP22_CLS_HEK_OPT_IP4SA:
702
+ case MVPP22_CLS_HEK_OPT_IP4DA:
703
+ return 32;
704
+ case MVPP22_CLS_HEK_OPT_IP6SA:
705
+ case MVPP22_CLS_HEK_OPT_IP6DA:
706
+ return 128;
707
+ case MVPP22_CLS_HEK_OPT_L4SIP:
708
+ case MVPP22_CLS_HEK_OPT_L4DIP:
709
+ return 16;
710
+ default:
711
+ return -1;
712
+ }
713
+}
714
+
715
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
716
+{
717
+ if (flow >= MVPP2_N_PRS_FLOWS)
592718 return NULL;
593719
594720 return &cls_flows[flow];
@@ -608,21 +734,17 @@
608734 static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
609735 u16 requested_opts)
610736 {
737
+ const struct mvpp2_cls_flow *flow;
611738 struct mvpp2_cls_flow_entry fe;
612
- struct mvpp2_cls_flow *flow;
613739 int i, engine, flow_index;
614740 u16 hash_opts;
615741
616
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
742
+ for_each_cls_flow_id_with_type(i, flow_type) {
617743 flow = mvpp2_cls_flow_get(i);
618744 if (!flow)
619745 return -EINVAL;
620746
621
- if (flow->flow_type != flow_type)
622
- continue;
623
-
624
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
625
- flow->flow_id);
747
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
626748
627749 mvpp2_cls_flow_read(port->priv, flow_index, &fe);
628750
@@ -664,6 +786,9 @@
664786 case MVPP22_CLS_FIELD_VLAN:
665787 hash_opts |= MVPP22_CLS_HEK_OPT_VLAN;
666788 break;
789
+ case MVPP22_CLS_FIELD_VLAN_PRI:
790
+ hash_opts |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
791
+ break;
667792 case MVPP22_CLS_FIELD_L3_PROTO:
668793 hash_opts |= MVPP22_CLS_HEK_OPT_L3_PROTO;
669794 break;
@@ -697,21 +822,17 @@
697822 */
698823 static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
699824 {
825
+ const struct mvpp2_cls_flow *flow;
700826 struct mvpp2_cls_flow_entry fe;
701
- struct mvpp2_cls_flow *flow;
702827 int i, flow_index;
703828 u16 hash_opts = 0;
704829
705
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
830
+ for_each_cls_flow_id_with_type(i, flow_type) {
706831 flow = mvpp2_cls_flow_get(i);
707832 if (!flow)
708833 return 0;
709834
710
- if (flow->flow_type != flow_type)
711
- continue;
712
-
713
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
714
- flow->flow_id);
835
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
715836
716837 mvpp2_cls_flow_read(port->priv, flow_index, &fe);
717838
@@ -723,10 +844,10 @@
723844
724845 static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
725846 {
726
- struct mvpp2_cls_flow *flow;
847
+ const struct mvpp2_cls_flow *flow;
727848 int i;
728849
729
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
850
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
730851 flow = mvpp2_cls_flow_get(i);
731852 if (!flow)
732853 break;
@@ -735,47 +856,6 @@
735856 mvpp2_cls_flow_lkp_init(priv, flow);
736857 mvpp2_cls_flow_init(priv, flow);
737858 }
738
-}
739
-
740
-static void mvpp2_cls_c2_write(struct mvpp2 *priv,
741
- struct mvpp2_cls_c2_entry *c2)
742
-{
743
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
744
-
745
- /* Write TCAM */
746
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
747
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
748
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
749
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
750
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
751
-
752
- mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
753
-
754
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
755
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
756
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
757
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
758
-}
759
-
760
-void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
761
- struct mvpp2_cls_c2_entry *c2)
762
-{
763
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
764
-
765
- c2->index = index;
766
-
767
- c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
768
- c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
769
- c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
770
- c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
771
- c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
772
-
773
- c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
774
-
775
- c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
776
- c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
777
- c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
778
- c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
779859 }
780860
781861 static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
@@ -790,6 +870,10 @@
790870 pmap = BIT(port->id);
791871 c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
792872 c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
873
+
874
+ /* Match on Lookup Type */
875
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
876
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP22_CLS_LU_TYPE_ALL);
793877
794878 /* Update RSS status after matching this entry */
795879 c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
@@ -809,6 +893,8 @@
809893 c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
810894 MVPP22_CLS_C2_ATTR0_QLOW(ql);
811895
896
+ c2.valid = true;
897
+
812898 mvpp2_cls_c2_write(port->priv, &c2);
813899 }
814900
@@ -817,6 +903,7 @@
817903 {
818904 struct mvpp2_cls_lookup_entry le;
819905 struct mvpp2_cls_flow_entry fe;
906
+ struct mvpp2_cls_c2_entry c2;
820907 int index;
821908
822909 /* Enable classifier */
@@ -839,6 +926,20 @@
839926 le.way = 1;
840927 mvpp2_cls_lookup_write(priv, &le);
841928 }
929
+
930
+ /* Clear C2 TCAM engine table */
931
+ memset(&c2, 0, sizeof(c2));
932
+ c2.valid = false;
933
+ for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
934
+ c2.index = index;
935
+ mvpp2_cls_c2_write(priv, &c2);
936
+ }
937
+
938
+ /* Disable the FIFO stages in C2 engine, which are only used in BIST
939
+ * mode
940
+ */
941
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_CTRL,
942
+ MVPP22_CLS_C2_TCAM_BYPASS_FIFO);
842943
843944 mvpp2_cls_port_init_flows(priv);
844945 }
@@ -880,11 +981,21 @@
880981 return mvpp2_read(priv, MVPP22_CLS_C2_HIT_CTR);
881982 }
882983
883
-static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port)
984
+static void mvpp2_rss_port_c2_enable(struct mvpp2_port *port, u32 ctx)
884985 {
885986 struct mvpp2_cls_c2_entry c2;
987
+ u8 qh, ql;
886988
887989 mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
990
+
991
+ /* The RxQ number is used to select the RSS table. In that case, we set
992
+ * it to be the ctx number.
993
+ */
994
+ qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
995
+ ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
996
+
997
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
998
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
888999
8891000 c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
8901001
@@ -894,22 +1005,57 @@
8941005 static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
8951006 {
8961007 struct mvpp2_cls_c2_entry c2;
1008
+ u8 qh, ql;
8971009
8981010 mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
1011
+
1012
+ /* Reset the default destination RxQ to the port's first rx queue. */
1013
+ qh = (port->first_rxq >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
1014
+ ql = port->first_rxq & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
1015
+
1016
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
1017
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
8991018
9001019 c2.attr[2] &= ~MVPP22_CLS_C2_ATTR2_RSS_EN;
9011020
9021021 mvpp2_cls_c2_write(port->priv, &c2);
9031022 }
9041023
905
-void mvpp22_rss_enable(struct mvpp2_port *port)
1024
+static inline int mvpp22_rss_ctx(struct mvpp2_port *port, int port_rss_ctx)
9061025 {
907
- mvpp2_rss_port_c2_enable(port);
1026
+ return port->rss_ctx[port_rss_ctx];
9081027 }
9091028
910
-void mvpp22_rss_disable(struct mvpp2_port *port)
1029
+int mvpp22_port_rss_enable(struct mvpp2_port *port)
9111030 {
1031
+ if (mvpp22_rss_ctx(port, 0) < 0)
1032
+ return -EINVAL;
1033
+
1034
+ mvpp2_rss_port_c2_enable(port, mvpp22_rss_ctx(port, 0));
1035
+
1036
+ return 0;
1037
+}
1038
+
1039
+int mvpp22_port_rss_disable(struct mvpp2_port *port)
1040
+{
1041
+ if (mvpp22_rss_ctx(port, 0) < 0)
1042
+ return -EINVAL;
1043
+
9121044 mvpp2_rss_port_c2_disable(port);
1045
+
1046
+ return 0;
1047
+}
1048
+
1049
+static void mvpp22_port_c2_lookup_disable(struct mvpp2_port *port, int entry)
1050
+{
1051
+ struct mvpp2_cls_c2_entry c2;
1052
+
1053
+ mvpp2_cls_c2_read(port->priv, entry, &c2);
1054
+
1055
+ /* Clear the port map so that the entry doesn't match anymore */
1056
+ c2.tcam[4] &= ~(MVPP22_CLS_C2_PORT_ID(BIT(port->id)));
1057
+
1058
+ mvpp2_cls_c2_write(port->priv, &c2);
9131059 }
9141060
9151061 /* Set CPU queue number for oversize packets */
@@ -924,8 +1070,381 @@
9241070 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
9251071
9261072 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
927
- val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
1073
+ val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
9281074 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
1075
+}
1076
+
1077
+static int mvpp2_port_c2_tcam_rule_add(struct mvpp2_port *port,
1078
+ struct mvpp2_rfs_rule *rule)
1079
+{
1080
+ struct flow_action_entry *act;
1081
+ struct mvpp2_cls_c2_entry c2;
1082
+ u8 qh, ql, pmap;
1083
+ int index, ctx;
1084
+
1085
+ if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
1086
+ return -EOPNOTSUPP;
1087
+
1088
+ memset(&c2, 0, sizeof(c2));
1089
+
1090
+ index = mvpp2_cls_c2_port_flow_index(port, rule->loc);
1091
+ if (index < 0)
1092
+ return -EINVAL;
1093
+ c2.index = index;
1094
+
1095
+ act = &rule->flow->action.entries[0];
1096
+
1097
+ rule->c2_index = c2.index;
1098
+
1099
+ c2.tcam[3] = (rule->c2_tcam & 0xffff) |
1100
+ ((rule->c2_tcam_mask & 0xffff) << 16);
1101
+ c2.tcam[2] = ((rule->c2_tcam >> 16) & 0xffff) |
1102
+ (((rule->c2_tcam_mask >> 16) & 0xffff) << 16);
1103
+ c2.tcam[1] = ((rule->c2_tcam >> 32) & 0xffff) |
1104
+ (((rule->c2_tcam_mask >> 32) & 0xffff) << 16);
1105
+ c2.tcam[0] = ((rule->c2_tcam >> 48) & 0xffff) |
1106
+ (((rule->c2_tcam_mask >> 48) & 0xffff) << 16);
1107
+
1108
+ pmap = BIT(port->id);
1109
+ c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
1110
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
1111
+
1112
+ /* Match on Lookup Type */
1113
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
1114
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(rule->loc);
1115
+
1116
+ if (act->id == FLOW_ACTION_DROP) {
1117
+ c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_RED_LOCK);
1118
+ } else {
1119
+ /* We want to keep the default color derived from the Header
1120
+ * Parser drop entries, for VLAN and MAC filtering. This will
1121
+ * assign a default color of Green or Red, and we want matches
1122
+ * with a non-drop action to keep that color.
1123
+ */
1124
+ c2.act = MVPP22_CLS_C2_ACT_COLOR(MVPP22_C2_COL_NO_UPD_LOCK);
1125
+
1126
+ /* Update RSS status after matching this entry */
1127
+ if (act->queue.ctx)
1128
+ c2.attr[2] |= MVPP22_CLS_C2_ATTR2_RSS_EN;
1129
+
1130
+ /* Always lock the RSS_EN decision. We might have high prio
1131
+ * rules steering to an RXQ, and a lower one steering to RSS,
1132
+ * we don't want the low prio RSS rule overwriting this flag.
1133
+ */
1134
+ c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
1135
+
1136
+ /* Mark packet as "forwarded to software", needed for RSS */
1137
+ c2.act |= MVPP22_CLS_C2_ACT_FWD(MVPP22_C2_FWD_SW_LOCK);
1138
+
1139
+ c2.act |= MVPP22_CLS_C2_ACT_QHIGH(MVPP22_C2_UPD_LOCK) |
1140
+ MVPP22_CLS_C2_ACT_QLOW(MVPP22_C2_UPD_LOCK);
1141
+
1142
+ if (act->queue.ctx) {
1143
+ /* Get the global ctx number */
1144
+ ctx = mvpp22_rss_ctx(port, act->queue.ctx);
1145
+ if (ctx < 0)
1146
+ return -EINVAL;
1147
+
1148
+ qh = (ctx >> 3) & MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
1149
+ ql = ctx & MVPP22_CLS_C2_ATTR0_QLOW_MASK;
1150
+ } else {
1151
+ qh = ((act->queue.index + port->first_rxq) >> 3) &
1152
+ MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
1153
+ ql = (act->queue.index + port->first_rxq) &
1154
+ MVPP22_CLS_C2_ATTR0_QLOW_MASK;
1155
+ }
1156
+
1157
+ c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
1158
+ MVPP22_CLS_C2_ATTR0_QLOW(ql);
1159
+ }
1160
+
1161
+ c2.valid = true;
1162
+
1163
+ mvpp2_cls_c2_write(port->priv, &c2);
1164
+
1165
+ return 0;
1166
+}
1167
+
1168
+static int mvpp2_port_c2_rfs_rule_insert(struct mvpp2_port *port,
1169
+ struct mvpp2_rfs_rule *rule)
1170
+{
1171
+ return mvpp2_port_c2_tcam_rule_add(port, rule);
1172
+}
1173
+
1174
+static int mvpp2_port_cls_rfs_rule_remove(struct mvpp2_port *port,
1175
+ struct mvpp2_rfs_rule *rule)
1176
+{
1177
+ const struct mvpp2_cls_flow *flow;
1178
+ struct mvpp2_cls_flow_entry fe;
1179
+ int index, i;
1180
+
1181
+ for_each_cls_flow_id_containing_type(i, rule->flow_type) {
1182
+ flow = mvpp2_cls_flow_get(i);
1183
+ if (!flow)
1184
+ return 0;
1185
+
1186
+ index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
1187
+
1188
+ mvpp2_cls_flow_read(port->priv, index, &fe);
1189
+ mvpp2_cls_flow_port_remove(&fe, BIT(port->id));
1190
+ mvpp2_cls_flow_write(port->priv, &fe);
1191
+ }
1192
+
1193
+ if (rule->c2_index >= 0)
1194
+ mvpp22_port_c2_lookup_disable(port, rule->c2_index);
1195
+
1196
+ return 0;
1197
+}
1198
+
1199
+static int mvpp2_port_flt_rfs_rule_insert(struct mvpp2_port *port,
1200
+ struct mvpp2_rfs_rule *rule)
1201
+{
1202
+ const struct mvpp2_cls_flow *flow;
1203
+ struct mvpp2 *priv = port->priv;
1204
+ struct mvpp2_cls_flow_entry fe;
1205
+ int index, ret, i;
1206
+
1207
+ if (rule->engine != MVPP22_CLS_ENGINE_C2)
1208
+ return -EOPNOTSUPP;
1209
+
1210
+ ret = mvpp2_port_c2_rfs_rule_insert(port, rule);
1211
+ if (ret)
1212
+ return ret;
1213
+
1214
+ for_each_cls_flow_id_containing_type(i, rule->flow_type) {
1215
+ flow = mvpp2_cls_flow_get(i);
1216
+ if (!flow)
1217
+ return 0;
1218
+
1219
+ if ((rule->hek_fields & flow->supported_hash_opts) != rule->hek_fields)
1220
+ continue;
1221
+
1222
+ index = MVPP2_CLS_FLT_C2_RFS(port->id, flow->flow_id, rule->loc);
1223
+
1224
+ mvpp2_cls_flow_read(priv, index, &fe);
1225
+ mvpp2_cls_flow_eng_set(&fe, rule->engine);
1226
+ mvpp2_cls_flow_port_id_sel(&fe, true);
1227
+ mvpp2_flow_set_hek_fields(&fe, rule->hek_fields);
1228
+ mvpp2_cls_flow_lu_type_set(&fe, rule->loc);
1229
+ mvpp2_cls_flow_port_add(&fe, 0xf);
1230
+
1231
+ mvpp2_cls_flow_write(priv, &fe);
1232
+ }
1233
+
1234
+ return 0;
1235
+}
1236
+
1237
+static int mvpp2_cls_c2_build_match(struct mvpp2_rfs_rule *rule)
1238
+{
1239
+ struct flow_rule *flow = rule->flow;
1240
+ int offs = 0;
1241
+
1242
+ /* The order of insertion in C2 tcam must match the order in which
1243
+ * the fields are found in the header
1244
+ */
1245
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
1246
+ struct flow_match_vlan match;
1247
+
1248
+ flow_rule_match_vlan(flow, &match);
1249
+ if (match.mask->vlan_id) {
1250
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN;
1251
+
1252
+ rule->c2_tcam |= ((u64)match.key->vlan_id) << offs;
1253
+ rule->c2_tcam_mask |= ((u64)match.mask->vlan_id) << offs;
1254
+
1255
+ /* Don't update the offset yet */
1256
+ }
1257
+
1258
+ if (match.mask->vlan_priority) {
1259
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_VLAN_PRI;
1260
+
1261
+ /* VLAN pri is always at offset 13 relative to the
1262
+ * current offset
1263
+ */
1264
+ rule->c2_tcam |= ((u64)match.key->vlan_priority) <<
1265
+ (offs + 13);
1266
+ rule->c2_tcam_mask |= ((u64)match.mask->vlan_priority) <<
1267
+ (offs + 13);
1268
+ }
1269
+
1270
+ if (match.mask->vlan_dei)
1271
+ return -EOPNOTSUPP;
1272
+
1273
+ /* vlan id and prio always seem to take a full 16-bit slot in
1274
+ * the Header Extracted Key.
1275
+ */
1276
+ offs += 16;
1277
+ }
1278
+
1279
+ if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
1280
+ struct flow_match_ports match;
1281
+
1282
+ flow_rule_match_ports(flow, &match);
1283
+ if (match.mask->src) {
1284
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4SIP;
1285
+
1286
+ rule->c2_tcam |= ((u64)ntohs(match.key->src)) << offs;
1287
+ rule->c2_tcam_mask |= ((u64)ntohs(match.mask->src)) << offs;
1288
+ offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4SIP);
1289
+ }
1290
+
1291
+ if (match.mask->dst) {
1292
+ rule->hek_fields |= MVPP22_CLS_HEK_OPT_L4DIP;
1293
+
1294
+ rule->c2_tcam |= ((u64)ntohs(match.key->dst)) << offs;
1295
+ rule->c2_tcam_mask |= ((u64)ntohs(match.mask->dst)) << offs;
1296
+ offs += mvpp2_cls_hek_field_size(MVPP22_CLS_HEK_OPT_L4DIP);
1297
+ }
1298
+ }
1299
+
1300
+ if (hweight16(rule->hek_fields) > MVPP2_FLOW_N_FIELDS)
1301
+ return -EOPNOTSUPP;
1302
+
1303
+ return 0;
1304
+}
1305
+
1306
+static int mvpp2_cls_rfs_parse_rule(struct mvpp2_rfs_rule *rule)
1307
+{
1308
+ struct flow_rule *flow = rule->flow;
1309
+ struct flow_action_entry *act;
1310
+
1311
+ if (!flow_action_basic_hw_stats_check(&rule->flow->action, NULL))
1312
+ return -EOPNOTSUPP;
1313
+
1314
+ act = &flow->action.entries[0];
1315
+ if (act->id != FLOW_ACTION_QUEUE && act->id != FLOW_ACTION_DROP)
1316
+ return -EOPNOTSUPP;
1317
+
1318
+ /* When both an RSS context and a queue index are set, the index
1319
+ * is considered as an offset to be added to the indirection table
1320
+ * entries. We don't support this, so reject this rule.
1321
+ */
1322
+ if (act->queue.ctx && act->queue.index)
1323
+ return -EOPNOTSUPP;
1324
+
1325
+ /* For now, only use the C2 engine which has a HEK size limited to 64
1326
+ * bits for TCAM matching.
1327
+ */
1328
+ rule->engine = MVPP22_CLS_ENGINE_C2;
1329
+
1330
+ if (mvpp2_cls_c2_build_match(rule))
1331
+ return -EINVAL;
1332
+
1333
+ return 0;
1334
+}
1335
+
1336
+int mvpp2_ethtool_cls_rule_get(struct mvpp2_port *port,
1337
+ struct ethtool_rxnfc *rxnfc)
1338
+{
1339
+ struct mvpp2_ethtool_fs *efs;
1340
+
1341
+ if (rxnfc->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
1342
+ return -EINVAL;
1343
+
1344
+ efs = port->rfs_rules[rxnfc->fs.location];
1345
+ if (!efs)
1346
+ return -ENOENT;
1347
+
1348
+ memcpy(rxnfc, &efs->rxnfc, sizeof(efs->rxnfc));
1349
+
1350
+ return 0;
1351
+}
1352
+
1353
+int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port,
1354
+ struct ethtool_rxnfc *info)
1355
+{
1356
+ struct ethtool_rx_flow_spec_input input = {};
1357
+ struct ethtool_rx_flow_rule *ethtool_rule;
1358
+ struct mvpp2_ethtool_fs *efs, *old_efs;
1359
+ int ret = 0;
1360
+
1361
+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
1362
+ return -EINVAL;
1363
+
1364
+ efs = kzalloc(sizeof(*efs), GFP_KERNEL);
1365
+ if (!efs)
1366
+ return -ENOMEM;
1367
+
1368
+ input.fs = &info->fs;
1369
+
1370
+ /* We need to manually set the rss_ctx, since this info isn't present
1371
+ * in info->fs
1372
+ */
1373
+ if (info->fs.flow_type & FLOW_RSS)
1374
+ input.rss_ctx = info->rss_context;
1375
+
1376
+ ethtool_rule = ethtool_rx_flow_rule_create(&input);
1377
+ if (IS_ERR(ethtool_rule)) {
1378
+ ret = PTR_ERR(ethtool_rule);
1379
+ goto clean_rule;
1380
+ }
1381
+
1382
+ efs->rule.flow = ethtool_rule->rule;
1383
+ efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type);
1384
+ if (efs->rule.flow_type < 0) {
1385
+ ret = efs->rule.flow_type;
1386
+ goto clean_rule;
1387
+ }
1388
+
1389
+ ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
1390
+ if (ret)
1391
+ goto clean_eth_rule;
1392
+
1393
+ efs->rule.loc = info->fs.location;
1394
+
1395
+ /* Replace an already existing rule */
1396
+ if (port->rfs_rules[efs->rule.loc]) {
1397
+ old_efs = port->rfs_rules[efs->rule.loc];
1398
+ ret = mvpp2_port_cls_rfs_rule_remove(port, &old_efs->rule);
1399
+ if (ret)
1400
+ goto clean_eth_rule;
1401
+ kfree(old_efs);
1402
+ port->n_rfs_rules--;
1403
+ }
1404
+
1405
+ ret = mvpp2_port_flt_rfs_rule_insert(port, &efs->rule);
1406
+ if (ret)
1407
+ goto clean_eth_rule;
1408
+
1409
+ ethtool_rx_flow_rule_destroy(ethtool_rule);
1410
+ efs->rule.flow = NULL;
1411
+
1412
+ memcpy(&efs->rxnfc, info, sizeof(*info));
1413
+ port->rfs_rules[efs->rule.loc] = efs;
1414
+ port->n_rfs_rules++;
1415
+
1416
+ return ret;
1417
+
1418
+clean_eth_rule:
1419
+ ethtool_rx_flow_rule_destroy(ethtool_rule);
1420
+clean_rule:
1421
+ kfree(efs);
1422
+ return ret;
1423
+}
1424
+
1425
+int mvpp2_ethtool_cls_rule_del(struct mvpp2_port *port,
1426
+ struct ethtool_rxnfc *info)
1427
+{
1428
+ struct mvpp2_ethtool_fs *efs;
1429
+ int ret;
1430
+
1431
+ if (info->fs.location >= MVPP2_N_RFS_ENTRIES_PER_FLOW)
1432
+ return -EINVAL;
1433
+
1434
+ efs = port->rfs_rules[info->fs.location];
1435
+ if (!efs)
1436
+ return -EINVAL;
1437
+
1438
+ /* Remove the rule from the engines. */
1439
+ ret = mvpp2_port_cls_rfs_rule_remove(port, &efs->rule);
1440
+ if (ret)
1441
+ return ret;
1442
+
1443
+ port->n_rfs_rules--;
1444
+ port->rfs_rules[info->fs.location] = NULL;
1445
+ kfree(efs);
1446
+
1447
+ return 0;
9291448 }
9301449
9311450 static inline u32 mvpp22_rxfh_indir(struct mvpp2_port *port, u32 rxq)
@@ -947,37 +1466,181 @@
9471466 return port->first_rxq + ((rxq * nrxqs + rxq / cpus) % port->nrxqs);
9481467 }
9491468
950
-void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table)
1469
+static void mvpp22_rss_fill_table(struct mvpp2_port *port,
1470
+ struct mvpp2_rss_table *table,
1471
+ u32 rss_ctx)
9511472 {
9521473 struct mvpp2 *priv = port->priv;
9531474 int i;
9541475
9551476 for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++) {
956
- u32 sel = MVPP22_RSS_INDEX_TABLE(table) |
1477
+ u32 sel = MVPP22_RSS_INDEX_TABLE(rss_ctx) |
9571478 MVPP22_RSS_INDEX_TABLE_ENTRY(i);
9581479 mvpp2_write(priv, MVPP22_RSS_INDEX, sel);
9591480
9601481 mvpp2_write(priv, MVPP22_RSS_TABLE_ENTRY,
961
- mvpp22_rxfh_indir(port, port->indir[i]));
1482
+ mvpp22_rxfh_indir(port, table->indir[i]));
9621483 }
1484
+}
1485
+
1486
+static int mvpp22_rss_context_create(struct mvpp2_port *port, u32 *rss_ctx)
1487
+{
1488
+ struct mvpp2 *priv = port->priv;
1489
+ u32 ctx;
1490
+
1491
+ /* Find the first free RSS table */
1492
+ for (ctx = 0; ctx < MVPP22_N_RSS_TABLES; ctx++) {
1493
+ if (!priv->rss_tables[ctx])
1494
+ break;
1495
+ }
1496
+
1497
+ if (ctx == MVPP22_N_RSS_TABLES)
1498
+ return -EINVAL;
1499
+
1500
+ priv->rss_tables[ctx] = kzalloc(sizeof(*priv->rss_tables[ctx]),
1501
+ GFP_KERNEL);
1502
+ if (!priv->rss_tables[ctx])
1503
+ return -ENOMEM;
1504
+
1505
+ *rss_ctx = ctx;
1506
+
1507
+ /* Set the table width: replace the whole classifier Rx queue number
1508
+ * with the ones configured in RSS table entries.
1509
+ */
1510
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(ctx));
1511
+ mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
1512
+
1513
+ mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_QUEUE(ctx));
1514
+ mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE, MVPP22_RSS_TABLE_POINTER(ctx));
1515
+
1516
+ return 0;
1517
+}
1518
+
1519
+int mvpp22_port_rss_ctx_create(struct mvpp2_port *port, u32 *port_ctx)
1520
+{
1521
+ u32 rss_ctx;
1522
+ int ret, i;
1523
+
1524
+ ret = mvpp22_rss_context_create(port, &rss_ctx);
1525
+ if (ret)
1526
+ return ret;
1527
+
1528
+ /* Find the first available context number in the port, starting from 1.
1529
+ * Context 0 on each port is reserved for the default context.
1530
+ */
1531
+ for (i = 1; i < MVPP22_N_RSS_TABLES; i++) {
1532
+ if (port->rss_ctx[i] < 0)
1533
+ break;
1534
+ }
1535
+
1536
+ if (i == MVPP22_N_RSS_TABLES)
1537
+ return -EINVAL;
1538
+
1539
+ port->rss_ctx[i] = rss_ctx;
1540
+ *port_ctx = i;
1541
+
1542
+ return 0;
1543
+}
1544
+
1545
+static struct mvpp2_rss_table *mvpp22_rss_table_get(struct mvpp2 *priv,
1546
+ int rss_ctx)
1547
+{
1548
+ if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
1549
+ return NULL;
1550
+
1551
+ return priv->rss_tables[rss_ctx];
1552
+}
1553
+
1554
+int mvpp22_port_rss_ctx_delete(struct mvpp2_port *port, u32 port_ctx)
1555
+{
1556
+ struct mvpp2 *priv = port->priv;
1557
+ struct ethtool_rxnfc *rxnfc;
1558
+ int i, rss_ctx, ret;
1559
+
1560
+ rss_ctx = mvpp22_rss_ctx(port, port_ctx);
1561
+
1562
+ if (rss_ctx < 0 || rss_ctx >= MVPP22_N_RSS_TABLES)
1563
+ return -EINVAL;
1564
+
1565
+ /* Invalidate any active classification rule that use this context */
1566
+ for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
1567
+ if (!port->rfs_rules[i])
1568
+ continue;
1569
+
1570
+ rxnfc = &port->rfs_rules[i]->rxnfc;
1571
+ if (!(rxnfc->fs.flow_type & FLOW_RSS) ||
1572
+ rxnfc->rss_context != port_ctx)
1573
+ continue;
1574
+
1575
+ ret = mvpp2_ethtool_cls_rule_del(port, rxnfc);
1576
+ if (ret) {
1577
+ netdev_warn(port->dev,
1578
+ "couldn't remove classification rule %d associated to this context",
1579
+ rxnfc->fs.location);
1580
+ }
1581
+ }
1582
+
1583
+ kfree(priv->rss_tables[rss_ctx]);
1584
+
1585
+ priv->rss_tables[rss_ctx] = NULL;
1586
+ port->rss_ctx[port_ctx] = -1;
1587
+
1588
+ return 0;
1589
+}
1590
+
1591
+int mvpp22_port_rss_ctx_indir_set(struct mvpp2_port *port, u32 port_ctx,
1592
+ const u32 *indir)
1593
+{
1594
+ int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
1595
+ struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
1596
+ rss_ctx);
1597
+
1598
+ if (!rss_table)
1599
+ return -EINVAL;
1600
+
1601
+ memcpy(rss_table->indir, indir,
1602
+ MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
1603
+
1604
+ mvpp22_rss_fill_table(port, rss_table, rss_ctx);
1605
+
1606
+ return 0;
1607
+}
1608
+
1609
+int mvpp22_port_rss_ctx_indir_get(struct mvpp2_port *port, u32 port_ctx,
1610
+ u32 *indir)
1611
+{
1612
+ int rss_ctx = mvpp22_rss_ctx(port, port_ctx);
1613
+ struct mvpp2_rss_table *rss_table = mvpp22_rss_table_get(port->priv,
1614
+ rss_ctx);
1615
+
1616
+ if (!rss_table)
1617
+ return -EINVAL;
1618
+
1619
+ memcpy(indir, rss_table->indir,
1620
+ MVPP22_RSS_TABLE_ENTRIES * sizeof(rss_table->indir[0]));
1621
+
1622
+ return 0;
9631623 }
9641624
9651625 int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info)
9661626 {
9671627 u16 hash_opts = 0;
1628
+ u32 flow_type;
9681629
969
- switch (info->flow_type) {
970
- case TCP_V4_FLOW:
971
- case UDP_V4_FLOW:
972
- case TCP_V6_FLOW:
973
- case UDP_V6_FLOW:
1630
+ flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
1631
+
1632
+ switch (flow_type) {
1633
+ case MVPP22_FLOW_TCP4:
1634
+ case MVPP22_FLOW_UDP4:
1635
+ case MVPP22_FLOW_TCP6:
1636
+ case MVPP22_FLOW_UDP6:
9741637 if (info->data & RXH_L4_B_0_1)
9751638 hash_opts |= MVPP22_CLS_HEK_OPT_L4SIP;
9761639 if (info->data & RXH_L4_B_2_3)
9771640 hash_opts |= MVPP22_CLS_HEK_OPT_L4DIP;
978
- /* Fallthrough */
979
- case IPV4_FLOW:
980
- case IPV6_FLOW:
1641
+ fallthrough;
1642
+ case MVPP22_FLOW_IP4:
1643
+ case MVPP22_FLOW_IP6:
9811644 if (info->data & RXH_L2DA)
9821645 hash_opts |= MVPP22_CLS_HEK_OPT_MAC_DA;
9831646 if (info->data & RXH_VLAN)
@@ -994,15 +1657,18 @@
9941657 default: return -EOPNOTSUPP;
9951658 }
9961659
997
- return mvpp2_port_rss_hash_opts_set(port, info->flow_type, hash_opts);
1660
+ return mvpp2_port_rss_hash_opts_set(port, flow_type, hash_opts);
9981661 }
9991662
10001663 int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
10011664 {
10021665 unsigned long hash_opts;
1666
+ u32 flow_type;
10031667 int i;
10041668
1005
- hash_opts = mvpp2_port_rss_hash_opts_get(port, info->flow_type);
1669
+ flow_type = mvpp2_cls_ethtool_flow_to_type(info->flow_type);
1670
+
1671
+ hash_opts = mvpp2_port_rss_hash_opts_get(port, flow_type);
10061672 info->data = 0;
10071673
10081674 for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
@@ -1037,38 +1703,40 @@
10371703 return 0;
10381704 }
10391705
1040
-void mvpp22_rss_port_init(struct mvpp2_port *port)
1706
+int mvpp22_port_rss_init(struct mvpp2_port *port)
10411707 {
1042
- struct mvpp2 *priv = port->priv;
1043
- int i;
1708
+ struct mvpp2_rss_table *table;
1709
+ u32 context = 0;
1710
+ int i, ret;
10441711
1045
- /* Set the table width: replace the whole classifier Rx queue number
1046
- * with the ones configured in RSS table entries.
1047
- */
1048
- mvpp2_write(priv, MVPP22_RSS_INDEX, MVPP22_RSS_INDEX_TABLE(port->id));
1049
- mvpp2_write(priv, MVPP22_RSS_WIDTH, 8);
1712
+ for (i = 0; i < MVPP22_N_RSS_TABLES; i++)
1713
+ port->rss_ctx[i] = -1;
10501714
1051
- /* The default RxQ is used as a key to select the RSS table to use.
1052
- * We use one RSS table per port.
1053
- */
1054
- mvpp2_write(priv, MVPP22_RSS_INDEX,
1055
- MVPP22_RSS_INDEX_QUEUE(port->first_rxq));
1056
- mvpp2_write(priv, MVPP22_RXQ2RSS_TABLE,
1057
- MVPP22_RSS_TABLE_POINTER(port->id));
1715
+ ret = mvpp22_rss_context_create(port, &context);
1716
+ if (ret)
1717
+ return ret;
1718
+
1719
+ table = mvpp22_rss_table_get(port->priv, context);
1720
+ if (!table)
1721
+ return -EINVAL;
1722
+
1723
+ port->rss_ctx[0] = context;
10581724
10591725 /* Configure the first table to evenly distribute the packets across
10601726 * real Rx Queues. The table entries map a hash to a port Rx Queue.
10611727 */
10621728 for (i = 0; i < MVPP22_RSS_TABLE_ENTRIES; i++)
1063
- port->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
1729
+ table->indir[i] = ethtool_rxfh_indir_default(i, port->nrxqs);
10641730
1065
- mvpp22_rss_fill_table(port, port->id);
1731
+ mvpp22_rss_fill_table(port, table, mvpp22_rss_ctx(port, 0));
10661732
10671733 /* Configure default flows */
1068
- mvpp2_port_rss_hash_opts_set(port, IPV4_FLOW, MVPP22_CLS_HEK_IP4_2T);
1069
- mvpp2_port_rss_hash_opts_set(port, IPV6_FLOW, MVPP22_CLS_HEK_IP6_2T);
1070
- mvpp2_port_rss_hash_opts_set(port, TCP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
1071
- mvpp2_port_rss_hash_opts_set(port, TCP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
1072
- mvpp2_port_rss_hash_opts_set(port, UDP_V4_FLOW, MVPP22_CLS_HEK_IP4_5T);
1073
- mvpp2_port_rss_hash_opts_set(port, UDP_V6_FLOW, MVPP22_CLS_HEK_IP6_5T);
1734
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP4, MVPP22_CLS_HEK_IP4_2T);
1735
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_IP6, MVPP22_CLS_HEK_IP6_2T);
1736
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP4, MVPP22_CLS_HEK_IP4_5T);
1737
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_TCP6, MVPP22_CLS_HEK_IP6_5T);
1738
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP4, MVPP22_CLS_HEK_IP4_5T);
1739
+ mvpp2_port_rss_hash_opts_set(port, MVPP22_FLOW_UDP6, MVPP22_CLS_HEK_IP6_5T);
1740
+
1741
+ return 0;
10741742 }