 .. |  .. |
 33 |  33 | #include <linux/kernel.h>
 34 |  34 | #include <linux/module.h>
 35 |  35 | #include <linux/mlx5/driver.h>
 36 |     | -#include <linux/mlx5/cmd.h>
 37 |  36 | #include "mlx5_core.h"
 38 |  37 |
 39 |  38 | /* Scheduling element fw management */
 40 |  39 | int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
 41 |  40 | 				       void *ctx, u32 *element_id)
 42 |  41 | {
 43 |     | -	u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
 44 |     | -	u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {0};
     |  42 | +	u32 out[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
     |  43 | +	u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)] = {};
 45 |  44 | 	void *schedc;
 46 |  45 | 	int err;
 47 |  46 |
 .. |  .. |
 53 |  52 | 		 hierarchy);
 54 |  53 | 	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
 55 |  54 |
 56 |     | -	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
     |  55 | +	err = mlx5_cmd_exec_inout(dev, create_scheduling_element, in, out);
 57 |  56 | 	if (err)
 58 |  57 | 		return err;
 59 |  58 |
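Note: the mlx5_cmd_exec_inout()/mlx5_cmd_exec_in() calls introduced here and in the hunks below are convenience wrappers from mlx5_core.h. They derive the mailbox sizes from the mlx5_ifc command layout names, which is why the explicit sizeof() arguments, the on-stack out buffers for in-only commands, and the linux/mlx5/cmd.h include can all be dropped. A rough sketch of their shape (approximate, not the verbatim kernel definition):

/* Sizes come from the ifc layouts, e.g. create_scheduling_element_in/_out */
#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                            \
	({                                                                    \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,   \
			      MLX5_ST_SZ_BYTES(ifc_cmd##_out));               \
	})

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                                    \
	({                                                                    \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};                  \
									      \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);                  \
	})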
 .. |  .. |
 66 |  65 | 				       void *ctx, u32 element_id,
 67 |  66 | 				       u32 modify_bitmask)
 68 |  67 | {
 69 |     | -	u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
 70 |     | -	u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {0};
     |  68 | +	u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)] = {};
 71 |  69 | 	void *schedc;
 72 |  70 |
 73 |  71 | 	schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
 .. |  .. |
 82 |  80 | 			 hierarchy);
 83 |  81 | 	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
 84 |  82 |
 85 |     | -	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
     |  83 | +	return mlx5_cmd_exec_in(dev, modify_scheduling_element, in);
 86 |  84 | }
 87 |  85 |
 88 |  86 | int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
 89 |  87 | 					u32 element_id)
 90 |  88 | {
 91 |     | -	u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
 92 |     | -	u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {0};
     |  89 | +	u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)] = {};
 93 |  90 |
 94 |  91 | 	MLX5_SET(destroy_scheduling_element_in, in, opcode,
 95 |  92 | 		 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
 .. |  .. |
 98 |  95 | 	MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
 99 |  96 | 		 hierarchy);
100 |  97 |
101 |     | -	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
     |  98 | +	return mlx5_cmd_exec_in(dev, destroy_scheduling_element, in);
     |  99 | +}
     | 100 | +
     | 101 | +static bool mlx5_rl_are_equal_raw(struct mlx5_rl_entry *entry, void *rl_in,
     | 102 | +				  u16 uid)
     | 103 | +{
     | 104 | +	return (!memcmp(entry->rl_raw, rl_in, sizeof(entry->rl_raw)) &&
     | 105 | +		entry->uid == uid);
102 | 106 | }
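mlx5_rl_are_equal_raw() compares against fields that the companion header change adds to struct mlx5_rl_entry in include/linux/mlx5/driver.h (not shown in this diff). Roughly, with the exact field order and widths being an assumption here:

struct mlx5_rl_entry {
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)]; /* cached raw context */
	u16 index;	/* 1-based HW rate limit index; 0 means unlimited */
	u64 refcount;
	u16 uid;	/* owning user context; 0 or MLX5_SHARED_RESOURCE_UID */
	u8 dedicated : 1; /* reserved for a single caller, never matched for sharing */
};

The old struct mlx5_rate_limit member is replaced by the raw set_pp_rate_limit_context bytes, so equality is now a memcmp() of the cached context plus a uid match.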
103 | 107 |
104 | 108 | /* Finds an entry where we can register the given rate
 .. |  .. |
107 | 111 |  * If the table is full, return NULL
108 | 112 |  */
109 | 113 | static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
110 |     | -					   struct mlx5_rate_limit *rl)
    | 114 | +					   void *rl_in, u16 uid, bool dedicated)
111 | 115 | {
112 | 116 | 	struct mlx5_rl_entry *ret_entry = NULL;
113 | 117 | 	bool empty_found = false;
114 | 118 | 	int i;
115 | 119 |
116 | 120 | 	for (i = 0; i < table->max_size; i++) {
117 |     | -		if (mlx5_rl_are_equal(&table->rl_entry[i].rl, rl))
118 |     | -			return &table->rl_entry[i];
119 |     | -		if (!empty_found && !table->rl_entry[i].rl.rate) {
    | 121 | +		if (dedicated) {
    | 122 | +			if (!table->rl_entry[i].refcount)
    | 123 | +				return &table->rl_entry[i];
    | 124 | +			continue;
    | 125 | +		}
    | 126 | +
    | 127 | +		if (table->rl_entry[i].refcount) {
    | 128 | +			if (table->rl_entry[i].dedicated)
    | 129 | +				continue;
    | 130 | +			if (mlx5_rl_are_equal_raw(&table->rl_entry[i], rl_in,
    | 131 | +						  uid))
    | 132 | +				return &table->rl_entry[i];
    | 133 | +		} else if (!empty_found) {
120 | 134 | 			empty_found = true;
121 | 135 | 			ret_entry = &table->rl_entry[i];
122 | 136 | 		}
 .. |  .. |
126 | 140 | }
127 | 141 |
128 | 142 | static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
129 |     | -				      u16 index,
130 |     | -				      struct mlx5_rate_limit *rl)
    | 143 | +				      struct mlx5_rl_entry *entry, bool set)
131 | 144 | {
132 |     | -	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
133 |     | -	u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
    | 145 | +	u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {};
    | 146 | +	void *pp_context;
134 | 147 |
    | 148 | +	pp_context = MLX5_ADDR_OF(set_pp_rate_limit_in, in, ctx);
135 | 149 | 	MLX5_SET(set_pp_rate_limit_in, in, opcode,
136 | 150 | 		 MLX5_CMD_OP_SET_PP_RATE_LIMIT);
137 |     | -	MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
138 |     | -	MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rl->rate);
139 |     | -	MLX5_SET(set_pp_rate_limit_in, in, burst_upper_bound, rl->max_burst_sz);
140 |     | -	MLX5_SET(set_pp_rate_limit_in, in, typical_packet_size, rl->typical_pkt_sz);
141 |     | -	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
    | 151 | +	MLX5_SET(set_pp_rate_limit_in, in, uid, entry->uid);
    | 152 | +	MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, entry->index);
    | 153 | +	if (set)
    | 154 | +		memcpy(pp_context, entry->rl_raw, sizeof(entry->rl_raw));
    | 155 | +	return mlx5_cmd_exec_in(dev, set_pp_rate_limit, in);
142 | 156 | }
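The reworked mlx5_set_pp_rate_limit_cmd() folds the old configure and reset paths into one helper: with set == true it copies the cached entry->rl_raw into the command's ctx field, while with set == false the context stays zeroed, which clears the rate at entry->index. Illustrative call pattern (a sketch mirroring the call sites later in this file, not additional patch content):

	err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);	/* program entry->rl_raw */
	mlx5_set_pp_rate_limit_cmd(dev, entry, false);		/* reset the index to unlimited */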
143 | 157 |
144 | 158 | bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
 .. |  .. |
158 | 172 | }
159 | 173 | EXPORT_SYMBOL(mlx5_rl_are_equal);
160 | 174 |
161 |     | -int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
162 |     | -		     struct mlx5_rate_limit *rl)
    | 175 | +int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
    | 176 | +			 bool dedicated_entry, u16 *index)
163 | 177 | {
164 | 178 | 	struct mlx5_rl_table *table = &dev->priv.rl_table;
165 | 179 | 	struct mlx5_rl_entry *entry;
166 | 180 | 	int err = 0;
    | 181 | +	u32 rate;
167 | 182 |
    | 183 | +	rate = MLX5_GET(set_pp_rate_limit_context, rl_in, rate_limit);
168 | 184 | 	mutex_lock(&table->rl_lock);
169 | 185 |
170 |     | -	if (!rl->rate || !mlx5_rl_is_in_range(dev, rl->rate)) {
    | 186 | +	if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
171 | 187 | 		mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
172 |     | -			      rl->rate, table->min_rate, table->max_rate);
    | 188 | +			      rate, table->min_rate, table->max_rate);
173 | 189 | 		err = -EINVAL;
174 | 190 | 		goto out;
175 | 191 | 	}
176 | 192 |
177 |     | -	entry = find_rl_entry(table, rl);
    | 193 | +	entry = find_rl_entry(table, rl_in, uid, dedicated_entry);
178 | 194 | 	if (!entry) {
179 | 195 | 		mlx5_core_err(dev, "Max number of %u rates reached\n",
180 | 196 | 			      table->max_size);
 .. |  .. |
185 | 201 | 		/* rate already configured */
186 | 202 | 		entry->refcount++;
187 | 203 | 	} else {
    | 204 | +		memcpy(entry->rl_raw, rl_in, sizeof(entry->rl_raw));
    | 205 | +		entry->uid = uid;
188 | 206 | 		/* new rate limit */
189 |     | -		err = mlx5_set_pp_rate_limit_cmd(dev, entry->index, rl);
    | 207 | +		err = mlx5_set_pp_rate_limit_cmd(dev, entry, true);
190 | 208 | 		if (err) {
191 |     | -			mlx5_core_err(dev, "Failed configuring rate limit(err %d): \
192 |     | -				      rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
193 |     | -				      err, rl->rate, rl->max_burst_sz,
194 |     | -				      rl->typical_pkt_sz);
    | 209 | +			mlx5_core_err(
    | 210 | +				dev,
    | 211 | +				"Failed configuring rate limit(err %d): rate %u, max_burst_sz %u, typical_pkt_sz %u\n",
    | 212 | +				err, rate,
    | 213 | +				MLX5_GET(set_pp_rate_limit_context, rl_in,
    | 214 | +					 burst_upper_bound),
    | 215 | +				MLX5_GET(set_pp_rate_limit_context, rl_in,
    | 216 | +					 typical_packet_size));
195 | 217 | 			goto out;
196 | 218 | 		}
197 |     | -		entry->rl = *rl;
    | 219 | +
198 | 220 | 		entry->refcount = 1;
    | 221 | +		entry->dedicated = dedicated_entry;
199 | 222 | 	}
200 | 223 | 	*index = entry->index;
201 | 224 |
 .. |  .. |
203 | 226 | 	mutex_unlock(&table->rl_lock);
204 | 227 | 	return err;
205 | 228 | }
    | 229 | +EXPORT_SYMBOL(mlx5_rl_add_rate_raw);
    | 230 | +
    | 231 | +void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index)
    | 232 | +{
    | 233 | +	struct mlx5_rl_table *table = &dev->priv.rl_table;
    | 234 | +	struct mlx5_rl_entry *entry;
    | 235 | +
    | 236 | +	mutex_lock(&table->rl_lock);
    | 237 | +	entry = &table->rl_entry[index - 1];
    | 238 | +	entry->refcount--;
    | 239 | +	if (!entry->refcount)
    | 240 | +		/* need to remove rate */
    | 241 | +		mlx5_set_pp_rate_limit_cmd(dev, entry, false);
    | 242 | +	mutex_unlock(&table->rl_lock);
    | 243 | +}
    | 244 | +EXPORT_SYMBOL(mlx5_rl_remove_rate_raw);
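A hypothetical caller of the new raw API (dev, rate, burst, pkt_sz and uid below are placeholders, not taken from this patch): build a set_pp_rate_limit_context, register it, and later release it by the returned index. mlx5_rl_remove_rate_raw() can map index back to a table slot with index - 1 because mlx5_init_rl_table() assigns entry->index = i + 1, index 0 being reserved for the unlimited rate.

	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
	u16 index;
	int err;

	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rate);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound, burst);
	MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size, pkt_sz);

	err = mlx5_rl_add_rate_raw(dev, rl_raw, uid, true /* dedicated */, &index);
	if (err)
		return err;
	/* ... hand 'index' to the consumer ... */
	mlx5_rl_remove_rate_raw(dev, index);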
    | 245 | +
    | 246 | +int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
    | 247 | +		     struct mlx5_rate_limit *rl)
    | 248 | +{
    | 249 | +	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
    | 250 | +
    | 251 | +	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
    | 252 | +	MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
    | 253 | +		 rl->max_burst_sz);
    | 254 | +	MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
    | 255 | +		 rl->typical_pkt_sz);
    | 256 | +
    | 257 | +	return mlx5_rl_add_rate_raw(dev, rl_raw,
    | 258 | +				    MLX5_CAP_QOS(dev, packet_pacing_uid) ?
    | 259 | +					MLX5_SHARED_RESOURCE_UID : 0,
    | 260 | +				    false, index);
    | 261 | +}
206 | 262 | EXPORT_SYMBOL(mlx5_rl_add_rate);
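The original mlx5_rl_add_rate() is kept as a thin wrapper: it packs struct mlx5_rate_limit into a raw context and registers it as a shared (non-dedicated) entry, using MLX5_SHARED_RESOURCE_UID when the device reports the packet_pacing_uid capability and UID 0 otherwise, so existing callers keep working unchanged. A sketch of such a caller (mdev and the rate value are purely illustrative):

	struct mlx5_rate_limit rl = {
		.rate = 100000,	/* illustrative value, in the device's rate-limit units */
	};
	u16 rl_index;
	int err;

	err = mlx5_rl_add_rate(mdev, &rl_index, &rl);
	if (!err) {
		/* ... program the send queue with rl_index ... */
		mlx5_rl_remove_rate(mdev, &rl);
	}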
207 | 263 |
208 | 264 | void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl)
209 | 265 | {
    | 266 | +	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
210 | 267 | 	struct mlx5_rl_table *table = &dev->priv.rl_table;
211 | 268 | 	struct mlx5_rl_entry *entry = NULL;
212 |     | -	struct mlx5_rate_limit reset_rl = {0};
213 | 269 |
214 | 270 | 	/* 0 is a reserved value for unlimited rate */
215 | 271 | 	if (rl->rate == 0)
216 | 272 | 		return;
217 | 273 |
    | 274 | +	MLX5_SET(set_pp_rate_limit_context, rl_raw, rate_limit, rl->rate);
    | 275 | +	MLX5_SET(set_pp_rate_limit_context, rl_raw, burst_upper_bound,
    | 276 | +		 rl->max_burst_sz);
    | 277 | +	MLX5_SET(set_pp_rate_limit_context, rl_raw, typical_packet_size,
    | 278 | +		 rl->typical_pkt_sz);
    | 279 | +
218 | 280 | 	mutex_lock(&table->rl_lock);
219 |     | -	entry = find_rl_entry(table, rl);
    | 281 | +	entry = find_rl_entry(table, rl_raw,
    | 282 | +			      MLX5_CAP_QOS(dev, packet_pacing_uid) ?
    | 283 | +				  MLX5_SHARED_RESOURCE_UID : 0, false);
220 | 284 | 	if (!entry || !entry->refcount) {
221 |     | -		mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u \
222 |     | -			       are not configured\n",
    | 285 | +		mlx5_core_warn(dev, "Rate %u, max_burst_sz %u typical_pkt_sz %u are not configured\n",
223 | 286 | 			       rl->rate, rl->max_burst_sz, rl->typical_pkt_sz);
224 | 287 | 		goto out;
225 | 288 | 	}
226 | 289 |
227 | 290 | 	entry->refcount--;
228 |     | -	if (!entry->refcount) {
    | 291 | +	if (!entry->refcount)
229 | 292 | 		/* need to remove rate */
230 |     | -		mlx5_set_pp_rate_limit_cmd(dev, entry->index, &reset_rl);
231 |     | -		entry->rl = reset_rl;
232 |     | -	}
    | 293 | +		mlx5_set_pp_rate_limit_cmd(dev, entry, false);
233 | 294 |
234 | 295 | out:
235 | 296 | 	mutex_unlock(&table->rl_lock);
 .. |  .. |
275 | 336 | void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
276 | 337 | {
277 | 338 | 	struct mlx5_rl_table *table = &dev->priv.rl_table;
278 |     | -	struct mlx5_rate_limit rl = {0};
279 | 339 | 	int i;
280 | 340 |
281 | 341 | 	/* Clear all configured rates */
282 | 342 | 	for (i = 0; i < table->max_size; i++)
283 |     | -		if (table->rl_entry[i].rl.rate)
284 |     | -			mlx5_set_pp_rate_limit_cmd(dev, table->rl_entry[i].index,
285 |     | -						   &rl);
    | 343 | +		if (table->rl_entry[i].refcount)
    | 344 | +			mlx5_set_pp_rate_limit_cmd(dev, &table->rl_entry[i],
    | 345 | +						   false);
286 | 346 |
287 | 347 | 	kfree(dev->priv.rl_table.rl_entry);
288 | 348 | }
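Follow-up note: with struct mlx5_rate_limit gone from the entry, refcount rather than rl.rate is now the "slot in use" marker, which is why the cleanup loop above, find_rl_entry() and the remove paths all test refcount before issuing the clearing command.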