.. | .. |
---|
17 | 17 | { |
---|
18 | 18 | struct ice_sched_node *root; |
---|
19 | 19 | struct ice_hw *hw; |
---|
20 | | - u16 max_children; |
---|
21 | 20 | |
---|
22 | 21 | if (!pi) |
---|
23 | 22 | return ICE_ERR_PARAM; |
---|
.. | .. |
---|
28 | 27 | if (!root) |
---|
29 | 28 | return ICE_ERR_NO_MEMORY; |
---|
30 | 29 | |
---|
31 | | - max_children = le16_to_cpu(hw->layer_info[0].max_children); |
---|
32 | | - root->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, |
---|
| 30 | + /* coverity[suspicious_sizeof] */ |
---|
| 31 | + root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], |
---|
33 | 32 | sizeof(*root), GFP_KERNEL); |
---|
34 | 33 | if (!root->children) { |
---|
35 | 34 | devm_kfree(ice_hw_to_dev(hw), root); |
---|
.. | .. |
---|
44 | 43 | /** |
---|
45 | 44 | * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB |
---|
46 | 45 | * @start_node: pointer to the starting ice_sched_node struct in a sub-tree |
---|
47 | | - * @teid: node teid to search |
---|
| 46 | + * @teid: node TEID to search |
---|
48 | 47 | * |
---|
49 | | - * This function searches for a node matching the teid in the scheduling tree |
---|
| 48 | + * This function searches for a node matching the TEID in the scheduling tree |
---|
50 | 49 | * from the SW DB. The search is recursive and is restricted by the number of |
---|
51 | 50 | * layers it has searched through; stopping at the max supported layer. |
---|
52 | 51 | * |
---|
.. | .. |
---|
67 | 66 | start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) |
---|
68 | 67 | return NULL; |
---|
69 | 68 | |
---|
70 | | - /* Check if teid matches to any of the children nodes */ |
---|
| 69 | + /* Check if the TEID matches any of the child nodes */ |
---|
71 | 70 | for (i = 0; i < start_node->num_children; i++) |
---|
72 | 71 | if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid) |
---|
73 | 72 | return start_node->children[i]; |
---|
.. | .. |
---|
86 | 85 | } |
---|
87 | 86 | |
---|
88 | 87 | /** |
---|
| 88 | + * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd |
---|
| 89 | + * @hw: pointer to the HW struct |
---|
| 90 | + * @cmd_opc: cmd opcode |
---|
| 91 | + * @elems_req: number of elements to request |
---|
| 92 | + * @buf: pointer to buffer |
---|
| 93 | + * @buf_size: buffer size in bytes |
---|
| 94 | + * @elems_resp: returns total number of elements response |
---|
| 95 | + * @cd: pointer to command details structure or NULL |
---|
| 96 | + * |
---|
| 97 | + * This function sends a scheduling elements cmd (cmd_opc) |
---|
| 98 | + */ |
---|
| 99 | +static enum ice_status |
---|
| 100 | +ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc, |
---|
| 101 | + u16 elems_req, void *buf, u16 buf_size, |
---|
| 102 | + u16 *elems_resp, struct ice_sq_cd *cd) |
---|
| 103 | +{ |
---|
| 104 | + struct ice_aqc_sched_elem_cmd *cmd; |
---|
| 105 | + struct ice_aq_desc desc; |
---|
| 106 | + enum ice_status status; |
---|
| 107 | + |
---|
| 108 | + cmd = &desc.params.sched_elem_cmd; |
---|
| 109 | + ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc); |
---|
| 110 | + cmd->num_elem_req = cpu_to_le16(elems_req); |
---|
| 111 | + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); |
---|
| 112 | + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); |
---|
| 113 | + if (!status && elems_resp) |
---|
| 114 | + *elems_resp = le16_to_cpu(cmd->num_elem_resp); |
---|
| 115 | + |
---|
| 116 | + return status; |
---|
| 117 | +} |
---|
| 118 | + |
---|
| 119 | +/** |
---|
| 120 | + * ice_aq_query_sched_elems - query scheduler elements |
---|
| 121 | + * @hw: pointer to the HW struct |
---|
| 122 | + * @elems_req: number of elements to query |
---|
| 123 | + * @buf: pointer to buffer |
---|
| 124 | + * @buf_size: buffer size in bytes |
---|
| 125 | + * @elems_ret: returns total number of elements returned |
---|
| 126 | + * @cd: pointer to command details structure or NULL |
---|
| 127 | + * |
---|
| 128 | + * Query scheduling elements (0x0404) |
---|
| 129 | + */ |
---|
| 130 | +enum ice_status |
---|
| 131 | +ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req, |
---|
| 132 | + struct ice_aqc_txsched_elem_data *buf, u16 buf_size, |
---|
| 133 | + u16 *elems_ret, struct ice_sq_cd *cd) |
---|
| 134 | +{ |
---|
| 135 | + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems, |
---|
| 136 | + elems_req, (void *)buf, buf_size, |
---|
| 137 | + elems_ret, cd); |
---|
| 138 | +} |
---|
| 139 | + |
---|
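The new ice_aqc_send_sched_elem_cmd() helper centralizes the descriptor setup (opcode, ICE_AQ_FLAG_RD, element count) that the add/delete/configure/suspend/resume wrappers later in this patch are converted to reuse. The ice_sched_query_elem() call introduced in ice_sched_add_node() below presumably sits on top of ice_aq_query_sched_elems(); a minimal sketch of such a single-element query is shown here, assuming only node_teid needs to be filled in before the query (the helper name and error-code choice are illustrative, not taken from this patch):

```c
/* Illustrative sketch only -- the driver's real ice_sched_query_elem()
 * is defined elsewhere and may differ in detail.
 */
static enum ice_status
query_one_sched_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_txsched_elem_data *buf)
{
	u16 num_elems_ret = 0;
	enum ice_status status;

	memset(buf, 0, sizeof(*buf));
	buf->node_teid = cpu_to_le32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, sizeof(*buf),
					  &num_elems_ret, NULL);
	if (!status && num_elems_ret != 1)
		status = ICE_ERR_CFG;
	return status;
}
```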
| 140 | +/** |
---|
89 | 141 | * ice_sched_add_node - Insert the Tx scheduler node in SW DB |
---|
90 | 142 | * @pi: port information structure |
---|
91 | 143 | * @layer: Scheduler layer of the node |
---|
.. | .. |
---|
97 | 149 | ice_sched_add_node(struct ice_port_info *pi, u8 layer, |
---|
98 | 150 | struct ice_aqc_txsched_elem_data *info) |
---|
99 | 151 | { |
---|
| 152 | + struct ice_aqc_txsched_elem_data elem; |
---|
100 | 153 | struct ice_sched_node *parent; |
---|
101 | 154 | struct ice_sched_node *node; |
---|
| 155 | + enum ice_status status; |
---|
102 | 156 | struct ice_hw *hw; |
---|
103 | | - u16 max_children; |
---|
104 | 157 | |
---|
105 | 158 | if (!pi) |
---|
106 | 159 | return ICE_ERR_PARAM; |
---|
.. | .. |
---|
117 | 170 | return ICE_ERR_PARAM; |
---|
118 | 171 | } |
---|
119 | 172 | |
---|
| 173 | + /* query the current node information from FW before adding it |
---|
| 174 | + * to the SW DB |
---|
| 175 | + */ |
---|
| 176 | + status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem); |
---|
| 177 | + if (status) |
---|
| 178 | + return status; |
---|
| 179 | + |
---|
120 | 180 | node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL); |
---|
121 | 181 | if (!node) |
---|
122 | 182 | return ICE_ERR_NO_MEMORY; |
---|
123 | | - max_children = le16_to_cpu(hw->layer_info[layer].max_children); |
---|
124 | | - if (max_children) { |
---|
125 | | - node->children = devm_kcalloc(ice_hw_to_dev(hw), max_children, |
---|
| 183 | + if (hw->max_children[layer]) { |
---|
| 184 | + /* coverity[suspicious_sizeof] */ |
---|
| 185 | + node->children = devm_kcalloc(ice_hw_to_dev(hw), |
---|
| 186 | + hw->max_children[layer], |
---|
126 | 187 | sizeof(*node), GFP_KERNEL); |
---|
127 | 188 | if (!node->children) { |
---|
128 | 189 | devm_kfree(ice_hw_to_dev(hw), node); |
---|
.. | .. |
---|
134 | 195 | node->parent = parent; |
---|
135 | 196 | node->tx_sched_layer = layer; |
---|
136 | 197 | parent->children[parent->num_children++] = node; |
---|
137 | | - memcpy(&node->info, info, sizeof(*info)); |
---|
| 198 | + node->info = elem; |
---|
138 | 199 | return 0; |
---|
139 | 200 | } |
---|
140 | 201 | |
---|
141 | 202 | /** |
---|
142 | 203 | * ice_aq_delete_sched_elems - delete scheduler elements |
---|
143 | | - * @hw: pointer to the hw struct |
---|
| 204 | + * @hw: pointer to the HW struct |
---|
144 | 205 | * @grps_req: number of groups to delete |
---|
145 | 206 | * @buf: pointer to buffer |
---|
146 | 207 | * @buf_size: buffer size in bytes |
---|
.. | .. |
---|
154 | 215 | struct ice_aqc_delete_elem *buf, u16 buf_size, |
---|
155 | 216 | u16 *grps_del, struct ice_sq_cd *cd) |
---|
156 | 217 | { |
---|
157 | | - struct ice_aqc_add_move_delete_elem *cmd; |
---|
158 | | - struct ice_aq_desc desc; |
---|
159 | | - enum ice_status status; |
---|
160 | | - |
---|
161 | | - cmd = &desc.params.add_move_delete_elem; |
---|
162 | | - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_delete_sched_elems); |
---|
163 | | - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); |
---|
164 | | - cmd->num_grps_req = cpu_to_le16(grps_req); |
---|
165 | | - |
---|
166 | | - status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); |
---|
167 | | - if (!status && grps_del) |
---|
168 | | - *grps_del = le16_to_cpu(cmd->num_grps_updated); |
---|
169 | | - |
---|
170 | | - return status; |
---|
| 218 | + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems, |
---|
| 219 | + grps_req, (void *)buf, buf_size, |
---|
| 220 | + grps_del, cd); |
---|
171 | 221 | } |
---|
172 | 222 | |
---|
173 | 223 | /** |
---|
174 | | - * ice_sched_remove_elems - remove nodes from hw |
---|
175 | | - * @hw: pointer to the hw struct |
---|
| 224 | + * ice_sched_remove_elems - remove nodes from HW |
---|
| 225 | + * @hw: pointer to the HW struct |
---|
176 | 226 | * @parent: pointer to the parent node |
---|
177 | 227 | * @num_nodes: number of nodes |
---|
178 | 228 | * @node_teids: array of node teids to be deleted |
---|
179 | 229 | * |
---|
180 | | - * This function remove nodes from hw |
---|
| 230 | + * This function removes nodes from HW |
---|
181 | 231 | */ |
---|
182 | 232 | static enum ice_status |
---|
183 | 233 | ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, |
---|
.. | .. |
---|
188 | 238 | enum ice_status status; |
---|
189 | 239 | u16 buf_size; |
---|
190 | 240 | |
---|
191 | | - buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1); |
---|
| 241 | + buf_size = struct_size(buf, teid, num_nodes); |
---|
192 | 242 | buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); |
---|
193 | 243 | if (!buf) |
---|
194 | 244 | return ICE_ERR_NO_MEMORY; |
---|
| 245 | + |
---|
195 | 246 | buf->hdr.parent_teid = parent->info.node_teid; |
---|
196 | 247 | buf->hdr.num_elems = cpu_to_le16(num_nodes); |
---|
197 | 248 | for (i = 0; i < num_nodes; i++) |
---|
198 | 249 | buf->teid[i] = cpu_to_le32(node_teids[i]); |
---|
| 250 | + |
---|
199 | 251 | status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size, |
---|
200 | 252 | &num_groups_removed, NULL); |
---|
201 | 253 | if (status || num_groups_removed != 1) |
---|
202 | | - ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n"); |
---|
| 254 | + ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n", |
---|
| 255 | + hw->adminq.sq_last_status); |
---|
| 256 | + |
---|
203 | 257 | devm_kfree(ice_hw_to_dev(hw), buf); |
---|
204 | 258 | return status; |
---|
205 | 259 | } |
---|
206 | 260 | |
---|
207 | 261 | /** |
---|
208 | 262 | * ice_sched_get_first_node - get the first node of the given layer |
---|
209 | | - * @hw: pointer to the hw struct |
---|
| 263 | + * @pi: port information structure |
---|
210 | 264 | * @parent: pointer the base node of the subtree |
---|
211 | 265 | * @layer: layer number |
---|
212 | 266 | * |
---|
213 | 267 | * This function retrieves the first node of the given layer from the subtree |
---|
214 | 268 | */ |
---|
215 | 269 | static struct ice_sched_node * |
---|
216 | | -ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent, |
---|
217 | | - u8 layer) |
---|
| 270 | +ice_sched_get_first_node(struct ice_port_info *pi, |
---|
| 271 | + struct ice_sched_node *parent, u8 layer) |
---|
218 | 272 | { |
---|
219 | | - u8 i; |
---|
220 | | - |
---|
221 | | - if (layer < hw->sw_entry_point_layer) |
---|
222 | | - return NULL; |
---|
223 | | - for (i = 0; i < parent->num_children; i++) { |
---|
224 | | - struct ice_sched_node *node = parent->children[i]; |
---|
225 | | - |
---|
226 | | - if (node) { |
---|
227 | | - if (node->tx_sched_layer == layer) |
---|
228 | | - return node; |
---|
229 | | - /* this recursion is intentional, and wouldn't |
---|
230 | | - * go more than 9 calls |
---|
231 | | - */ |
---|
232 | | - return ice_sched_get_first_node(hw, node, layer); |
---|
233 | | - } |
---|
234 | | - } |
---|
235 | | - return NULL; |
---|
| 273 | + return pi->sib_head[parent->tc_num][layer]; |
---|
236 | 274 | } |
---|
237 | 275 | |
---|
238 | 276 | /** |
---|
.. | .. |
---|
246 | 284 | { |
---|
247 | 285 | u8 i; |
---|
248 | 286 | |
---|
249 | | - if (!pi) |
---|
| 287 | + if (!pi || !pi->root) |
---|
250 | 288 | return NULL; |
---|
251 | 289 | for (i = 0; i < pi->root->num_children; i++) |
---|
252 | 290 | if (pi->root->children[i]->tc_num == tc) |
---|
.. | .. |
---|
282 | 320 | node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT && |
---|
283 | 321 | node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) { |
---|
284 | 322 | u32 teid = le32_to_cpu(node->info.node_teid); |
---|
285 | | - enum ice_status status; |
---|
286 | 323 | |
---|
287 | | - status = ice_sched_remove_elems(hw, node->parent, 1, &teid); |
---|
288 | | - if (status) |
---|
289 | | - ice_debug(hw, ICE_DBG_SCHED, |
---|
290 | | - "remove element failed %d\n", status); |
---|
| 324 | + ice_sched_remove_elems(hw, node->parent, 1, &teid); |
---|
291 | 325 | } |
---|
292 | 326 | parent = node->parent; |
---|
293 | 327 | /* root has no parent */ |
---|
294 | 328 | if (parent) { |
---|
295 | | - struct ice_sched_node *p, *tc_node; |
---|
| 329 | + struct ice_sched_node *p; |
---|
296 | 330 | |
---|
297 | 331 | /* update the parent */ |
---|
298 | 332 | for (i = 0; i < parent->num_children; i++) |
---|
.. | .. |
---|
304 | 338 | break; |
---|
305 | 339 | } |
---|
306 | 340 | |
---|
307 | | - /* search for previous sibling that points to this node and |
---|
308 | | - * remove the reference |
---|
309 | | - */ |
---|
310 | | - tc_node = ice_sched_get_tc_node(pi, node->tc_num); |
---|
311 | | - if (!tc_node) { |
---|
312 | | - ice_debug(hw, ICE_DBG_SCHED, |
---|
313 | | - "Invalid TC number %d\n", node->tc_num); |
---|
314 | | - goto err_exit; |
---|
315 | | - } |
---|
316 | | - p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer); |
---|
| 341 | + p = ice_sched_get_first_node(pi, node, node->tx_sched_layer); |
---|
317 | 342 | while (p) { |
---|
318 | 343 | if (p->sibling == node) { |
---|
319 | 344 | p->sibling = node->sibling; |
---|
.. | .. |
---|
321 | 346 | } |
---|
322 | 347 | p = p->sibling; |
---|
323 | 348 | } |
---|
| 349 | + |
---|
| 350 | + /* update the sibling head if head is getting removed */ |
---|
| 351 | + if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node) |
---|
| 352 | + pi->sib_head[node->tc_num][node->tx_sched_layer] = |
---|
| 353 | + node->sibling; |
---|
324 | 354 | } |
---|
325 | | -err_exit: |
---|
| 355 | + |
---|
326 | 356 | /* leaf nodes have no children */ |
---|
327 | 357 | if (node->children) |
---|
328 | 358 | devm_kfree(ice_hw_to_dev(hw), node->children); |
---|
.. | .. |
---|
331 | 361 | |
---|
332 | 362 | /** |
---|
333 | 363 | * ice_aq_get_dflt_topo - gets default scheduler topology |
---|
334 | | - * @hw: pointer to the hw struct |
---|
| 364 | + * @hw: pointer to the HW struct |
---|
335 | 365 | * @lport: logical port number |
---|
336 | 366 | * @buf: pointer to buffer |
---|
337 | 367 | * @buf_size: buffer size in bytes |
---|
.. | .. |
---|
361 | 391 | |
---|
362 | 392 | /** |
---|
363 | 393 | * ice_aq_add_sched_elems - adds scheduling element |
---|
364 | | - * @hw: pointer to the hw struct |
---|
| 394 | + * @hw: pointer to the HW struct |
---|
365 | 395 | * @grps_req: the number of groups that are requested to be added |
---|
366 | 396 | * @buf: pointer to buffer |
---|
367 | 397 | * @buf_size: buffer size in bytes |
---|
.. | .. |
---|
375 | 405 | struct ice_aqc_add_elem *buf, u16 buf_size, |
---|
376 | 406 | u16 *grps_added, struct ice_sq_cd *cd) |
---|
377 | 407 | { |
---|
378 | | - struct ice_aqc_add_move_delete_elem *cmd; |
---|
379 | | - struct ice_aq_desc desc; |
---|
380 | | - enum ice_status status; |
---|
381 | | - |
---|
382 | | - cmd = &desc.params.add_move_delete_elem; |
---|
383 | | - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_sched_elems); |
---|
384 | | - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); |
---|
385 | | - |
---|
386 | | - cmd->num_grps_req = cpu_to_le16(grps_req); |
---|
387 | | - status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); |
---|
388 | | - if (!status && grps_added) |
---|
389 | | - *grps_added = le16_to_cpu(cmd->num_grps_updated); |
---|
390 | | - |
---|
391 | | - return status; |
---|
| 408 | + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems, |
---|
| 409 | + grps_req, (void *)buf, buf_size, |
---|
| 410 | + grps_added, cd); |
---|
392 | 411 | } |
---|
393 | 412 | |
---|
394 | 413 | /** |
---|
395 | | - * ice_suspend_resume_elems - suspend/resume scheduler elements |
---|
396 | | - * @hw: pointer to the hw struct |
---|
397 | | - * @elems_req: number of elements to suspend |
---|
| 414 | + * ice_aq_cfg_sched_elems - configures scheduler elements |
---|
| 415 | + * @hw: pointer to the HW struct |
---|
| 416 | + * @elems_req: number of elements to configure |
---|
398 | 417 | * @buf: pointer to buffer |
---|
399 | 418 | * @buf_size: buffer size in bytes |
---|
400 | | - * @elems_ret: returns total number of elements suspended |
---|
| 419 | + * @elems_cfgd: returns total number of elements configured |
---|
401 | 420 | * @cd: pointer to command details structure or NULL |
---|
402 | | - * @cmd_code: command code for suspend or resume |
---|
403 | 421 | * |
---|
404 | | - * suspend/resume scheduler elements |
---|
| 422 | + * Configure scheduling elements (0x0403) |
---|
405 | 423 | */ |
---|
406 | 424 | static enum ice_status |
---|
407 | | -ice_suspend_resume_elems(struct ice_hw *hw, u16 elems_req, |
---|
408 | | - struct ice_aqc_suspend_resume_elem *buf, u16 buf_size, |
---|
409 | | - u16 *elems_ret, struct ice_sq_cd *cd, |
---|
410 | | - enum ice_adminq_opc cmd_code) |
---|
| 425 | +ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req, |
---|
| 426 | + struct ice_aqc_txsched_elem_data *buf, u16 buf_size, |
---|
| 427 | + u16 *elems_cfgd, struct ice_sq_cd *cd) |
---|
411 | 428 | { |
---|
412 | | - struct ice_aqc_get_cfg_elem *cmd; |
---|
413 | | - struct ice_aq_desc desc; |
---|
414 | | - enum ice_status status; |
---|
415 | | - |
---|
416 | | - cmd = &desc.params.get_update_elem; |
---|
417 | | - ice_fill_dflt_direct_cmd_desc(&desc, cmd_code); |
---|
418 | | - cmd->num_elem_req = cpu_to_le16(elems_req); |
---|
419 | | - desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); |
---|
420 | | - status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); |
---|
421 | | - if (!status && elems_ret) |
---|
422 | | - *elems_ret = le16_to_cpu(cmd->num_elem_resp); |
---|
423 | | - return status; |
---|
| 429 | + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems, |
---|
| 430 | + elems_req, (void *)buf, buf_size, |
---|
| 431 | + elems_cfgd, cd); |
---|
424 | 432 | } |
---|
425 | 433 | |
---|
426 | 434 | /** |
---|
427 | 435 | * ice_aq_suspend_sched_elems - suspend scheduler elements |
---|
428 | | - * @hw: pointer to the hw struct |
---|
| 436 | + * @hw: pointer to the HW struct |
---|
429 | 437 | * @elems_req: number of elements to suspend |
---|
430 | 438 | * @buf: pointer to buffer |
---|
431 | 439 | * @buf_size: buffer size in bytes |
---|
.. | .. |
---|
435 | 443 | * Suspend scheduling elements (0x0409) |
---|
436 | 444 | */ |
---|
437 | 445 | static enum ice_status |
---|
438 | | -ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, |
---|
439 | | - struct ice_aqc_suspend_resume_elem *buf, |
---|
| 446 | +ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, |
---|
440 | 447 | u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) |
---|
441 | 448 | { |
---|
442 | | - return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret, |
---|
443 | | - cd, ice_aqc_opc_suspend_sched_elems); |
---|
| 449 | + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems, |
---|
| 450 | + elems_req, (void *)buf, buf_size, |
---|
| 451 | + elems_ret, cd); |
---|
444 | 452 | } |
---|
445 | 453 | |
---|
446 | 454 | /** |
---|
447 | 455 | * ice_aq_resume_sched_elems - resume scheduler elements |
---|
448 | | - * @hw: pointer to the hw struct |
---|
| 456 | + * @hw: pointer to the HW struct |
---|
449 | 457 | * @elems_req: number of elements to resume |
---|
450 | 458 | * @buf: pointer to buffer |
---|
451 | 459 | * @buf_size: buffer size in bytes |
---|
.. | .. |
---|
455 | 463 | * resume scheduling elements (0x040A) |
---|
456 | 464 | */ |
---|
457 | 465 | static enum ice_status |
---|
458 | | -ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, |
---|
459 | | - struct ice_aqc_suspend_resume_elem *buf, |
---|
| 466 | +ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf, |
---|
460 | 467 | u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd) |
---|
461 | 468 | { |
---|
462 | | - return ice_suspend_resume_elems(hw, elems_req, buf, buf_size, elems_ret, |
---|
463 | | - cd, ice_aqc_opc_resume_sched_elems); |
---|
| 469 | + return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems, |
---|
| 470 | + elems_req, (void *)buf, buf_size, |
---|
| 471 | + elems_ret, cd); |
---|
464 | 472 | } |
---|
465 | 473 | |
---|
466 | 474 | /** |
---|
467 | 475 | * ice_aq_query_sched_res - query scheduler resource |
---|
468 | | - * @hw: pointer to the hw struct |
---|
| 476 | + * @hw: pointer to the HW struct |
---|
469 | 477 | * @buf_size: buffer size in bytes |
---|
470 | 478 | * @buf: pointer to buffer |
---|
471 | 479 | * @cd: pointer to command details structure or NULL |
---|
.. | .. |
---|
484 | 492 | } |
---|
485 | 493 | |
---|
486 | 494 | /** |
---|
487 | | - * ice_sched_suspend_resume_elems - suspend or resume hw nodes |
---|
488 | | - * @hw: pointer to the hw struct |
---|
| 495 | + * ice_sched_suspend_resume_elems - suspend or resume HW nodes |
---|
| 496 | + * @hw: pointer to the HW struct |
---|
489 | 497 | * @num_nodes: number of nodes |
---|
490 | 498 | * @node_teids: array of node teids to be suspended or resumed |
---|
491 | 499 | * @suspend: true means suspend / false means resume |
---|
492 | 500 | * |
---|
493 | | - * This function suspends or resumes hw nodes |
---|
| 501 | + * This function suspends or resumes HW nodes |
---|
494 | 502 | */ |
---|
495 | 503 | static enum ice_status |
---|
496 | 504 | ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids, |
---|
497 | 505 | bool suspend) |
---|
498 | 506 | { |
---|
499 | | - struct ice_aqc_suspend_resume_elem *buf; |
---|
500 | 507 | u16 i, buf_size, num_elem_ret = 0; |
---|
501 | 508 | enum ice_status status; |
---|
| 509 | + __le32 *buf; |
---|
502 | 510 | |
---|
503 | 511 | buf_size = sizeof(*buf) * num_nodes; |
---|
504 | 512 | buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); |
---|
.. | .. |
---|
506 | 514 | return ICE_ERR_NO_MEMORY; |
---|
507 | 515 | |
---|
508 | 516 | for (i = 0; i < num_nodes; i++) |
---|
509 | | - buf->teid[i] = cpu_to_le32(node_teids[i]); |
---|
| 517 | + buf[i] = cpu_to_le32(node_teids[i]); |
---|
510 | 518 | |
---|
511 | 519 | if (suspend) |
---|
512 | 520 | status = ice_aq_suspend_sched_elems(hw, num_nodes, buf, |
---|
.. | .. |
---|
524 | 532 | } |
---|
525 | 533 | |
---|
526 | 534 | /** |
---|
527 | | - * ice_sched_clear_tx_topo - clears the schduler tree nodes |
---|
| 535 | + * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC |
---|
| 536 | + * @hw: pointer to the HW struct |
---|
| 537 | + * @vsi_handle: VSI handle |
---|
| 538 | + * @tc: TC number |
---|
| 539 | + * @new_numqs: number of queues |
---|
| 540 | + */ |
---|
| 541 | +static enum ice_status |
---|
| 542 | +ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs) |
---|
| 543 | +{ |
---|
| 544 | + struct ice_vsi_ctx *vsi_ctx; |
---|
| 545 | + struct ice_q_ctx *q_ctx; |
---|
| 546 | + |
---|
| 547 | + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); |
---|
| 548 | + if (!vsi_ctx) |
---|
| 549 | + return ICE_ERR_PARAM; |
---|
| 550 | + /* allocate LAN queue contexts */ |
---|
| 551 | + if (!vsi_ctx->lan_q_ctx[tc]) { |
---|
| 552 | + vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw), |
---|
| 553 | + new_numqs, |
---|
| 554 | + sizeof(*q_ctx), |
---|
| 555 | + GFP_KERNEL); |
---|
| 556 | + if (!vsi_ctx->lan_q_ctx[tc]) |
---|
| 557 | + return ICE_ERR_NO_MEMORY; |
---|
| 558 | + vsi_ctx->num_lan_q_entries[tc] = new_numqs; |
---|
| 559 | + return 0; |
---|
| 560 | + } |
---|
| 561 | + /* if the number of queues is increased, update the queue contexts */ |
---|
| 562 | + if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) { |
---|
| 563 | + u16 prev_num = vsi_ctx->num_lan_q_entries[tc]; |
---|
| 564 | + |
---|
| 565 | + q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs, |
---|
| 566 | + sizeof(*q_ctx), GFP_KERNEL); |
---|
| 567 | + if (!q_ctx) |
---|
| 568 | + return ICE_ERR_NO_MEMORY; |
---|
| 569 | + memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc], |
---|
| 570 | + prev_num * sizeof(*q_ctx)); |
---|
| 571 | + devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]); |
---|
| 572 | + vsi_ctx->lan_q_ctx[tc] = q_ctx; |
---|
| 573 | + vsi_ctx->num_lan_q_entries[tc] = new_numqs; |
---|
| 574 | + } |
---|
| 575 | + return 0; |
---|
| 576 | +} |
---|
| 577 | + |
---|
| 578 | +/** |
---|
| 579 | + * ice_aq_rl_profile - performs a rate limiting task |
---|
| 580 | + * @hw: pointer to the HW struct |
---|
| 581 | + * @opcode: opcode for add, query, or remove profile(s) |
---|
| 582 | + * @num_profiles: the number of profiles |
---|
| 583 | + * @buf: pointer to buffer |
---|
| 584 | + * @buf_size: buffer size in bytes |
---|
| 585 | + * @num_processed: number of processed add or remove profile(s) to return |
---|
| 586 | + * @cd: pointer to command details structure |
---|
| 587 | + * |
---|
| 588 | + * RL profile function to add, query, or remove profile(s) |
---|
| 589 | + */ |
---|
| 590 | +static enum ice_status |
---|
| 591 | +ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode, |
---|
| 592 | + u16 num_profiles, struct ice_aqc_rl_profile_elem *buf, |
---|
| 593 | + u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd) |
---|
| 594 | +{ |
---|
| 595 | + struct ice_aqc_rl_profile *cmd; |
---|
| 596 | + struct ice_aq_desc desc; |
---|
| 597 | + enum ice_status status; |
---|
| 598 | + |
---|
| 599 | + cmd = &desc.params.rl_profile; |
---|
| 600 | + |
---|
| 601 | + ice_fill_dflt_direct_cmd_desc(&desc, opcode); |
---|
| 602 | + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); |
---|
| 603 | + cmd->num_profiles = cpu_to_le16(num_profiles); |
---|
| 604 | + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); |
---|
| 605 | + if (!status && num_processed) |
---|
| 606 | + *num_processed = le16_to_cpu(cmd->num_processed); |
---|
| 607 | + return status; |
---|
| 608 | +} |
---|
| 609 | + |
---|
| 610 | +/** |
---|
| 611 | + * ice_aq_add_rl_profile - adds rate limiting profile(s) |
---|
| 612 | + * @hw: pointer to the HW struct |
---|
| 613 | + * @num_profiles: the number of profile(s) to be added |
---|
| 614 | + * @buf: pointer to buffer |
---|
| 615 | + * @buf_size: buffer size in bytes |
---|
| 616 | + * @num_profiles_added: total number of profiles added to return |
---|
| 617 | + * @cd: pointer to command details structure |
---|
| 618 | + * |
---|
| 619 | + * Add RL profile (0x0410) |
---|
| 620 | + */ |
---|
| 621 | +static enum ice_status |
---|
| 622 | +ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles, |
---|
| 623 | + struct ice_aqc_rl_profile_elem *buf, u16 buf_size, |
---|
| 624 | + u16 *num_profiles_added, struct ice_sq_cd *cd) |
---|
| 625 | +{ |
---|
| 626 | + return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles, num_profiles, |
---|
| 627 | + buf, buf_size, num_profiles_added, cd); |
---|
| 628 | +} |
---|
| 629 | + |
---|
| 630 | +/** |
---|
| 631 | + * ice_aq_remove_rl_profile - removes RL profile(s) |
---|
| 632 | + * @hw: pointer to the HW struct |
---|
| 633 | + * @num_profiles: the number of profile(s) to remove |
---|
| 634 | + * @buf: pointer to buffer |
---|
| 635 | + * @buf_size: buffer size in bytes |
---|
| 636 | + * @num_profiles_removed: total number of profiles removed to return |
---|
| 637 | + * @cd: pointer to command details structure or NULL |
---|
| 638 | + * |
---|
| 639 | + * Remove RL profile (0x0415) |
---|
| 640 | + */ |
---|
| 641 | +static enum ice_status |
---|
| 642 | +ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles, |
---|
| 643 | + struct ice_aqc_rl_profile_elem *buf, u16 buf_size, |
---|
| 644 | + u16 *num_profiles_removed, struct ice_sq_cd *cd) |
---|
| 645 | +{ |
---|
| 646 | + return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles, |
---|
| 647 | + num_profiles, buf, buf_size, |
---|
| 648 | + num_profiles_removed, cd); |
---|
| 649 | +} |
---|
| 650 | + |
---|
| 651 | +/** |
---|
| 652 | + * ice_sched_del_rl_profile - remove RL profile |
---|
| 653 | + * @hw: pointer to the HW struct |
---|
| 654 | + * @rl_info: rate limit profile information |
---|
| 655 | + * |
---|
| 656 | + * If the profile ID is not referenced anymore, this removes the profile ID |
---|
| 657 | + * with its associated parameters from the HW DB and locally. The caller |
---|
| 658 | + * needs to hold the scheduler lock. |
---|
| 659 | + */ |
---|
| 660 | +static enum ice_status |
---|
| 661 | +ice_sched_del_rl_profile(struct ice_hw *hw, |
---|
| 662 | + struct ice_aqc_rl_profile_info *rl_info) |
---|
| 663 | +{ |
---|
| 664 | + struct ice_aqc_rl_profile_elem *buf; |
---|
| 665 | + u16 num_profiles_removed; |
---|
| 666 | + enum ice_status status; |
---|
| 667 | + u16 num_profiles = 1; |
---|
| 668 | + |
---|
| 669 | + if (rl_info->prof_id_ref != 0) |
---|
| 670 | + return ICE_ERR_IN_USE; |
---|
| 671 | + |
---|
| 672 | + /* Safe to remove profile ID */ |
---|
| 673 | + buf = &rl_info->profile; |
---|
| 674 | + status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf), |
---|
| 675 | + &num_profiles_removed, NULL); |
---|
| 676 | + if (status || num_profiles_removed != num_profiles) |
---|
| 677 | + return ICE_ERR_CFG; |
---|
| 678 | + |
---|
| 679 | + /* Delete stale entry now */ |
---|
| 680 | + list_del(&rl_info->list_entry); |
---|
| 681 | + devm_kfree(ice_hw_to_dev(hw), rl_info); |
---|
| 682 | + return status; |
---|
| 683 | +} |
---|
| 684 | + |
---|
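ice_sched_del_rl_profile() refuses to touch a profile whose prof_id_ref count is still nonzero, returning ICE_ERR_IN_USE, and frees the list entry itself once the firmware removal succeeds. A hedged sketch of the calling pattern this implies, where a user drops its reference and then attempts the reclaim (the helper name is illustrative, not part of this patch):

```c
/* Illustrative sketch only: drop one reference and try to reclaim the
 * profile; the driver's real callers may differ.
 */
static void
release_rl_profile(struct ice_port_info *pi,
		   struct ice_aqc_rl_profile_info *rl_info)
{
	enum ice_status status;

	if (rl_info->prof_id_ref)
		rl_info->prof_id_ref--;

	/* only succeeds (and frees rl_info) once the last reference is gone */
	status = ice_sched_del_rl_profile(pi->hw, rl_info);
	if (status && status != ICE_ERR_IN_USE)
		ice_debug(pi->hw, ICE_DBG_SCHED, "Remove rl profile failed\n");
}
```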
| 685 | +/** |
---|
| 686 | + * ice_sched_clear_rl_prof - clears RL prof entries |
---|
528 | 687 | * @pi: port information structure |
---|
529 | 688 | * |
---|
530 | | - * This function removes all the nodes from HW as well as from SW DB. |
---|
| 689 | + * This function removes all RL profiles from HW as well as from SW DB. |
---|
531 | 690 | */ |
---|
532 | | -static void ice_sched_clear_tx_topo(struct ice_port_info *pi) |
---|
| 691 | +static void ice_sched_clear_rl_prof(struct ice_port_info *pi) |
---|
| 692 | +{ |
---|
| 693 | + u16 ln; |
---|
| 694 | + |
---|
| 695 | + for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { |
---|
| 696 | + struct ice_aqc_rl_profile_info *rl_prof_elem; |
---|
| 697 | + struct ice_aqc_rl_profile_info *rl_prof_tmp; |
---|
| 698 | + |
---|
| 699 | + list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, |
---|
| 700 | + &pi->rl_prof_list[ln], list_entry) { |
---|
| 701 | + struct ice_hw *hw = pi->hw; |
---|
| 702 | + enum ice_status status; |
---|
| 703 | + |
---|
| 704 | + rl_prof_elem->prof_id_ref = 0; |
---|
| 705 | + status = ice_sched_del_rl_profile(hw, rl_prof_elem); |
---|
| 706 | + if (status) { |
---|
| 707 | + ice_debug(hw, ICE_DBG_SCHED, |
---|
| 708 | + "Remove rl profile failed\n"); |
---|
| 709 | + /* On error, free the memory anyway */ |
---|
| 710 | + list_del(&rl_prof_elem->list_entry); |
---|
| 711 | + devm_kfree(ice_hw_to_dev(hw), rl_prof_elem); |
---|
| 712 | + } |
---|
| 713 | + } |
---|
| 714 | + } |
---|
| 715 | +} |
---|
| 716 | + |
---|
| 717 | +/** |
---|
| 718 | + * ice_sched_clear_agg - clears the aggregator related information |
---|
| 719 | + * @hw: pointer to the hardware structure |
---|
| 720 | + * |
---|
| 721 | + * This function removes the aggregator list and frees up the aggregator-related |
---|
| 722 | + * memory previously allocated. |
---|
| 723 | + */ |
---|
| 724 | +void ice_sched_clear_agg(struct ice_hw *hw) |
---|
533 | 725 | { |
---|
534 | 726 | struct ice_sched_agg_info *agg_info; |
---|
535 | | - struct ice_sched_vsi_info *vsi_elem; |
---|
536 | 727 | struct ice_sched_agg_info *atmp; |
---|
537 | | - struct ice_sched_vsi_info *tmp; |
---|
538 | | - struct ice_hw *hw; |
---|
539 | 728 | |
---|
540 | | - if (!pi) |
---|
541 | | - return; |
---|
542 | | - |
---|
543 | | - hw = pi->hw; |
---|
544 | | - |
---|
545 | | - list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) { |
---|
| 729 | + list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) { |
---|
546 | 730 | struct ice_sched_agg_vsi_info *agg_vsi_info; |
---|
547 | 731 | struct ice_sched_agg_vsi_info *vtmp; |
---|
548 | 732 | |
---|
.. | .. |
---|
551 | 735 | list_del(&agg_vsi_info->list_entry); |
---|
552 | 736 | devm_kfree(ice_hw_to_dev(hw), agg_vsi_info); |
---|
553 | 737 | } |
---|
| 738 | + list_del(&agg_info->list_entry); |
---|
| 739 | + devm_kfree(ice_hw_to_dev(hw), agg_info); |
---|
554 | 740 | } |
---|
| 741 | +} |
---|
555 | 742 | |
---|
556 | | - /* remove the vsi list */ |
---|
557 | | - list_for_each_entry_safe(vsi_elem, tmp, &pi->vsi_info_list, |
---|
558 | | - list_entry) { |
---|
559 | | - list_del(&vsi_elem->list_entry); |
---|
560 | | - devm_kfree(ice_hw_to_dev(hw), vsi_elem); |
---|
561 | | - } |
---|
562 | | - |
---|
| 743 | +/** |
---|
| 744 | + * ice_sched_clear_tx_topo - clears the scheduler tree nodes |
---|
| 745 | + * @pi: port information structure |
---|
| 746 | + * |
---|
| 747 | + * This function removes all the nodes from HW as well as from SW DB. |
---|
| 748 | + */ |
---|
| 749 | +static void ice_sched_clear_tx_topo(struct ice_port_info *pi) |
---|
| 750 | +{ |
---|
| 751 | + if (!pi) |
---|
| 752 | + return; |
---|
| 753 | + /* remove RL profiles related lists */ |
---|
| 754 | + ice_sched_clear_rl_prof(pi); |
---|
563 | 755 | if (pi->root) { |
---|
564 | 756 | ice_free_sched_node(pi, pi->root); |
---|
565 | 757 | pi->root = NULL; |
---|
.. | .. |
---|
572 | 764 | * |
---|
573 | 765 | * Cleanup scheduling elements from SW DB |
---|
574 | 766 | */ |
---|
575 | | -static void ice_sched_clear_port(struct ice_port_info *pi) |
---|
| 767 | +void ice_sched_clear_port(struct ice_port_info *pi) |
---|
576 | 768 | { |
---|
577 | 769 | if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) |
---|
578 | 770 | return; |
---|
.. | .. |
---|
586 | 778 | |
---|
587 | 779 | /** |
---|
588 | 780 | * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports |
---|
589 | | - * @hw: pointer to the hw struct |
---|
| 781 | + * @hw: pointer to the HW struct |
---|
590 | 782 | * |
---|
591 | 783 | * Cleanup scheduling elements from SW DB for all the ports |
---|
592 | 784 | */ |
---|
593 | 785 | void ice_sched_cleanup_all(struct ice_hw *hw) |
---|
594 | 786 | { |
---|
595 | | - if (!hw || !hw->port_info) |
---|
| 787 | + if (!hw) |
---|
596 | 788 | return; |
---|
597 | 789 | |
---|
598 | | - if (hw->layer_info) |
---|
| 790 | + if (hw->layer_info) { |
---|
599 | 791 | devm_kfree(ice_hw_to_dev(hw), hw->layer_info); |
---|
| 792 | + hw->layer_info = NULL; |
---|
| 793 | + } |
---|
600 | 794 | |
---|
601 | 795 | ice_sched_clear_port(hw->port_info); |
---|
602 | 796 | |
---|
.. | .. |
---|
607 | 801 | } |
---|
608 | 802 | |
---|
609 | 803 | /** |
---|
610 | | - * ice_sched_create_vsi_info_entry - create an empty new VSI entry |
---|
611 | | - * @pi: port information structure |
---|
612 | | - * @vsi_id: VSI Id |
---|
613 | | - * |
---|
614 | | - * This function creates a new VSI entry and adds it to list |
---|
615 | | - */ |
---|
616 | | -static struct ice_sched_vsi_info * |
---|
617 | | -ice_sched_create_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) |
---|
618 | | -{ |
---|
619 | | - struct ice_sched_vsi_info *vsi_elem; |
---|
620 | | - |
---|
621 | | - if (!pi) |
---|
622 | | - return NULL; |
---|
623 | | - |
---|
624 | | - vsi_elem = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*vsi_elem), |
---|
625 | | - GFP_KERNEL); |
---|
626 | | - if (!vsi_elem) |
---|
627 | | - return NULL; |
---|
628 | | - |
---|
629 | | - list_add(&vsi_elem->list_entry, &pi->vsi_info_list); |
---|
630 | | - vsi_elem->vsi_id = vsi_id; |
---|
631 | | - return vsi_elem; |
---|
632 | | -} |
---|
633 | | - |
---|
634 | | -/** |
---|
635 | | - * ice_sched_add_elems - add nodes to hw and SW DB |
---|
| 804 | + * ice_sched_add_elems - add nodes to HW and SW DB |
---|
636 | 805 | * @pi: port information structure |
---|
637 | 806 | * @tc_node: pointer to the branch node |
---|
638 | 807 | * @parent: pointer to the parent node |
---|
639 | 808 | * @layer: layer number to add nodes |
---|
640 | 809 | * @num_nodes: number of nodes |
---|
641 | 810 | * @num_nodes_added: pointer to num nodes added |
---|
642 | | - * @first_node_teid: if new nodes are added then return the teid of first node |
---|
| 811 | + * @first_node_teid: if new nodes are added then return the TEID of first node |
---|
643 | 812 | * |
---|
644 | | - * This function add nodes to hw as well as to SW DB for a given layer |
---|
| 813 | + * This function adds nodes to HW as well as to SW DB for a given layer |
---|
645 | 814 | */ |
---|
646 | 815 | static enum ice_status |
---|
647 | 816 | ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, |
---|
.. | .. |
---|
653 | 822 | u16 i, num_groups_added = 0; |
---|
654 | 823 | enum ice_status status = 0; |
---|
655 | 824 | struct ice_hw *hw = pi->hw; |
---|
656 | | - u16 buf_size; |
---|
| 825 | + size_t buf_size; |
---|
657 | 826 | u32 teid; |
---|
658 | 827 | |
---|
659 | | - buf_size = sizeof(*buf) + sizeof(*buf->generic) * (num_nodes - 1); |
---|
| 828 | + buf_size = struct_size(buf, generic, num_nodes); |
---|
660 | 829 | buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL); |
---|
661 | 830 | if (!buf) |
---|
662 | 831 | return ICE_ERR_NO_MEMORY; |
---|
.. | .. |
---|
671 | 840 | ICE_AQC_ELEM_VALID_EIR; |
---|
672 | 841 | buf->generic[i].data.generic = 0; |
---|
673 | 842 | buf->generic[i].data.cir_bw.bw_profile_idx = |
---|
674 | | - ICE_SCHED_DFLT_RL_PROF_ID; |
---|
| 843 | + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); |
---|
| 844 | + buf->generic[i].data.cir_bw.bw_alloc = |
---|
| 845 | + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); |
---|
675 | 846 | buf->generic[i].data.eir_bw.bw_profile_idx = |
---|
676 | | - ICE_SCHED_DFLT_RL_PROF_ID; |
---|
| 847 | + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); |
---|
| 848 | + buf->generic[i].data.eir_bw.bw_alloc = |
---|
| 849 | + cpu_to_le16(ICE_SCHED_DFLT_BW_WT); |
---|
677 | 850 | } |
---|
678 | 851 | |
---|
679 | 852 | status = ice_aq_add_sched_elems(hw, 1, buf, buf_size, |
---|
680 | 853 | &num_groups_added, NULL); |
---|
681 | 854 | if (status || num_groups_added != 1) { |
---|
682 | | - ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n"); |
---|
| 855 | + ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n", |
---|
| 856 | + hw->adminq.sq_last_status); |
---|
683 | 857 | devm_kfree(ice_hw_to_dev(hw), buf); |
---|
684 | 858 | return ICE_ERR_CFG; |
---|
685 | 859 | } |
---|
.. | .. |
---|
697 | 871 | |
---|
698 | 872 | teid = le32_to_cpu(buf->generic[i].node_teid); |
---|
699 | 873 | new_node = ice_sched_find_node_by_teid(parent, teid); |
---|
700 | | - |
---|
701 | 874 | if (!new_node) { |
---|
702 | 875 | ice_debug(hw, ICE_DBG_SCHED, |
---|
703 | 876 | "Node is missing for teid =%d\n", teid); |
---|
.. | .. |
---|
709 | 882 | |
---|
710 | 883 | /* add it to previous node sibling pointer */ |
---|
711 | 884 | /* Note: siblings are not linked across branches */ |
---|
712 | | - prev = ice_sched_get_first_node(hw, tc_node, layer); |
---|
713 | | - |
---|
| 885 | + prev = ice_sched_get_first_node(pi, tc_node, layer); |
---|
714 | 886 | if (prev && prev != new_node) { |
---|
715 | 887 | while (prev->sibling) |
---|
716 | 888 | prev = prev->sibling; |
---|
717 | 889 | prev->sibling = new_node; |
---|
718 | 890 | } |
---|
| 891 | + |
---|
| 892 | + /* initialize the sibling head */ |
---|
| 893 | + if (!pi->sib_head[tc_node->tc_num][layer]) |
---|
| 894 | + pi->sib_head[tc_node->tc_num][layer] = new_node; |
---|
719 | 895 | |
---|
720 | 896 | if (i == 0) |
---|
721 | 897 | *first_node_teid = teid; |
---|
.. | .. |
---|
732 | 908 | * @parent: pointer to parent node |
---|
733 | 909 | * @layer: layer number to add nodes |
---|
734 | 910 | * @num_nodes: number of nodes to be added |
---|
735 | | - * @first_node_teid: pointer to the first node teid |
---|
| 911 | + * @first_node_teid: pointer to the first node TEID |
---|
736 | 912 | * @num_nodes_added: pointer to number of nodes added |
---|
737 | 913 | * |
---|
738 | 914 | * This function add nodes to a given layer. |
---|
.. | .. |
---|
760 | 936 | return ICE_ERR_PARAM; |
---|
761 | 937 | |
---|
762 | 938 | /* max children per node per layer */ |
---|
763 | | - max_child_nodes = |
---|
764 | | - le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); |
---|
| 939 | + max_child_nodes = hw->max_children[parent->tx_sched_layer]; |
---|
765 | 940 | |
---|
766 | 941 | /* current number of children + required nodes exceed max children ? */ |
---|
767 | 942 | if ((parent->num_children + num_nodes) > max_child_nodes) { |
---|
.. | .. |
---|
785 | 960 | |
---|
786 | 961 | *num_nodes_added += num_added; |
---|
787 | 962 | } |
---|
788 | | - /* Don't modify the first node teid memory if the first node was |
---|
| 963 | + /* Don't modify the first node TEID memory if the first node was |
---|
789 | 964 | * added already in the above call. Instead send some temp |
---|
790 | 965 | * memory for all other recursive calls. |
---|
791 | 966 | */ |
---|
.. | .. |
---|
817 | 992 | |
---|
818 | 993 | /** |
---|
819 | 994 | * ice_sched_get_qgrp_layer - get the current queue group layer number |
---|
820 | | - * @hw: pointer to the hw struct |
---|
| 995 | + * @hw: pointer to the HW struct |
---|
821 | 996 | * |
---|
822 | 997 | * This function returns the current queue group layer number |
---|
823 | 998 | */ |
---|
.. | .. |
---|
829 | 1004 | |
---|
830 | 1005 | /** |
---|
831 | 1006 | * ice_sched_get_vsi_layer - get the current VSI layer number |
---|
832 | | - * @hw: pointer to the hw struct |
---|
| 1007 | + * @hw: pointer to the HW struct |
---|
833 | 1008 | * |
---|
834 | 1009 | * This function returns the current VSI layer number |
---|
835 | 1010 | */ |
---|
.. | .. |
---|
840 | 1015 | * 7 4 |
---|
841 | 1016 | * 5 or less sw_entry_point_layer |
---|
842 | 1017 | */ |
---|
843 | | - /* calculate the vsi layer based on number of layers. */ |
---|
| 1018 | + /* calculate the VSI layer based on number of layers. */ |
---|
844 | 1019 | if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { |
---|
845 | 1020 | u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; |
---|
846 | 1021 | |
---|
.. | .. |
---|
851 | 1026 | } |
---|
852 | 1027 | |
---|
853 | 1028 | /** |
---|
854 | | - * ice_sched_get_num_nodes_per_layer - Get the total number of nodes per layer |
---|
855 | | - * @pi: pointer to the port info struct |
---|
856 | | - * @layer: layer number |
---|
857 | | - * |
---|
858 | | - * This function calculates the number of nodes present in the scheduler tree |
---|
859 | | - * including all the branches for a given layer |
---|
860 | | - */ |
---|
861 | | -static u16 |
---|
862 | | -ice_sched_get_num_nodes_per_layer(struct ice_port_info *pi, u8 layer) |
---|
863 | | -{ |
---|
864 | | - struct ice_hw *hw; |
---|
865 | | - u16 num_nodes = 0; |
---|
866 | | - u8 i; |
---|
867 | | - |
---|
868 | | - if (!pi) |
---|
869 | | - return num_nodes; |
---|
870 | | - |
---|
871 | | - hw = pi->hw; |
---|
872 | | - |
---|
873 | | - /* Calculate the number of nodes for all TCs */ |
---|
874 | | - for (i = 0; i < pi->root->num_children; i++) { |
---|
875 | | - struct ice_sched_node *tc_node, *node; |
---|
876 | | - |
---|
877 | | - tc_node = pi->root->children[i]; |
---|
878 | | - |
---|
879 | | - /* Get the first node */ |
---|
880 | | - node = ice_sched_get_first_node(hw, tc_node, layer); |
---|
881 | | - if (!node) |
---|
882 | | - continue; |
---|
883 | | - |
---|
884 | | - /* count the siblings */ |
---|
885 | | - while (node) { |
---|
886 | | - num_nodes++; |
---|
887 | | - node = node->sibling; |
---|
888 | | - } |
---|
889 | | - } |
---|
890 | | - |
---|
891 | | - return num_nodes; |
---|
892 | | -} |
---|
893 | | - |
---|
894 | | -/** |
---|
895 | | - * ice_sched_val_max_nodes - check max number of nodes reached or not |
---|
896 | | - * @pi: port information structure |
---|
897 | | - * @new_num_nodes_per_layer: pointer to the new number of nodes array |
---|
898 | | - * |
---|
899 | | - * This function checks whether the scheduler tree layers have enough space to |
---|
900 | | - * add new nodes |
---|
901 | | - */ |
---|
902 | | -static enum ice_status |
---|
903 | | -ice_sched_validate_for_max_nodes(struct ice_port_info *pi, |
---|
904 | | - u16 *new_num_nodes_per_layer) |
---|
905 | | -{ |
---|
906 | | - struct ice_hw *hw = pi->hw; |
---|
907 | | - u8 i, qg_layer; |
---|
908 | | - u16 num_nodes; |
---|
909 | | - |
---|
910 | | - qg_layer = ice_sched_get_qgrp_layer(hw); |
---|
911 | | - |
---|
912 | | - /* walk through all the layers from SW entry point to qgroup layer */ |
---|
913 | | - for (i = hw->sw_entry_point_layer; i <= qg_layer; i++) { |
---|
914 | | - num_nodes = ice_sched_get_num_nodes_per_layer(pi, i); |
---|
915 | | - if (num_nodes + new_num_nodes_per_layer[i] > |
---|
916 | | - le16_to_cpu(hw->layer_info[i].max_pf_nodes)) { |
---|
917 | | - ice_debug(hw, ICE_DBG_SCHED, |
---|
918 | | - "max nodes reached for layer = %d\n", i); |
---|
919 | | - return ICE_ERR_CFG; |
---|
920 | | - } |
---|
921 | | - } |
---|
922 | | - return 0; |
---|
923 | | -} |
---|
924 | | - |
---|
925 | | -/** |
---|
926 | 1029 | * ice_rm_dflt_leaf_node - remove the default leaf node in the tree |
---|
927 | 1030 | * @pi: port information structure |
---|
928 | 1031 | * |
---|
929 | 1032 | * This function removes the leaf node that was created by the FW |
---|
930 | 1033 | * during initialization |
---|
931 | 1034 | */ |
---|
932 | | -static void |
---|
933 | | -ice_rm_dflt_leaf_node(struct ice_port_info *pi) |
---|
| 1035 | +static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) |
---|
934 | 1036 | { |
---|
935 | 1037 | struct ice_sched_node *node; |
---|
936 | 1038 | |
---|
.. | .. |
---|
958 | 1060 | * This function frees all the nodes except root and TC that were created by |
---|
959 | 1061 | * the FW during initialization |
---|
960 | 1062 | */ |
---|
961 | | -static void |
---|
962 | | -ice_sched_rm_dflt_nodes(struct ice_port_info *pi) |
---|
| 1063 | +static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) |
---|
963 | 1064 | { |
---|
964 | 1065 | struct ice_sched_node *node; |
---|
965 | 1066 | |
---|
.. | .. |
---|
1003 | 1104 | hw = pi->hw; |
---|
1004 | 1105 | |
---|
1005 | 1106 | /* Query the Default Topology from FW */ |
---|
1006 | | - buf = devm_kcalloc(ice_hw_to_dev(hw), ICE_TXSCHED_MAX_BRANCHES, |
---|
1007 | | - sizeof(*buf), GFP_KERNEL); |
---|
| 1107 | + buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); |
---|
1008 | 1108 | if (!buf) |
---|
1009 | 1109 | return ICE_ERR_NO_MEMORY; |
---|
1010 | 1110 | |
---|
1011 | 1111 | /* Query default scheduling tree topology */ |
---|
1012 | | - status = ice_aq_get_dflt_topo(hw, pi->lport, buf, |
---|
1013 | | - sizeof(*buf) * ICE_TXSCHED_MAX_BRANCHES, |
---|
| 1112 | + status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN, |
---|
1014 | 1113 | &num_branches, NULL); |
---|
1015 | 1114 | if (status) |
---|
1016 | 1115 | goto err_init_port; |
---|
.. | .. |
---|
1034 | 1133 | goto err_init_port; |
---|
1035 | 1134 | } |
---|
1036 | 1135 | |
---|
1037 | | - /* If the last node is a leaf node then the index of the Q group |
---|
| 1136 | + /* If the last node is a leaf node then the index of the queue group |
---|
1038 | 1137 | * layer is two less than the number of elements. |
---|
1039 | 1138 | */ |
---|
1040 | 1139 | if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type == |
---|
.. | .. |
---|
1074 | 1173 | /* initialize the port for handling the scheduler tree */ |
---|
1075 | 1174 | pi->port_state = ICE_SCHED_PORT_STATE_READY; |
---|
1076 | 1175 | mutex_init(&pi->sched_lock); |
---|
1077 | | - INIT_LIST_HEAD(&pi->agg_list); |
---|
1078 | | - INIT_LIST_HEAD(&pi->vsi_info_list); |
---|
| 1176 | + for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) |
---|
| 1177 | + INIT_LIST_HEAD(&pi->rl_prof_list[i]); |
---|
1079 | 1178 | |
---|
1080 | 1179 | err_init_port: |
---|
1081 | 1180 | if (status && pi->root) { |
---|
.. | .. |
---|
1097 | 1196 | { |
---|
1098 | 1197 | struct ice_aqc_query_txsched_res_resp *buf; |
---|
1099 | 1198 | enum ice_status status = 0; |
---|
| 1199 | + __le16 max_sibl; |
---|
| 1200 | + u16 i; |
---|
1100 | 1201 | |
---|
1101 | 1202 | if (hw->layer_info) |
---|
1102 | 1203 | return status; |
---|
.. | .. |
---|
1115 | 1216 | hw->flattened_layers = buf->sched_props.flattening_bitmap; |
---|
1116 | 1217 | hw->max_cgds = buf->sched_props.max_pf_cgds; |
---|
1117 | 1218 | |
---|
1118 | | - hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, |
---|
1119 | | - (hw->num_tx_sched_layers * |
---|
1120 | | - sizeof(*hw->layer_info)), |
---|
1121 | | - GFP_KERNEL); |
---|
| 1219 | + /* max sibling group size of current layer refers to the max children |
---|
| 1220 | + * of the below layer node. |
---|
| 1221 | + * layer 1 node max children will be layer 2 max sibling group size |
---|
| 1222 | + * layer 2 node max children will be layer 3 max sibling group size |
---|
| 1223 | + * and so on. This array will be populated from root (index 0) to |
---|
| 1224 | + * qgroup layer 7. Leaf node has no children. |
---|
| 1225 | + */ |
---|
| 1226 | + for (i = 0; i < hw->num_tx_sched_layers - 1; i++) { |
---|
| 1227 | + max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz; |
---|
| 1228 | + hw->max_children[i] = le16_to_cpu(max_sibl); |
---|
| 1229 | + } |
---|
| 1230 | + |
---|
| 1231 | + hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props, |
---|
| 1232 | + (hw->num_tx_sched_layers * |
---|
| 1233 | + sizeof(*hw->layer_info)), |
---|
| 1234 | + GFP_KERNEL); |
---|
1122 | 1235 | if (!hw->layer_info) { |
---|
1123 | 1236 | status = ICE_ERR_NO_MEMORY; |
---|
1124 | 1237 | goto sched_query_out; |
---|
.. | .. |
---|
1130 | 1243 | } |
---|
1131 | 1244 | |
---|
1132 | 1245 | /** |
---|
1133 | | - * ice_sched_get_vsi_info_entry - Get the vsi entry list for given vsi_id |
---|
1134 | | - * @pi: port information structure |
---|
1135 | | - * @vsi_id: vsi id |
---|
1136 | | - * |
---|
1137 | | - * This function retrieves the vsi list for the given vsi id |
---|
1138 | | - */ |
---|
1139 | | -static struct ice_sched_vsi_info * |
---|
1140 | | -ice_sched_get_vsi_info_entry(struct ice_port_info *pi, u16 vsi_id) |
---|
1141 | | -{ |
---|
1142 | | - struct ice_sched_vsi_info *list_elem; |
---|
1143 | | - |
---|
1144 | | - if (!pi) |
---|
1145 | | - return NULL; |
---|
1146 | | - |
---|
1147 | | - list_for_each_entry(list_elem, &pi->vsi_info_list, list_entry) |
---|
1148 | | - if (list_elem->vsi_id == vsi_id) |
---|
1149 | | - return list_elem; |
---|
1150 | | - return NULL; |
---|
1151 | | -} |
---|
1152 | | - |
---|
1153 | | -/** |
---|
1154 | 1246 | * ice_sched_find_node_in_subtree - Find node in part of base node subtree |
---|
1155 | | - * @hw: pointer to the hw struct |
---|
| 1247 | + * @hw: pointer to the HW struct |
---|
1156 | 1248 | * @base: pointer to the base node |
---|
1157 | 1249 | * @node: pointer to the node to search |
---|
1158 | 1250 | * |
---|
.. | .. |
---|
1184 | 1276 | } |
---|
1185 | 1277 | |
---|
1186 | 1278 | /** |
---|
1187 | | - * ice_sched_get_free_qparent - Get a free lan or rdma q group node |
---|
| 1279 | + * ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node |
---|
1188 | 1280 | * @pi: port information structure |
---|
1189 | | - * @vsi_id: vsi id |
---|
1190 | | - * @tc: branch number |
---|
1191 | | - * @owner: lan or rdma |
---|
| 1281 | + * @vsi_node: pointer to the VSI node in the scheduler tree |
---|
| 1282 | + * @qgrp_node: first queue group node identified for scanning |
---|
| 1283 | + * @owner: LAN or RDMA |
---|
1192 | 1284 | * |
---|
1193 | | - * This function retrieves a free lan or rdma q group node |
---|
| 1285 | + * This function retrieves a free LAN or RDMA queue group node by scanning |
---|
| 1286 | + * qgrp_node and its siblings for the queue group with the fewest number |
---|
| 1287 | + * of queues currently assigned. |
---|
| 1288 | + */ |
---|
| 1289 | +static struct ice_sched_node * |
---|
| 1290 | +ice_sched_get_free_qgrp(struct ice_port_info *pi, |
---|
| 1291 | + struct ice_sched_node *vsi_node, |
---|
| 1292 | + struct ice_sched_node *qgrp_node, u8 owner) |
---|
| 1293 | +{ |
---|
| 1294 | + struct ice_sched_node *min_qgrp; |
---|
| 1295 | + u8 min_children; |
---|
| 1296 | + |
---|
| 1297 | + if (!qgrp_node) |
---|
| 1298 | + return qgrp_node; |
---|
| 1299 | + min_children = qgrp_node->num_children; |
---|
| 1300 | + if (!min_children) |
---|
| 1301 | + return qgrp_node; |
---|
| 1302 | + min_qgrp = qgrp_node; |
---|
| 1303 | + /* scan all queue groups until we find a node which has fewer than the |
---|
| 1304 | + * current minimum number of children. This way all queue group nodes |
---|
| 1305 | + * get an equal number of shares and stay active, and the bandwidth is |
---|
| 1306 | + * equally distributed across all queues. |
---|
| 1307 | + */ |
---|
| 1308 | + while (qgrp_node) { |
---|
| 1309 | + /* make sure the qgroup node is part of the VSI subtree */ |
---|
| 1310 | + if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) |
---|
| 1311 | + if (qgrp_node->num_children < min_children && |
---|
| 1312 | + qgrp_node->owner == owner) { |
---|
| 1313 | + /* replace the new min queue group node */ |
---|
| 1314 | + min_qgrp = qgrp_node; |
---|
| 1315 | + min_children = min_qgrp->num_children; |
---|
| 1316 | + /* break if it has no children, */ |
---|
| 1317 | + if (!min_children) |
---|
| 1318 | + break; |
---|
| 1319 | + } |
---|
| 1320 | + qgrp_node = qgrp_node->sibling; |
---|
| 1321 | + } |
---|
| 1322 | + return min_qgrp; |
---|
| 1323 | +} |
---|
| 1324 | + |
---|
| 1325 | +/** |
---|
| 1326 | + * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node |
---|
| 1327 | + * @pi: port information structure |
---|
| 1328 | + * @vsi_handle: software VSI handle |
---|
| 1329 | + * @tc: branch number |
---|
| 1330 | + * @owner: LAN or RDMA |
---|
| 1331 | + * |
---|
| 1332 | + * This function retrieves a free LAN or RDMA queue group node |
---|
1194 | 1333 | */ |
---|
1195 | 1334 | struct ice_sched_node * |
---|
1196 | | -ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_id, u8 tc, |
---|
| 1335 | +ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
---|
1197 | 1336 | u8 owner) |
---|
1198 | 1337 | { |
---|
1199 | | - struct ice_sched_node *vsi_node, *qgrp_node = NULL; |
---|
1200 | | - struct ice_sched_vsi_info *list_elem; |
---|
| 1338 | + struct ice_sched_node *vsi_node, *qgrp_node; |
---|
| 1339 | + struct ice_vsi_ctx *vsi_ctx; |
---|
1201 | 1340 | u16 max_children; |
---|
1202 | 1341 | u8 qgrp_layer; |
---|
1203 | 1342 | |
---|
1204 | 1343 | qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); |
---|
1205 | | - max_children = le16_to_cpu(pi->hw->layer_info[qgrp_layer].max_children); |
---|
| 1344 | + max_children = pi->hw->max_children[qgrp_layer]; |
---|
1206 | 1345 | |
---|
1207 | | - list_elem = ice_sched_get_vsi_info_entry(pi, vsi_id); |
---|
1208 | | - if (!list_elem) |
---|
1209 | | - goto lan_q_exit; |
---|
1210 | | - |
---|
1211 | | - vsi_node = list_elem->vsi_node[tc]; |
---|
1212 | | - |
---|
1213 | | - /* validate invalid VSI id */ |
---|
| 1346 | + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); |
---|
| 1347 | + if (!vsi_ctx) |
---|
| 1348 | + return NULL; |
---|
| 1349 | + vsi_node = vsi_ctx->sched.vsi_node[tc]; |
---|
| 1350 | + /* validate invalid VSI ID */ |
---|
1214 | 1351 | if (!vsi_node) |
---|
1215 | | - goto lan_q_exit; |
---|
| 1352 | + return NULL; |
---|
1216 | 1353 | |
---|
1217 | | - /* get the first q group node from VSI sub-tree */ |
---|
1218 | | - qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer); |
---|
| 1354 | + /* get the first queue group node from VSI sub-tree */ |
---|
| 1355 | + qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); |
---|
1219 | 1356 | while (qgrp_node) { |
---|
1220 | 1357 | /* make sure the qgroup node is part of the VSI subtree */ |
---|
1221 | 1358 | if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node)) |
---|
.. | .. |
---|
1225 | 1362 | qgrp_node = qgrp_node->sibling; |
---|
1226 | 1363 | } |
---|
1227 | 1364 | |
---|
1228 | | -lan_q_exit: |
---|
1229 | | - return qgrp_node; |
---|
| 1365 | + /* Select the best queue group */ |
---|
| 1366 | + return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner); |
---|
1230 | 1367 | } |
---|
1231 | 1368 | |
---|
1232 | 1369 | /** |
---|
1233 | | - * ice_sched_get_vsi_node - Get a VSI node based on VSI id |
---|
1234 | | - * @hw: pointer to the hw struct |
---|
| 1370 | + * ice_sched_get_vsi_node - Get a VSI node based on VSI ID |
---|
| 1371 | + * @hw: pointer to the HW struct |
---|
1235 | 1372 | * @tc_node: pointer to the TC node |
---|
1236 | | - * @vsi_id: VSI id |
---|
| 1373 | + * @vsi_handle: software VSI handle |
---|
1237 | 1374 | * |
---|
1238 | | - * This function retrieves a VSI node for a given VSI id from a given |
---|
| 1375 | + * This function retrieves a VSI node for a given VSI handle from a given |
---|
1239 | 1376 | * TC branch |
---|
1240 | 1377 | */ |
---|
1241 | 1378 | static struct ice_sched_node * |
---|
1242 | 1379 | ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node, |
---|
1243 | | - u16 vsi_id) |
---|
| 1380 | + u16 vsi_handle) |
---|
1244 | 1381 | { |
---|
1245 | 1382 | struct ice_sched_node *node; |
---|
1246 | 1383 | u8 vsi_layer; |
---|
1247 | 1384 | |
---|
1248 | 1385 | vsi_layer = ice_sched_get_vsi_layer(hw); |
---|
1249 | | - node = ice_sched_get_first_node(hw, tc_node, vsi_layer); |
---|
| 1386 | + node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer); |
---|
1250 | 1387 | |
---|
1251 | 1388 | /* Check whether it already exists */ |
---|
1252 | 1389 | while (node) { |
---|
1253 | | - if (node->vsi_id == vsi_id) |
---|
| 1390 | + if (node->vsi_handle == vsi_handle) |
---|
1254 | 1391 | return node; |
---|
1255 | 1392 | node = node->sibling; |
---|
1256 | 1393 | } |
---|
.. | .. |
---|
1260 | 1397 | |
---|
1261 | 1398 | /** |
---|
1262 | 1399 | * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes |
---|
1263 | | - * @hw: pointer to the hw struct |
---|
| 1400 | + * @hw: pointer to the HW struct |
---|
1264 | 1401 | * @num_qs: number of queues |
---|
1265 | 1402 | * @num_nodes: num nodes array |
---|
1266 | 1403 | * |
---|
.. | .. |
---|
1276 | 1413 | qgl = ice_sched_get_qgrp_layer(hw); |
---|
1277 | 1414 | vsil = ice_sched_get_vsi_layer(hw); |
---|
1278 | 1415 | |
---|
1279 | | - /* calculate num nodes from q group to VSI layer */ |
---|
| 1416 | + /* calculate num nodes from queue group to VSI layer */ |
---|
1280 | 1417 | for (i = qgl; i > vsil; i--) { |
---|
1281 | | - u16 max_children = le16_to_cpu(hw->layer_info[i].max_children); |
---|
1282 | | - |
---|
1283 | 1418 | /* round to the next integer if there is a remainder */ |
---|
1284 | | - num = DIV_ROUND_UP(num, max_children); |
---|
| 1419 | + num = DIV_ROUND_UP(num, hw->max_children[i]); |
---|
1285 | 1420 | |
---|
1286 | 1421 | /* need at least one node */ |
---|
1287 | 1422 | num_nodes[i] = num ? num : 1; |
---|
.. | .. |
---|
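
The child-node calculation above works from the queue-group layer up toward the VSI layer: the requested queue count is divided by each layer's fan-out (hw->max_children[i], reported by the device), rounding up and never planning fewer than one node per layer. A standalone sketch of the same arithmetic with made-up fan-out values:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* hypothetical fan-out per layer, queue-group layer first;
	 * the real limits come from the device's scheduler capabilities
	 */
	unsigned int max_children[] = { 8, 4, 2 };
	unsigned int num_nodes[3];
	unsigned int num = 100;		/* queues requested */
	unsigned int i;

	for (i = 0; i < 3; i++) {
		/* round up, but always plan at least one node per layer */
		num = DIV_ROUND_UP(num, max_children[i]);
		num_nodes[i] = num ? num : 1;
		printf("layer %u needs %u node(s)\n", i, num_nodes[i]);
	}
	/* 100 queues -> 13 queue groups -> 4 nodes -> 2 nodes */
	return 0;
}
```
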
1291 | 1426 | /** |
---|
1292 | 1427 | * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree |
---|
1293 | 1428 | * @pi: port information structure |
---|
1294 | | - * @vsi_id: VSI id |
---|
| 1429 | + * @vsi_handle: software VSI handle |
---|
1295 | 1430 | * @tc_node: pointer to the TC node |
---|
1296 | 1431 | * @num_nodes: pointer to the num nodes that needs to be added per layer |
---|
1297 | | - * @owner: node owner (lan or rdma) |
---|
| 1432 | + * @owner: node owner (LAN or RDMA) |
---|
1298 | 1433 | * |
---|
1299 | 1434 | * This function adds the VSI child nodes to tree. It gets called for |
---|
1300 | | - * lan and rdma separately. |
---|
| 1435 | + * LAN and RDMA separately. |
---|
1301 | 1436 | */ |
---|
1302 | 1437 | static enum ice_status |
---|
1303 | | -ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, |
---|
| 1438 | +ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, |
---|
1304 | 1439 | struct ice_sched_node *tc_node, u16 *num_nodes, |
---|
1305 | 1440 | u8 owner) |
---|
1306 | 1441 | { |
---|
.. | .. |
---|
1311 | 1446 | u16 num_added = 0; |
---|
1312 | 1447 | u8 i, qgl, vsil; |
---|
1313 | 1448 | |
---|
1314 | | - status = ice_sched_validate_for_max_nodes(pi, num_nodes); |
---|
1315 | | - if (status) |
---|
1316 | | - return status; |
---|
1317 | | - |
---|
1318 | 1449 | qgl = ice_sched_get_qgrp_layer(hw); |
---|
1319 | 1450 | vsil = ice_sched_get_vsi_layer(hw); |
---|
1320 | | - parent = ice_sched_get_vsi_node(hw, tc_node, vsi_id); |
---|
| 1451 | + parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); |
---|
1321 | 1452 | for (i = vsil + 1; i <= qgl; i++) { |
---|
1322 | 1453 | if (!parent) |
---|
1323 | 1454 | return ICE_ERR_CFG; |
---|
| 1455 | + |
---|
1324 | 1456 | status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i, |
---|
1325 | 1457 | num_nodes[i], |
---|
1326 | 1458 | &first_node_teid, |
---|
.. | .. |
---|
1348 | 1480 | } |
---|
1349 | 1481 | |
---|
1350 | 1482 | /** |
---|
1351 | | - * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree |
---|
1352 | | - * @pi: port information structure |
---|
1353 | | - * @vsi_node: pointer to the VSI node |
---|
1354 | | - * @num_nodes: pointer to the num nodes that needs to be removed per layer |
---|
1355 | | - * @owner: node owner (lan or rdma) |
---|
1356 | | - * |
---|
1357 | | - * This function removes the VSI child nodes from the tree. It gets called for |
---|
1358 | | - * lan and rdma separately. |
---|
1359 | | - */ |
---|
1360 | | -static void |
---|
1361 | | -ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi, |
---|
1362 | | - struct ice_sched_node *vsi_node, u16 *num_nodes, |
---|
1363 | | - u8 owner) |
---|
1364 | | -{ |
---|
1365 | | - struct ice_sched_node *node, *next; |
---|
1366 | | - u8 i, qgl, vsil; |
---|
1367 | | - u16 num; |
---|
1368 | | - |
---|
1369 | | - qgl = ice_sched_get_qgrp_layer(pi->hw); |
---|
1370 | | - vsil = ice_sched_get_vsi_layer(pi->hw); |
---|
1371 | | - |
---|
1372 | | - for (i = qgl; i > vsil; i--) { |
---|
1373 | | - num = num_nodes[i]; |
---|
1374 | | - node = ice_sched_get_first_node(pi->hw, vsi_node, i); |
---|
1375 | | - while (node && num) { |
---|
1376 | | - next = node->sibling; |
---|
1377 | | - if (node->owner == owner && !node->num_children) { |
---|
1378 | | - ice_free_sched_node(pi, node); |
---|
1379 | | - num--; |
---|
1380 | | - } |
---|
1381 | | - node = next; |
---|
1382 | | - } |
---|
1383 | | - } |
---|
1384 | | -} |
---|
1385 | | - |
---|
1386 | | -/** |
---|
1387 | 1483 | * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes |
---|
1388 | | - * @hw: pointer to the hw struct |
---|
| 1484 | + * @hw: pointer to the HW struct |
---|
1389 | 1485 | * @tc_node: pointer to TC node |
---|
1390 | 1486 | * @num_nodes: pointer to num nodes array |
---|
1391 | 1487 | * |
---|
1392 | 1488 | * This function calculates the number of supported nodes needed to add this |
---|
1393 | | - * VSI into tx tree including the VSI, parent and intermediate nodes in below |
---|
| 1489 | + * VSI into Tx tree including the VSI, parent and intermediate nodes in below |
---|
1394 | 1490 | * layers |
---|
1395 | 1491 | */ |
---|
1396 | 1492 | static void |
---|
.. | .. |
---|
1398 | 1494 | struct ice_sched_node *tc_node, u16 *num_nodes) |
---|
1399 | 1495 | { |
---|
1400 | 1496 | struct ice_sched_node *node; |
---|
1401 | | - u16 max_child; |
---|
1402 | | - u8 i, vsil; |
---|
| 1497 | + u8 vsil; |
---|
| 1498 | + int i; |
---|
1403 | 1499 | |
---|
1404 | 1500 | vsil = ice_sched_get_vsi_layer(hw); |
---|
1405 | 1501 | for (i = vsil; i >= hw->sw_entry_point_layer; i--) |
---|
.. | .. |
---|
1412 | 1508 | /* If intermediate nodes are reached max children |
---|
1413 | 1509 | * then add a new one. |
---|
1414 | 1510 | */ |
---|
1415 | | - node = ice_sched_get_first_node(hw, tc_node, i); |
---|
1416 | | - max_child = le16_to_cpu(hw->layer_info[i].max_children); |
---|
1417 | | - |
---|
| 1511 | + node = ice_sched_get_first_node(hw->port_info, tc_node, |
---|
| 1512 | + (u8)i); |
---|
1418 | 1513 | /* scan all the siblings */ |
---|
1419 | 1514 | while (node) { |
---|
1420 | | - if (node->num_children < max_child) |
---|
| 1515 | + if (node->num_children < hw->max_children[i]) |
---|
1421 | 1516 | break; |
---|
1422 | 1517 | node = node->sibling; |
---|
1423 | 1518 | } |
---|
1424 | 1519 | |
---|
| 1520 | + /* the tree has an intermediate node with room for this new |
---|
| 1521 | + * VSI, so there is no need to calculate supported nodes for |
---|
| 1522 | + * the layers below. |
---|
| 1523 | + */ |
---|
| 1524 | + if (node) |
---|
| 1525 | + break; |
---|
1425 | 1526 | /* all the nodes are full, allocate a new one */ |
---|
1426 | | - if (!node) |
---|
1427 | | - num_nodes[i]++; |
---|
| 1527 | + num_nodes[i]++; |
---|
1428 | 1528 | } |
---|
1429 | 1529 | } |
---|
1430 | 1530 | |
---|
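
The support-node calculation above always plans one node for the VSI layer itself, and for each intermediate layer it only plans a new node when every existing sibling at that layer is already full; as soon as one layer has a free slot, the existing path can be reused and the scan stops. A compact sketch of that decision, driven by an invented per-layer "has room" table purely for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

#define NUM_LAYERS	4	/* layer 0 = SW entry point, layer 3 = VSI layer */

int main(void)
{
	/* hypothetical per-layer answer to "does any sibling still have a
	 * free slot?"; in the driver this comes from walking the siblings
	 * and comparing num_children with hw->max_children[i]
	 */
	bool layer_has_room[NUM_LAYERS] = { true, true, false, false };
	unsigned int num_nodes[NUM_LAYERS] = { 0 };
	int i;

	for (i = NUM_LAYERS - 1; i >= 0; i--) {
		if (i == NUM_LAYERS - 1) {
			num_nodes[i]++;	/* the VSI node itself is always new */
			continue;
		}
		if (layer_has_room[i])
			break;		/* reuse the existing path upward */
		num_nodes[i]++;		/* every sibling is full: plan a new node */
	}

	for (i = 0; i < NUM_LAYERS; i++)
		printf("layer %d: add %u node(s)\n", i, num_nodes[i]);
	/* expected: layer 3 -> 1, layer 2 -> 1, layers 1 and 0 -> 0 */
	return 0;
}
```
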
1431 | 1531 | /** |
---|
1432 | | - * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree |
---|
| 1532 | + * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree |
---|
1433 | 1533 | * @pi: port information structure |
---|
1434 | | - * @vsi_id: VSI Id |
---|
| 1534 | + * @vsi_handle: software VSI handle |
---|
1435 | 1535 | * @tc_node: pointer to TC node |
---|
1436 | 1536 | * @num_nodes: pointer to num nodes array |
---|
1437 | 1537 | * |
---|
1438 | | - * This function adds the VSI supported nodes into tx tree including the |
---|
| 1538 | + * This function adds the VSI supported nodes into Tx tree including the |
---|
1439 | 1539 | * VSI, its parent and intermediate nodes in below layers |
---|
1440 | 1540 | */ |
---|
1441 | 1541 | static enum ice_status |
---|
1442 | | -ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_id, |
---|
| 1542 | +ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle, |
---|
1443 | 1543 | struct ice_sched_node *tc_node, u16 *num_nodes) |
---|
1444 | 1544 | { |
---|
1445 | 1545 | struct ice_sched_node *parent = tc_node; |
---|
.. | .. |
---|
1450 | 1550 | |
---|
1451 | 1551 | if (!pi) |
---|
1452 | 1552 | return ICE_ERR_PARAM; |
---|
1453 | | - |
---|
1454 | | - status = ice_sched_validate_for_max_nodes(pi, num_nodes); |
---|
1455 | | - if (status) |
---|
1456 | | - return status; |
---|
1457 | 1553 | |
---|
1458 | 1554 | vsil = ice_sched_get_vsi_layer(pi->hw); |
---|
1459 | 1555 | for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) { |
---|
.. | .. |
---|
1477 | 1573 | return ICE_ERR_CFG; |
---|
1478 | 1574 | |
---|
1479 | 1575 | if (i == vsil) |
---|
1480 | | - parent->vsi_id = vsi_id; |
---|
| 1576 | + parent->vsi_handle = vsi_handle; |
---|
1481 | 1577 | } |
---|
| 1578 | + |
---|
1482 | 1579 | return 0; |
---|
1483 | 1580 | } |
---|
1484 | 1581 | |
---|
1485 | 1582 | /** |
---|
1486 | 1583 | * ice_sched_add_vsi_to_topo - add a new VSI into tree |
---|
1487 | 1584 | * @pi: port information structure |
---|
1488 | | - * @vsi_id: VSI Id |
---|
| 1585 | + * @vsi_handle: software VSI handle |
---|
1489 | 1586 | * @tc: TC number |
---|
1490 | 1587 | * |
---|
1491 | 1588 | * This function adds a new VSI into scheduler tree |
---|
1492 | 1589 | */ |
---|
1493 | 1590 | static enum ice_status |
---|
1494 | | -ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_id, u8 tc) |
---|
| 1591 | +ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc) |
---|
1495 | 1592 | { |
---|
1496 | 1593 | u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; |
---|
1497 | 1594 | struct ice_sched_node *tc_node; |
---|
.. | .. |
---|
1504 | 1601 | /* calculate number of supported nodes needed for this VSI */ |
---|
1505 | 1602 | ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes); |
---|
1506 | 1603 | |
---|
1507 | | - /* add vsi supported nodes to tc subtree */ |
---|
1508 | | - return ice_sched_add_vsi_support_nodes(pi, vsi_id, tc_node, num_nodes); |
---|
| 1604 | + /* add VSI supported nodes to TC subtree */ |
---|
| 1605 | + return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node, |
---|
| 1606 | + num_nodes); |
---|
1509 | 1607 | } |
---|
1510 | 1608 | |
---|
1511 | 1609 | /** |
---|
1512 | 1610 | * ice_sched_update_vsi_child_nodes - update VSI child nodes |
---|
1513 | 1611 | * @pi: port information structure |
---|
1514 | | - * @vsi_id: VSI Id |
---|
| 1612 | + * @vsi_handle: software VSI handle |
---|
1515 | 1613 | * @tc: TC number |
---|
1516 | 1614 | * @new_numqs: new number of max queues |
---|
1517 | 1615 | * @owner: owner of this subtree |
---|
.. | .. |
---|
1519 | 1617 | * This function updates the VSI child nodes based on the number of queues |
---|
1520 | 1618 | */ |
---|
1521 | 1619 | static enum ice_status |
---|
1522 | | -ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc, |
---|
1523 | | - u16 new_numqs, u8 owner) |
---|
| 1620 | +ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, |
---|
| 1621 | + u8 tc, u16 new_numqs, u8 owner) |
---|
1524 | 1622 | { |
---|
1525 | | - u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; |
---|
1526 | 1623 | u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 }; |
---|
1527 | 1624 | struct ice_sched_node *vsi_node; |
---|
1528 | 1625 | struct ice_sched_node *tc_node; |
---|
1529 | | - struct ice_sched_vsi_info *vsi; |
---|
| 1626 | + struct ice_vsi_ctx *vsi_ctx; |
---|
1530 | 1627 | enum ice_status status = 0; |
---|
1531 | 1628 | struct ice_hw *hw = pi->hw; |
---|
1532 | 1629 | u16 prev_numqs; |
---|
1533 | | - u8 i; |
---|
1534 | 1630 | |
---|
1535 | 1631 | tc_node = ice_sched_get_tc_node(pi, tc); |
---|
1536 | 1632 | if (!tc_node) |
---|
1537 | 1633 | return ICE_ERR_CFG; |
---|
1538 | 1634 | |
---|
1539 | | - vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); |
---|
| 1635 | + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); |
---|
1540 | 1636 | if (!vsi_node) |
---|
1541 | 1637 | return ICE_ERR_CFG; |
---|
1542 | 1638 | |
---|
1543 | | - vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); |
---|
1544 | | - if (!vsi) |
---|
1545 | | - return ICE_ERR_CFG; |
---|
1546 | | - |
---|
1547 | | - if (owner == ICE_SCHED_NODE_OWNER_LAN) |
---|
1548 | | - prev_numqs = vsi->max_lanq[tc]; |
---|
1549 | | - else |
---|
| 1639 | + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); |
---|
| 1640 | + if (!vsi_ctx) |
---|
1550 | 1641 | return ICE_ERR_PARAM; |
---|
1551 | 1642 | |
---|
1552 | | - /* num queues are not changed */ |
---|
1553 | | - if (prev_numqs == new_numqs) |
---|
| 1643 | + prev_numqs = vsi_ctx->sched.max_lanq[tc]; |
---|
| 1644 | + /* num queues are not changed or less than the previous number */ |
---|
| 1645 | + if (new_numqs <= prev_numqs) |
---|
1554 | 1646 | return status; |
---|
1555 | | - |
---|
1556 | | - /* calculate number of nodes based on prev/new number of qs */ |
---|
1557 | | - if (prev_numqs) |
---|
1558 | | - ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes); |
---|
| 1647 | + status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs); |
---|
| 1648 | + if (status) |
---|
| 1649 | + return status; |
---|
1559 | 1650 | |
---|
1560 | 1651 | if (new_numqs) |
---|
1561 | 1652 | ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes); |
---|
| 1653 | + /* Always keep the maximum queue configuration. Update the tree |
---|
| 1654 | + * only if the new number of queues is greater than the previous |
---|
| 1655 | + * number. This may leave some extra nodes in the tree if the queue |
---|
| 1656 | + * count shrinks, but that does no harm. Removing those extra nodes |
---|
| 1657 | + * could complicate the code if they are part of an SRL profile or |
---|
| 1658 | + * are individually rate limited. |
---|
| 1659 | + */ |
---|
| 1660 | + status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node, |
---|
| 1661 | + new_num_nodes, owner); |
---|
| 1662 | + if (status) |
---|
| 1663 | + return status; |
---|
| 1664 | + vsi_ctx->sched.max_lanq[tc] = new_numqs; |
---|
1562 | 1665 | |
---|
1563 | | - if (prev_numqs > new_numqs) { |
---|
1564 | | - for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) |
---|
1565 | | - new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i]; |
---|
1566 | | - |
---|
1567 | | - ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes, |
---|
1568 | | - owner); |
---|
1569 | | - } else { |
---|
1570 | | - for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++) |
---|
1571 | | - new_num_nodes[i] -= prev_num_nodes[i]; |
---|
1572 | | - |
---|
1573 | | - status = ice_sched_add_vsi_child_nodes(pi, vsi_id, tc_node, |
---|
1574 | | - new_num_nodes, owner); |
---|
1575 | | - if (status) |
---|
1576 | | - return status; |
---|
1577 | | - } |
---|
1578 | | - |
---|
1579 | | - vsi->max_lanq[tc] = new_numqs; |
---|
1580 | | - |
---|
1581 | | - return status; |
---|
| 1666 | + return 0; |
---|
1582 | 1667 | } |
---|
1583 | 1668 | |
---|
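
The update path above is deliberately grow-only: max_lanq acts as a per-TC high-water mark, the tree is extended only when the requested queue count exceeds it, and surplus nodes from an earlier, larger configuration are left in place. A tiny standalone sketch of that policy (the structure and names here are placeholders, not driver fields):

```c
#include <stdio.h>

/* hypothetical per-TC bookkeeping; illustration only */
struct tc_qcfg {
	unsigned int max_lanq;	/* high-water mark of configured queues */
};

/* returns the number of queues the tree must newly accommodate */
static unsigned int queues_to_grow(struct tc_qcfg *cfg, unsigned int new_numqs)
{
	if (new_numqs <= cfg->max_lanq)
		return 0;		/* shrink or no change: leave the tree alone */

	cfg->max_lanq = new_numqs;	/* remember the new high-water mark */
	return new_numqs;
}

int main(void)
{
	struct tc_qcfg cfg = { .max_lanq = 0 };

	printf("%u\n", queues_to_grow(&cfg, 64));	/* 64: first configuration */
	printf("%u\n", queues_to_grow(&cfg, 16));	/* 0: shrink is a no-op */
	printf("%u\n", queues_to_grow(&cfg, 128));	/* 128: grow past the mark */
	return 0;
}
```

Leaving extra child nodes behind on a shrink is the cheaper option here: tearing them down would require checking whether each node participates in SRL or per-node rate limiting first.
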
1584 | 1669 | /** |
---|
1585 | | - * ice_sched_cfg_vsi - configure the new/exisiting VSI |
---|
| 1670 | + * ice_sched_cfg_vsi - configure the new/existing VSI |
---|
1586 | 1671 | * @pi: port information structure |
---|
1587 | | - * @vsi_id: VSI Id |
---|
| 1672 | + * @vsi_handle: software VSI handle |
---|
1588 | 1673 | * @tc: TC number |
---|
1589 | 1674 | * @maxqs: max number of queues |
---|
1590 | | - * @owner: lan or rdma |
---|
| 1675 | + * @owner: LAN or RDMA |
---|
1591 | 1676 | * @enable: TC enabled or disabled |
---|
1592 | 1677 | * |
---|
1593 | 1678 | * This function adds/updates VSI nodes based on the number of queues. If TC is |
---|
.. | .. |
---|
1595 | 1680 | * disabled then suspend the VSI if it is not already. |
---|
1596 | 1681 | */ |
---|
1597 | 1682 | enum ice_status |
---|
1598 | | -ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_id, u8 tc, u16 maxqs, |
---|
| 1683 | +ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, |
---|
1599 | 1684 | u8 owner, bool enable) |
---|
1600 | 1685 | { |
---|
1601 | 1686 | struct ice_sched_node *vsi_node, *tc_node; |
---|
1602 | | - struct ice_sched_vsi_info *vsi; |
---|
| 1687 | + struct ice_vsi_ctx *vsi_ctx; |
---|
1603 | 1688 | enum ice_status status = 0; |
---|
1604 | 1689 | struct ice_hw *hw = pi->hw; |
---|
1605 | 1690 | |
---|
| 1691 | + ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle); |
---|
1606 | 1692 | tc_node = ice_sched_get_tc_node(pi, tc); |
---|
1607 | 1693 | if (!tc_node) |
---|
1608 | 1694 | return ICE_ERR_PARAM; |
---|
| 1695 | + vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle); |
---|
| 1696 | + if (!vsi_ctx) |
---|
| 1697 | + return ICE_ERR_PARAM; |
---|
| 1698 | + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); |
---|
1609 | 1699 | |
---|
1610 | | - vsi = ice_sched_get_vsi_info_entry(pi, vsi_id); |
---|
1611 | | - if (!vsi) |
---|
1612 | | - vsi = ice_sched_create_vsi_info_entry(pi, vsi_id); |
---|
1613 | | - if (!vsi) |
---|
1614 | | - return ICE_ERR_NO_MEMORY; |
---|
1615 | | - |
---|
1616 | | - vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); |
---|
1617 | | - |
---|
1618 | | - /* suspend the VSI if tc is not enabled */ |
---|
| 1700 | + /* suspend the VSI if TC is not enabled */ |
---|
1619 | 1701 | if (!enable) { |
---|
1620 | 1702 | if (vsi_node && vsi_node->in_use) { |
---|
1621 | 1703 | u32 teid = le32_to_cpu(vsi_node->info.node_teid); |
---|
.. | .. |
---|
1630 | 1712 | |
---|
1631 | 1713 | /* TC is enabled, if it is a new VSI then add it to the tree */ |
---|
1632 | 1714 | if (!vsi_node) { |
---|
1633 | | - status = ice_sched_add_vsi_to_topo(pi, vsi_id, tc); |
---|
| 1715 | + status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc); |
---|
1634 | 1716 | if (status) |
---|
1635 | 1717 | return status; |
---|
1636 | | - vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_id); |
---|
| 1718 | + |
---|
| 1719 | + vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle); |
---|
1637 | 1720 | if (!vsi_node) |
---|
1638 | 1721 | return ICE_ERR_CFG; |
---|
1639 | | - vsi->vsi_node[tc] = vsi_node; |
---|
| 1722 | + |
---|
| 1723 | + vsi_ctx->sched.vsi_node[tc] = vsi_node; |
---|
1640 | 1724 | vsi_node->in_use = true; |
---|
| 1725 | + /* invalidate the max queues whenever VSI gets added first time |
---|
| 1726 | + * into the scheduler tree (boot or after reset). We need to |
---|
| 1727 | + * recreate the child nodes all the time in these cases. |
---|
| 1728 | + */ |
---|
| 1729 | + vsi_ctx->sched.max_lanq[tc] = 0; |
---|
1641 | 1730 | } |
---|
1642 | 1731 | |
---|
1643 | 1732 | /* update the VSI child nodes */ |
---|
1644 | | - status = ice_sched_update_vsi_child_nodes(pi, vsi_id, tc, maxqs, owner); |
---|
| 1733 | + status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs, |
---|
| 1734 | + owner); |
---|
1645 | 1735 | if (status) |
---|
1646 | 1736 | return status; |
---|
1647 | 1737 | |
---|
.. | .. |
---|
1656 | 1746 | |
---|
1657 | 1747 | return status; |
---|
1658 | 1748 | } |
---|
| 1749 | + |
---|
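
Callers are expected to invoke ice_sched_cfg_vsi() once per traffic class whenever a VSI's LAN queue count changes; enabled TCs get their sub-tree added or updated, while disabled TCs have their VSI node suspended. A hedged sketch of such a caller follows; the function name, tc_bitmap and max_lanqs[] are assumptions for illustration, not code from this patch.

```c
/* hedged sketch of a caller; tc_bitmap and max_lanqs[] are hypothetical
 * inputs from the LAN configuration path, not names from this patch
 */
static enum ice_status
example_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
		    u16 *max_lanqs)
{
	enum ice_status status;
	u8 tc;

	ice_for_each_traffic_class(tc) {
		/* enabled TCs get their sub-tree added/updated, disabled TCs
		 * have their VSI node suspended by ice_sched_cfg_vsi()
		 */
		status = ice_sched_cfg_vsi(pi, vsi_handle, tc, max_lanqs[tc],
					   ICE_SCHED_NODE_OWNER_LAN,
					   tc_bitmap & BIT(tc));
		if (status)
			return status;
	}
	return 0;
}
```
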
| 1750 | +/** |
---|
| 1751 | + * ice_sched_rm_agg_vsi_info - remove aggregator-related VSI info entry |
---|
| 1752 | + * @pi: port information structure |
---|
| 1753 | + * @vsi_handle: software VSI handle |
---|
| 1754 | + * |
---|
| 1755 | + * This function removes single aggregator VSI info entry from |
---|
| 1756 | + * aggregator list. |
---|
| 1757 | + */ |
---|
| 1758 | +static void ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle) |
---|
| 1759 | +{ |
---|
| 1760 | + struct ice_sched_agg_info *agg_info; |
---|
| 1761 | + struct ice_sched_agg_info *atmp; |
---|
| 1762 | + |
---|
| 1763 | + list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list, |
---|
| 1764 | + list_entry) { |
---|
| 1765 | + struct ice_sched_agg_vsi_info *agg_vsi_info; |
---|
| 1766 | + struct ice_sched_agg_vsi_info *vtmp; |
---|
| 1767 | + |
---|
| 1768 | + list_for_each_entry_safe(agg_vsi_info, vtmp, |
---|
| 1769 | + &agg_info->agg_vsi_list, list_entry) |
---|
| 1770 | + if (agg_vsi_info->vsi_handle == vsi_handle) { |
---|
| 1771 | + list_del(&agg_vsi_info->list_entry); |
---|
| 1772 | + devm_kfree(ice_hw_to_dev(pi->hw), |
---|
| 1773 | + agg_vsi_info); |
---|
| 1774 | + return; |
---|
| 1775 | + } |
---|
| 1776 | + } |
---|
| 1777 | +} |
---|
| 1778 | + |
---|
| 1779 | +/** |
---|
| 1780 | + * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree |
---|
| 1781 | + * @node: pointer to the sub-tree node |
---|
| 1782 | + * |
---|
| 1783 | + * This function checks for a leaf node presence in a given sub-tree node. |
---|
| 1784 | + */ |
---|
| 1785 | +static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node) |
---|
| 1786 | +{ |
---|
| 1787 | + u8 i; |
---|
| 1788 | + |
---|
| 1789 | + for (i = 0; i < node->num_children; i++) |
---|
| 1790 | + if (ice_sched_is_leaf_node_present(node->children[i])) |
---|
| 1791 | + return true; |
---|
| 1792 | + /* check for a leaf node */ |
---|
| 1793 | + return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF); |
---|
| 1794 | +} |
---|
| 1795 | + |
---|
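
The leaf check above is a plain depth-first recursion: a sub-tree "has leaves" if any child sub-tree does, or if the node itself is a leaf element. The same shape as a small standalone program, with a simplified node type standing in for the driver's structure:

```c
#include <stdbool.h>
#include <stdio.h>

/* simplified stand-in for a scheduler node; illustration only */
struct node {
	bool is_leaf;			/* stands in for ICE_AQC_ELEM_TYPE_LEAF */
	unsigned int num_children;
	struct node **children;
};

/* depth-first search: true if the sub-tree contains at least one leaf */
static bool subtree_has_leaf(const struct node *n)
{
	unsigned int i;

	for (i = 0; i < n->num_children; i++)
		if (subtree_has_leaf(n->children[i]))
			return true;
	return n->is_leaf;
}

int main(void)
{
	struct node leaf = { .is_leaf = true };
	struct node *kids[] = { &leaf };
	struct node mid = { .num_children = 1, .children = kids };
	struct node *roots[] = { &mid };
	struct node root = { .num_children = 1, .children = roots };

	printf("%s\n", subtree_has_leaf(&root) ? "has leaf" : "no leaf");
	return 0;
}
```
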
| 1796 | +/** |
---|
| 1797 | + * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes |
---|
| 1798 | + * @pi: port information structure |
---|
| 1799 | + * @vsi_handle: software VSI handle |
---|
| 1800 | + * @owner: LAN or RDMA |
---|
| 1801 | + * |
---|
| 1802 | + * This function removes the VSI and its LAN or RDMA children nodes from the |
---|
| 1803 | + * scheduler tree. |
---|
| 1804 | + */ |
---|
| 1805 | +static enum ice_status |
---|
| 1806 | +ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) |
---|
| 1807 | +{ |
---|
| 1808 | + enum ice_status status = ICE_ERR_PARAM; |
---|
| 1809 | + struct ice_vsi_ctx *vsi_ctx; |
---|
| 1810 | + u8 i; |
---|
| 1811 | + |
---|
| 1812 | + ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle); |
---|
| 1813 | + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) |
---|
| 1814 | + return status; |
---|
| 1815 | + mutex_lock(&pi->sched_lock); |
---|
| 1816 | + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); |
---|
| 1817 | + if (!vsi_ctx) |
---|
| 1818 | + goto exit_sched_rm_vsi_cfg; |
---|
| 1819 | + |
---|
| 1820 | + ice_for_each_traffic_class(i) { |
---|
| 1821 | + struct ice_sched_node *vsi_node, *tc_node; |
---|
| 1822 | + u8 j = 0; |
---|
| 1823 | + |
---|
| 1824 | + tc_node = ice_sched_get_tc_node(pi, i); |
---|
| 1825 | + if (!tc_node) |
---|
| 1826 | + continue; |
---|
| 1827 | + |
---|
| 1828 | + vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle); |
---|
| 1829 | + if (!vsi_node) |
---|
| 1830 | + continue; |
---|
| 1831 | + |
---|
| 1832 | + if (ice_sched_is_leaf_node_present(vsi_node)) { |
---|
| 1833 | + ice_debug(pi->hw, ICE_DBG_SCHED, |
---|
| 1834 | + "VSI has leaf nodes in TC %d\n", i); |
---|
| 1835 | + status = ICE_ERR_IN_USE; |
---|
| 1836 | + goto exit_sched_rm_vsi_cfg; |
---|
| 1837 | + } |
---|
| 1838 | + while (j < vsi_node->num_children) { |
---|
| 1839 | + if (vsi_node->children[j]->owner == owner) { |
---|
| 1840 | + ice_free_sched_node(pi, vsi_node->children[j]); |
---|
| 1841 | + |
---|
| 1842 | + /* reset the counter again since the num |
---|
| 1843 | + * children will be updated after node removal |
---|
| 1844 | + */ |
---|
| 1845 | + j = 0; |
---|
| 1846 | + } else { |
---|
| 1847 | + j++; |
---|
| 1848 | + } |
---|
| 1849 | + } |
---|
| 1850 | + /* remove the VSI if it has no children */ |
---|
| 1851 | + if (!vsi_node->num_children) { |
---|
| 1852 | + ice_free_sched_node(pi, vsi_node); |
---|
| 1853 | + vsi_ctx->sched.vsi_node[i] = NULL; |
---|
| 1854 | + |
---|
| 1855 | + /* clean up aggregator related VSI info if any */ |
---|
| 1856 | + ice_sched_rm_agg_vsi_info(pi, vsi_handle); |
---|
| 1857 | + } |
---|
| 1858 | + if (owner == ICE_SCHED_NODE_OWNER_LAN) |
---|
| 1859 | + vsi_ctx->sched.max_lanq[i] = 0; |
---|
| 1860 | + } |
---|
| 1861 | + status = 0; |
---|
| 1862 | + |
---|
| 1863 | +exit_sched_rm_vsi_cfg: |
---|
| 1864 | + mutex_unlock(&pi->sched_lock); |
---|
| 1865 | + return status; |
---|
| 1866 | +} |
---|
| 1867 | + |
---|
| 1868 | +/** |
---|
| 1869 | + * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes |
---|
| 1870 | + * @pi: port information structure |
---|
| 1871 | + * @vsi_handle: software VSI handle |
---|
| 1872 | + * |
---|
| 1873 | + * This function clears the VSI and its LAN children nodes from scheduler tree |
---|
| 1874 | + * for all TCs. |
---|
| 1875 | + */ |
---|
| 1876 | +enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) |
---|
| 1877 | +{ |
---|
| 1878 | + return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); |
---|
| 1879 | +} |
---|
| 1880 | + |
---|
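
The exported wrapper above is what VSI teardown is expected to call after the LAN Tx queues themselves have been freed; if leaf nodes are still present, ice_sched_rm_vsi_cfg() refuses with ICE_ERR_IN_USE. A hedged sketch of that ordering (the surrounding function name is an assumption for illustration):

```c
/* sketch of a teardown path, assuming the LAN Tx queues for vsi_handle
 * have already been disabled and their leaf nodes removed
 */
static void example_vsi_teardown(struct ice_port_info *pi, u16 vsi_handle)
{
	enum ice_status status;

	status = ice_rm_vsi_lan_cfg(pi, vsi_handle);
	if (status)
		ice_debug(pi->hw, ICE_DBG_SCHED,
			  "Failed to remove VSI %d from Tx topology\n",
			  vsi_handle);
}
```
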
| 1881 | +/** |
---|
| 1882 | + * ice_sched_rm_unused_rl_prof - remove unused RL profile |
---|
| 1883 | + * @pi: port information structure |
---|
| 1884 | + * |
---|
| 1885 | + * This function removes unused rate limit profiles from the HW and |
---|
| 1886 | + * SW DB. The caller needs to hold scheduler lock. |
---|
| 1887 | + */ |
---|
| 1888 | +static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi) |
---|
| 1889 | +{ |
---|
| 1890 | + u16 ln; |
---|
| 1891 | + |
---|
| 1892 | + for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) { |
---|
| 1893 | + struct ice_aqc_rl_profile_info *rl_prof_elem; |
---|
| 1894 | + struct ice_aqc_rl_profile_info *rl_prof_tmp; |
---|
| 1895 | + |
---|
| 1896 | + list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp, |
---|
| 1897 | + &pi->rl_prof_list[ln], list_entry) { |
---|
| 1898 | + if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem)) |
---|
| 1899 | + ice_debug(pi->hw, ICE_DBG_SCHED, |
---|
| 1900 | + "Removed rl profile\n"); |
---|
| 1901 | + } |
---|
| 1902 | + } |
---|
| 1903 | +} |
---|
| 1904 | + |
---|
| 1905 | +/** |
---|
| 1906 | + * ice_sched_update_elem - update element |
---|
| 1907 | + * @hw: pointer to the HW struct |
---|
| 1908 | + * @node: pointer to node |
---|
| 1909 | + * @info: node info to update |
---|
| 1910 | + * |
---|
| 1911 | + * Update the HW DB and the local SW DB of the node. The node's scheduling |
---|
| 1912 | + * parameters are updated from the info argument's data buffer (info->data) |
---|
| 1913 | + * and an error is returned if configuring the sched element fails. The |
---|
| 1914 | + * caller needs to hold the scheduler lock. |
---|
| 1915 | + */ |
---|
| 1916 | +static enum ice_status |
---|
| 1917 | +ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node, |
---|
| 1918 | + struct ice_aqc_txsched_elem_data *info) |
---|
| 1919 | +{ |
---|
| 1920 | + struct ice_aqc_txsched_elem_data buf; |
---|
| 1921 | + enum ice_status status; |
---|
| 1922 | + u16 elem_cfgd = 0; |
---|
| 1923 | + u16 num_elems = 1; |
---|
| 1924 | + |
---|
| 1925 | + buf = *info; |
---|
| 1926 | + /* Parent TEID is a reserved field in this AQ call */ |
---|
| 1927 | + buf.parent_teid = 0; |
---|
| 1928 | + /* Element type is a reserved field in this AQ call */ |
---|
| 1929 | + buf.data.elem_type = 0; |
---|
| 1930 | + /* Flags is a reserved field in this AQ call */ |
---|
| 1931 | + buf.data.flags = 0; |
---|
| 1932 | + |
---|
| 1933 | + /* Update HW DB */ |
---|
| 1934 | + /* Configure element node */ |
---|
| 1935 | + status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf), |
---|
| 1936 | + &elem_cfgd, NULL); |
---|
| 1937 | + if (status || elem_cfgd != num_elems) { |
---|
| 1938 | + ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n"); |
---|
| 1939 | + return ICE_ERR_CFG; |
---|
| 1940 | + } |
---|
| 1941 | + |
---|
| 1942 | + /* Config success case */ |
---|
| 1943 | + /* Now update local SW DB */ |
---|
| 1944 | + /* Only copy the data portion of info buffer */ |
---|
| 1945 | + node->info.data = info->data; |
---|
| 1946 | + return status; |
---|
| 1947 | +} |
---|
| 1948 | + |
---|
| 1949 | +/** |
---|
| 1950 | + * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params |
---|
| 1951 | + * @hw: pointer to the HW struct |
---|
| 1952 | + * @node: sched node to configure |
---|
| 1953 | + * @rl_type: rate limit type CIR, EIR, or shared |
---|
| 1954 | + * @bw_alloc: BW weight/allocation |
---|
| 1955 | + * |
---|
| 1956 | + * This function configures node element's BW allocation. |
---|
| 1957 | + */ |
---|
| 1958 | +static enum ice_status |
---|
| 1959 | +ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node, |
---|
| 1960 | + enum ice_rl_type rl_type, u16 bw_alloc) |
---|
| 1961 | +{ |
---|
| 1962 | + struct ice_aqc_txsched_elem_data buf; |
---|
| 1963 | + struct ice_aqc_txsched_elem *data; |
---|
| 1964 | + enum ice_status status; |
---|
| 1965 | + |
---|
| 1966 | + buf = node->info; |
---|
| 1967 | + data = &buf.data; |
---|
| 1968 | + if (rl_type == ICE_MIN_BW) { |
---|
| 1969 | + data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; |
---|
| 1970 | + data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc); |
---|
| 1971 | + } else if (rl_type == ICE_MAX_BW) { |
---|
| 1972 | + data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; |
---|
| 1973 | + data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc); |
---|
| 1974 | + } else { |
---|
| 1975 | + return ICE_ERR_PARAM; |
---|
| 1976 | + } |
---|
| 1977 | + |
---|
| 1978 | + /* Configure element */ |
---|
| 1979 | + status = ice_sched_update_elem(hw, node, &buf); |
---|
| 1980 | + return status; |
---|
| 1981 | +} |
---|
| 1982 | + |
---|
| 1983 | +/** |
---|
| 1984 | + * ice_set_clear_cir_bw - set or clear CIR BW |
---|
| 1985 | + * @bw_t_info: bandwidth type information structure |
---|
| 1986 | + * @bw: bandwidth in Kbps - Kilo bits per sec |
---|
| 1987 | + * |
---|
| 1988 | + * Save or clear CIR bandwidth (BW) in the passed param bw_t_info. |
---|
| 1989 | + */ |
---|
| 1990 | +static void ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) |
---|
| 1991 | +{ |
---|
| 1992 | + if (bw == ICE_SCHED_DFLT_BW) { |
---|
| 1993 | + clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); |
---|
| 1994 | + bw_t_info->cir_bw.bw = 0; |
---|
| 1995 | + } else { |
---|
| 1996 | + /* Save type of BW information */ |
---|
| 1997 | + set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap); |
---|
| 1998 | + bw_t_info->cir_bw.bw = bw; |
---|
| 1999 | + } |
---|
| 2000 | +} |
---|
| 2001 | + |
---|
| 2002 | +/** |
---|
| 2003 | + * ice_set_clear_eir_bw - set or clear EIR BW |
---|
| 2004 | + * @bw_t_info: bandwidth type information structure |
---|
| 2005 | + * @bw: bandwidth in Kbps - Kilo bits per sec |
---|
| 2006 | + * |
---|
| 2007 | + * Save or clear EIR bandwidth (BW) in the passed param bw_t_info. |
---|
| 2008 | + */ |
---|
| 2009 | +static void ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw) |
---|
| 2010 | +{ |
---|
| 2011 | + if (bw == ICE_SCHED_DFLT_BW) { |
---|
| 2012 | + clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); |
---|
| 2013 | + bw_t_info->eir_bw.bw = 0; |
---|
| 2014 | + } else { |
---|
| 2015 | + /* EIR BW and Shared BW profiles are mutually exclusive and |
---|
| 2016 | + * hence only one of them may be set for any given element. |
---|
| 2017 | + * First clear earlier saved shared BW information. |
---|
| 2018 | + */ |
---|
| 2019 | + clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); |
---|
| 2020 | + bw_t_info->shared_bw = 0; |
---|
| 2021 | + /* save EIR BW information */ |
---|
| 2022 | + set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); |
---|
| 2023 | + bw_t_info->eir_bw.bw = bw; |
---|
| 2024 | + } |
---|
| 2025 | +} |
---|
| 2026 | + |
---|
| 2027 | +/** |
---|
| 2028 | + * ice_set_clear_shared_bw - set or clear shared BW |
---|
| 2029 | + * @bw_t_info: bandwidth type information structure |
---|
| 2030 | + * @bw: bandwidth in Kbps - Kilo bits per sec |
---|
| 2031 | + * |
---|
| 2032 | + * Save or clear shared bandwidth (BW) in the passed param bw_t_info. |
---|
| 2033 | + */ |
---|
| 2034 | +static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw) |
---|
| 2035 | +{ |
---|
| 2036 | + if (bw == ICE_SCHED_DFLT_BW) { |
---|
| 2037 | + clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); |
---|
| 2038 | + bw_t_info->shared_bw = 0; |
---|
| 2039 | + } else { |
---|
| 2040 | + /* EIR BW and Shared BW profiles are mutually exclusive and |
---|
| 2041 | + * hence only one of them may be set for any given element. |
---|
| 2042 | + * First clear earlier saved EIR BW information. |
---|
| 2043 | + */ |
---|
| 2044 | + clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap); |
---|
| 2045 | + bw_t_info->eir_bw.bw = 0; |
---|
| 2046 | + /* save shared BW information */ |
---|
| 2047 | + set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap); |
---|
| 2048 | + bw_t_info->shared_bw = bw; |
---|
| 2049 | + } |
---|
| 2050 | +} |
---|
| 2051 | + |
---|
| 2052 | +/** |
---|
| 2053 | + * ice_sched_calc_wakeup - calculate RL profile wakeup parameter |
---|
| 2054 | + * @bw: bandwidth in Kbps |
---|
| 2055 | + * |
---|
| 2056 | + * This function calculates the wakeup parameter of RL profile. |
---|
| 2057 | + */ |
---|
| 2058 | +static u16 ice_sched_calc_wakeup(s32 bw) |
---|
| 2059 | +{ |
---|
| 2060 | + s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f; |
---|
| 2061 | + s32 wakeup_f_int; |
---|
| 2062 | + u16 wakeup = 0; |
---|
| 2063 | + |
---|
| 2064 | + /* Get the wakeup integer value */ |
---|
| 2065 | + bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); |
---|
| 2066 | + wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec); |
---|
| 2067 | + if (wakeup_int > 63) { |
---|
| 2068 | + wakeup = (u16)((1 << 15) | wakeup_int); |
---|
| 2069 | + } else { |
---|
| 2070 | + /* Calculate fraction value up to 4 decimals |
---|
| 2071 | + * Convert Integer value to a constant multiplier |
---|
| 2072 | + */ |
---|
| 2073 | + wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int; |
---|
| 2074 | + wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER * |
---|
| 2075 | + ICE_RL_PROF_FREQUENCY, |
---|
| 2076 | + bytes_per_sec); |
---|
| 2077 | + |
---|
| 2078 | + /* Get Fraction value */ |
---|
| 2079 | + wakeup_f = wakeup_a - wakeup_b; |
---|
| 2080 | + |
---|
| 2081 | + /* Round up the Fractional value via Ceil(Fractional value) */ |
---|
| 2082 | + if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2)) |
---|
| 2083 | + wakeup_f += 1; |
---|
| 2084 | + |
---|
| 2085 | + wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION, |
---|
| 2086 | + ICE_RL_PROF_MULTIPLIER); |
---|
| 2087 | + wakeup |= (u16)(wakeup_int << 9); |
---|
| 2088 | + wakeup |= (u16)(0x1ff & wakeup_f_int); |
---|
| 2089 | + } |
---|
| 2090 | + |
---|
| 2091 | + return wakeup; |
---|
| 2092 | +} |
---|
| 2093 | + |
---|
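
The wakeup value above is a small fixed-point encoding: for slow rates the raw integer is stored with bit 15 set, while for fast rates a 6-bit integer part lands in bits 9-14 and a rounded 9-bit fraction in bits 0-8. A standalone restatement of that math follows; the constants are illustrative stand-ins rather than values quoted from the driver headers.

```c
#include <stdint.h>
#include <stdio.h>

/* illustrative constants; the driver's real values live next to the
 * scheduler code and may differ
 */
#define RL_PROF_FREQUENCY	446000000LL
#define RL_PROF_MULTIPLIER	10000LL
#define RL_PROF_FRACTION	512LL
#define BITS_PER_BYTE		8

/* userspace restatement of the wakeup math for a rate given in Kbps */
static uint16_t calc_wakeup(int64_t bw_kbps)
{
	int64_t bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
	int64_t wakeup_f_int;
	uint16_t wakeup = 0;

	bytes_per_sec = bw_kbps * 1000 / BITS_PER_BYTE;
	wakeup_int = RL_PROF_FREQUENCY / bytes_per_sec;

	/* slow rate: store the plain integer and flag it with bit 15 */
	if (wakeup_int > 63)
		return (uint16_t)((1 << 15) | wakeup_int);

	/* fast rate: integer part in bits 9-14, rounded 9-bit fraction
	 * in bits 0-8
	 */
	wakeup_b = RL_PROF_MULTIPLIER * wakeup_int;
	wakeup_a = RL_PROF_MULTIPLIER * RL_PROF_FREQUENCY / bytes_per_sec;
	wakeup_f = wakeup_a - wakeup_b;
	if (wakeup_f > RL_PROF_MULTIPLIER / 2)
		wakeup_f += 1;

	wakeup_f_int = wakeup_f * RL_PROF_FRACTION / RL_PROF_MULTIPLIER;
	wakeup |= (uint16_t)(wakeup_int << 9);
	wakeup |= (uint16_t)(0x1ff & wakeup_f_int);
	return wakeup;
}

int main(void)
{
	/* 10 Gbps: wakeup_int is small, so the fixed-point path is taken */
	printf("wakeup(10 Gbps) = 0x%04x\n",
	       (unsigned int)calc_wakeup(10000000LL));
	/* 1 Mbps: wakeup_int exceeds 63, so the plain-integer path is taken */
	printf("wakeup(1 Mbps)  = 0x%04x\n",
	       (unsigned int)calc_wakeup(1000LL));
	return 0;
}
```
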
| 2094 | +/** |
---|
| 2095 | + * ice_sched_bw_to_rl_profile - convert BW to profile parameters |
---|
| 2096 | + * @bw: bandwidth in Kbps |
---|
| 2097 | + * @profile: profile parameters to return |
---|
| 2098 | + * |
---|
| 2099 | + * This function converts the BW to profile structure format. |
---|
| 2100 | + */ |
---|
| 2101 | +static enum ice_status |
---|
| 2102 | +ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile) |
---|
| 2103 | +{ |
---|
| 2104 | + enum ice_status status = ICE_ERR_PARAM; |
---|
| 2105 | + s64 bytes_per_sec, ts_rate, mv_tmp; |
---|
| 2106 | + bool found = false; |
---|
| 2107 | + s32 encode = 0; |
---|
| 2108 | + s64 mv = 0; |
---|
| 2109 | + s32 i; |
---|
| 2110 | + |
---|
| 2111 | + /* BW settings range from 0.5 Mb/sec to 100 Gb/sec */ |
---|
| 2112 | + if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW) |
---|
| 2113 | + return status; |
---|
| 2114 | + |
---|
| 2115 | + /* Bytes per second from Kbps */ |
---|
| 2116 | + bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE); |
---|
| 2117 | + |
---|
| 2118 | + /* encode is 6 bits, but only 5 bits are really useful */ |
---|
| 2119 | + for (i = 0; i < 64; i++) { |
---|
| 2120 | + u64 pow_result = BIT_ULL(i); |
---|
| 2121 | + |
---|
| 2122 | + ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY, |
---|
| 2123 | + pow_result * ICE_RL_PROF_TS_MULTIPLIER); |
---|
| 2124 | + if (ts_rate <= 0) |
---|
| 2125 | + continue; |
---|
| 2126 | + |
---|
| 2127 | + /* Multiplier value */ |
---|
| 2128 | + mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER, |
---|
| 2129 | + ts_rate); |
---|
| 2130 | + |
---|
| 2131 | + /* Round to the nearest ICE_RL_PROF_MULTIPLIER */ |
---|
| 2132 | + mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER); |
---|
| 2133 | + |
---|
| 2134 | + /* First multiplier value greater than the given |
---|
| 2135 | + * accuracy bytes |
---|
| 2136 | + */ |
---|
| 2137 | + if (mv > ICE_RL_PROF_ACCURACY_BYTES) { |
---|
| 2138 | + encode = i; |
---|
| 2139 | + found = true; |
---|
| 2140 | + break; |
---|
| 2141 | + } |
---|
| 2142 | + } |
---|
| 2143 | + if (found) { |
---|
| 2144 | + u16 wm; |
---|
| 2145 | + |
---|
| 2146 | + wm = ice_sched_calc_wakeup(bw); |
---|
| 2147 | + profile->rl_multiply = cpu_to_le16(mv); |
---|
| 2148 | + profile->wake_up_calc = cpu_to_le16(wm); |
---|
| 2149 | + profile->rl_encode = cpu_to_le16(encode); |
---|
| 2150 | + status = 0; |
---|
| 2151 | + } else { |
---|
| 2152 | + status = ICE_ERR_DOES_NOT_EXIST; |
---|
| 2153 | + } |
---|
| 2154 | + |
---|
| 2155 | + return status; |
---|
| 2156 | +} |
---|
| 2157 | + |
---|
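
The conversion above is a linear search over candidate encode values: each candidate divides the profile clock by a power of two times the timeslice multiplier, and the first one whose rounded bytes-per-timeslice multiplier exceeds the accuracy threshold is taken. A standalone sketch of that loop (constants are again illustrative, chosen only to make the search terminate sensibly):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* illustrative constants mirroring the shape of the driver's profile math */
#define RL_PROF_FREQUENCY	446000000LL
#define RL_PROF_MULTIPLIER	10000LL
#define RL_PROF_TS_MULTIPLIER	32LL
#define RL_PROF_ACCURACY_BYTES	128LL
#define BITS_PER_BYTE		8

static int64_t div_round_nearest(int64_t val, int64_t div)
{
	return (val + div / 2) / div;
}

int main(void)
{
	int64_t bw_kbps = 500000;	/* 500 Mbps, purely as an example */
	int64_t bytes_per_sec = bw_kbps * 1000 / BITS_PER_BYTE;
	bool found = false;
	int64_t mv = 0;
	int encode = -1;
	int i;

	/* the useful range of encode values is small; cap the search */
	for (i = 0; i < 32; i++) {
		/* timeslice rate for this encode: clock / (2^i * multiplier) */
		int64_t ts_rate = RL_PROF_FREQUENCY /
				  ((1LL << i) * RL_PROF_TS_MULTIPLIER);

		if (ts_rate <= 0)
			continue;

		/* bytes per timeslice, rounded to the nearest integer */
		mv = div_round_nearest(bytes_per_sec * RL_PROF_MULTIPLIER /
				       ts_rate, RL_PROF_MULTIPLIER);
		/* the first multiplier above the accuracy threshold wins */
		if (mv > RL_PROF_ACCURACY_BYTES) {
			encode = i;
			found = true;
			break;
		}
	}

	if (found)
		printf("encode=%d multiplier=%lld\n", encode, (long long)mv);
	else
		printf("no usable profile for this bandwidth\n");
	return 0;
}
```
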
| 2158 | +/** |
---|
| 2159 | + * ice_sched_add_rl_profile - add RL profile |
---|
| 2160 | + * @pi: port information structure |
---|
| 2161 | + * @rl_type: type of rate limit BW - min, max, or shared |
---|
| 2162 | + * @bw: bandwidth in Kbps - Kilo bits per sec |
---|
| 2163 | + * @layer_num: specifies in which layer to create profile |
---|
| 2164 | + * |
---|
| 2165 | + * This function first checks the existing list for corresponding BW |
---|
| 2166 | + * parameter. If it exists, it returns the associated profile; otherwise |
---|
| 2167 | + * it creates a new rate limit profile for the requested BW, and adds it to |
---|
| 2168 | + * the HW DB and local list. It returns the new profile or null on error. |
---|
| 2169 | + * The caller needs to hold the scheduler lock. |
---|
| 2170 | + */ |
---|
| 2171 | +static struct ice_aqc_rl_profile_info * |
---|
| 2172 | +ice_sched_add_rl_profile(struct ice_port_info *pi, |
---|
| 2173 | + enum ice_rl_type rl_type, u32 bw, u8 layer_num) |
---|
| 2174 | +{ |
---|
| 2175 | + struct ice_aqc_rl_profile_info *rl_prof_elem; |
---|
| 2176 | + u16 profiles_added = 0, num_profiles = 1; |
---|
| 2177 | + struct ice_aqc_rl_profile_elem *buf; |
---|
| 2178 | + enum ice_status status; |
---|
| 2179 | + struct ice_hw *hw; |
---|
| 2180 | + u8 profile_type; |
---|
| 2181 | + |
---|
| 2182 | + if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) |
---|
| 2183 | + return NULL; |
---|
| 2184 | + switch (rl_type) { |
---|
| 2185 | + case ICE_MIN_BW: |
---|
| 2186 | + profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; |
---|
| 2187 | + break; |
---|
| 2188 | + case ICE_MAX_BW: |
---|
| 2189 | + profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; |
---|
| 2190 | + break; |
---|
| 2191 | + case ICE_SHARED_BW: |
---|
| 2192 | + profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; |
---|
| 2193 | + break; |
---|
| 2194 | + default: |
---|
| 2195 | + return NULL; |
---|
| 2196 | + } |
---|
| 2197 | + |
---|
| 2198 | + if (!pi) |
---|
| 2199 | + return NULL; |
---|
| 2200 | + hw = pi->hw; |
---|
| 2201 | + list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], |
---|
| 2202 | + list_entry) |
---|
| 2203 | + if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == |
---|
| 2204 | + profile_type && rl_prof_elem->bw == bw) |
---|
| 2205 | + /* Return existing profile ID info */ |
---|
| 2206 | + return rl_prof_elem; |
---|
| 2207 | + |
---|
| 2208 | + /* Create new profile ID */ |
---|
| 2209 | + rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem), |
---|
| 2210 | + GFP_KERNEL); |
---|
| 2211 | + |
---|
| 2212 | + if (!rl_prof_elem) |
---|
| 2213 | + return NULL; |
---|
| 2214 | + |
---|
| 2215 | + status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile); |
---|
| 2216 | + if (status) |
---|
| 2217 | + goto exit_add_rl_prof; |
---|
| 2218 | + |
---|
| 2219 | + rl_prof_elem->bw = bw; |
---|
| 2220 | + /* layer_num is zero-based, and FW expects levels 1 to 9 */ |
---|
| 2221 | + rl_prof_elem->profile.level = layer_num + 1; |
---|
| 2222 | + rl_prof_elem->profile.flags = profile_type; |
---|
| 2223 | + rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size); |
---|
| 2224 | + |
---|
| 2225 | + /* Create new entry in HW DB */ |
---|
| 2226 | + buf = &rl_prof_elem->profile; |
---|
| 2227 | + status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf), |
---|
| 2228 | + &profiles_added, NULL); |
---|
| 2229 | + if (status || profiles_added != num_profiles) |
---|
| 2230 | + goto exit_add_rl_prof; |
---|
| 2231 | + |
---|
| 2232 | + /* Good entry - add in the list */ |
---|
| 2233 | + rl_prof_elem->prof_id_ref = 0; |
---|
| 2234 | + list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]); |
---|
| 2235 | + return rl_prof_elem; |
---|
| 2236 | + |
---|
| 2237 | +exit_add_rl_prof: |
---|
| 2238 | + devm_kfree(ice_hw_to_dev(hw), rl_prof_elem); |
---|
| 2239 | + return NULL; |
---|
| 2240 | +} |
---|
| 2241 | + |
---|
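
Rate limit profiles are shared and reference-counted: a request for a bandwidth that already has a profile of the same type on that layer returns the existing entry, and only a genuinely new bandwidth allocates a profile in the hardware and links it into the per-layer list. A minimal standalone sketch of that lookup-or-create pattern on a plain linked list (not the driver's structures):

```c
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-in for a per-layer rate-limit profile entry */
struct rl_prof {
	unsigned int bw_kbps;
	unsigned int prof_id_ref;	/* nodes currently using this profile */
	struct rl_prof *next;
};

static struct rl_prof *get_or_add_profile(struct rl_prof **head,
					   unsigned int bw_kbps)
{
	struct rl_prof *p;

	for (p = *head; p; p = p->next)
		if (p->bw_kbps == bw_kbps)
			return p;	/* reuse the existing profile */

	p = calloc(1, sizeof(*p));	/* new bandwidth: create an entry */
	if (!p)
		return NULL;
	p->bw_kbps = bw_kbps;
	p->next = *head;
	*head = p;
	return p;
}

int main(void)
{
	struct rl_prof *layer_list = NULL;
	struct rl_prof *a = get_or_add_profile(&layer_list, 100000);
	struct rl_prof *b = get_or_add_profile(&layer_list, 100000);

	a->prof_id_ref++;
	b->prof_id_ref++;
	printf("same entry: %s, refs: %u\n",
	       a == b ? "yes" : "no", a->prof_id_ref);	/* yes, 2 */

	free(a);	/* a and b alias the same allocation; free it once */
	return 0;
}
```

The reference count is what later lets the cleanup paths (ice_sched_rm_rl_profile() and ice_sched_rm_unused_rl_prof()) decide when a profile can actually be deleted from the hardware.
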
| 2242 | +/** |
---|
| 2243 | + * ice_sched_cfg_node_bw_lmt - configure node sched params |
---|
| 2244 | + * @hw: pointer to the HW struct |
---|
| 2245 | + * @node: sched node to configure |
---|
| 2246 | + * @rl_type: rate limit type CIR, EIR, or shared |
---|
| 2247 | + * @rl_prof_id: rate limit profile ID |
---|
| 2248 | + * |
---|
| 2249 | + * This function configures node element's BW limit. |
---|
| 2250 | + */ |
---|
| 2251 | +static enum ice_status |
---|
| 2252 | +ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node, |
---|
| 2253 | + enum ice_rl_type rl_type, u16 rl_prof_id) |
---|
| 2254 | +{ |
---|
| 2255 | + struct ice_aqc_txsched_elem_data buf; |
---|
| 2256 | + struct ice_aqc_txsched_elem *data; |
---|
| 2257 | + |
---|
| 2258 | + buf = node->info; |
---|
| 2259 | + data = &buf.data; |
---|
| 2260 | + switch (rl_type) { |
---|
| 2261 | + case ICE_MIN_BW: |
---|
| 2262 | + data->valid_sections |= ICE_AQC_ELEM_VALID_CIR; |
---|
| 2263 | + data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); |
---|
| 2264 | + break; |
---|
| 2265 | + case ICE_MAX_BW: |
---|
| 2266 | + /* EIR BW and Shared BW profiles are mutually exclusive and |
---|
| 2267 | + * hence only one of them may be set for any given element |
---|
| 2268 | + */ |
---|
| 2269 | + if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) |
---|
| 2270 | + return ICE_ERR_CFG; |
---|
| 2271 | + data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; |
---|
| 2272 | + data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id); |
---|
| 2273 | + break; |
---|
| 2274 | + case ICE_SHARED_BW: |
---|
| 2275 | + /* Check for removing shared BW */ |
---|
| 2276 | + if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) { |
---|
| 2277 | + /* remove shared profile */ |
---|
| 2278 | + data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED; |
---|
| 2279 | + data->srl_id = 0; /* clear SRL field */ |
---|
| 2280 | + |
---|
| 2281 | + /* re-enable EIR with the default profile */ |
---|
| 2282 | + data->valid_sections |= ICE_AQC_ELEM_VALID_EIR; |
---|
| 2283 | + data->eir_bw.bw_profile_idx = |
---|
| 2284 | + cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID); |
---|
| 2285 | + break; |
---|
| 2286 | + } |
---|
| 2287 | + /* EIR BW and Shared BW profiles are mutually exclusive and |
---|
| 2288 | + * hence only one of them may be set for any given element |
---|
| 2289 | + */ |
---|
| 2290 | + if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) && |
---|
| 2291 | + (le16_to_cpu(data->eir_bw.bw_profile_idx) != |
---|
| 2292 | + ICE_SCHED_DFLT_RL_PROF_ID)) |
---|
| 2293 | + return ICE_ERR_CFG; |
---|
| 2294 | + /* EIR BW is set to default, disable it */ |
---|
| 2295 | + data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR; |
---|
| 2296 | + /* Okay to enable shared BW now */ |
---|
| 2297 | + data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED; |
---|
| 2298 | + data->srl_id = cpu_to_le16(rl_prof_id); |
---|
| 2299 | + break; |
---|
| 2300 | + default: |
---|
| 2301 | + /* Unknown rate limit type */ |
---|
| 2302 | + return ICE_ERR_PARAM; |
---|
| 2303 | + } |
---|
| 2304 | + |
---|
| 2305 | + /* Configure element */ |
---|
| 2306 | + return ice_sched_update_elem(hw, node, &buf); |
---|
| 2307 | +} |
---|
| 2308 | + |
---|
| 2309 | +/** |
---|
| 2310 | + * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID |
---|
| 2311 | + * @node: sched node |
---|
| 2312 | + * @rl_type: rate limit type |
---|
| 2313 | + * |
---|
| 2314 | + * If existing profile matches, it returns the corresponding rate |
---|
| 2315 | + * limit profile ID, otherwise it returns an invalid ID as error. |
---|
| 2316 | + */ |
---|
| 2317 | +static u16 |
---|
| 2318 | +ice_sched_get_node_rl_prof_id(struct ice_sched_node *node, |
---|
| 2319 | + enum ice_rl_type rl_type) |
---|
| 2320 | +{ |
---|
| 2321 | + u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID; |
---|
| 2322 | + struct ice_aqc_txsched_elem *data; |
---|
| 2323 | + |
---|
| 2324 | + data = &node->info.data; |
---|
| 2325 | + switch (rl_type) { |
---|
| 2326 | + case ICE_MIN_BW: |
---|
| 2327 | + if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR) |
---|
| 2328 | + rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx); |
---|
| 2329 | + break; |
---|
| 2330 | + case ICE_MAX_BW: |
---|
| 2331 | + if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR) |
---|
| 2332 | + rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx); |
---|
| 2333 | + break; |
---|
| 2334 | + case ICE_SHARED_BW: |
---|
| 2335 | + if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED) |
---|
| 2336 | + rl_prof_id = le16_to_cpu(data->srl_id); |
---|
| 2337 | + break; |
---|
| 2338 | + default: |
---|
| 2339 | + break; |
---|
| 2340 | + } |
---|
| 2341 | + |
---|
| 2342 | + return rl_prof_id; |
---|
| 2343 | +} |
---|
| 2344 | + |
---|
| 2345 | +/** |
---|
| 2346 | + * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer |
---|
| 2347 | + * @pi: port information structure |
---|
| 2348 | + * @rl_type: type of rate limit BW - min, max, or shared |
---|
| 2349 | + * @layer_index: layer index |
---|
| 2350 | + * |
---|
| 2351 | + * This function returns requested profile creation layer. |
---|
| 2352 | + */ |
---|
| 2353 | +static u8 |
---|
| 2354 | +ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type, |
---|
| 2355 | + u8 layer_index) |
---|
| 2356 | +{ |
---|
| 2357 | + struct ice_hw *hw = pi->hw; |
---|
| 2358 | + |
---|
| 2359 | + if (layer_index >= hw->num_tx_sched_layers) |
---|
| 2360 | + return ICE_SCHED_INVAL_LAYER_NUM; |
---|
| 2361 | + switch (rl_type) { |
---|
| 2362 | + case ICE_MIN_BW: |
---|
| 2363 | + if (hw->layer_info[layer_index].max_cir_rl_profiles) |
---|
| 2364 | + return layer_index; |
---|
| 2365 | + break; |
---|
| 2366 | + case ICE_MAX_BW: |
---|
| 2367 | + if (hw->layer_info[layer_index].max_eir_rl_profiles) |
---|
| 2368 | + return layer_index; |
---|
| 2369 | + break; |
---|
| 2370 | + case ICE_SHARED_BW: |
---|
| 2371 | + /* if current layer doesn't support SRL profile creation |
---|
| 2372 | + * then try a layer up or down. |
---|
| 2373 | + */ |
---|
| 2374 | + if (hw->layer_info[layer_index].max_srl_profiles) |
---|
| 2375 | + return layer_index; |
---|
| 2376 | + else if (layer_index < hw->num_tx_sched_layers - 1 && |
---|
| 2377 | + hw->layer_info[layer_index + 1].max_srl_profiles) |
---|
| 2378 | + return layer_index + 1; |
---|
| 2379 | + else if (layer_index > 0 && |
---|
| 2380 | + hw->layer_info[layer_index - 1].max_srl_profiles) |
---|
| 2381 | + return layer_index - 1; |
---|
| 2382 | + break; |
---|
| 2383 | + default: |
---|
| 2384 | + break; |
---|
| 2385 | + } |
---|
| 2386 | + return ICE_SCHED_INVAL_LAYER_NUM; |
---|
| 2387 | +} |
---|
| 2388 | + |
---|
| 2389 | +/** |
---|
| 2390 | + * ice_sched_get_srl_node - get shared rate limit node |
---|
| 2391 | + * @node: tree node |
---|
| 2392 | + * @srl_layer: shared rate limit layer |
---|
| 2393 | + * |
---|
| 2394 | + * This function returns SRL node to be used for shared rate limit purpose. |
---|
| 2395 | + * The caller needs to hold scheduler lock. |
---|
| 2396 | + */ |
---|
| 2397 | +static struct ice_sched_node * |
---|
| 2398 | +ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer) |
---|
| 2399 | +{ |
---|
| 2400 | + if (srl_layer > node->tx_sched_layer) |
---|
| 2401 | + return node->children[0]; |
---|
| 2402 | + else if (srl_layer < node->tx_sched_layer) |
---|
| 2403 | + /* Node can't be created without a parent. It will always |
---|
| 2404 | + * have a valid parent except root node. |
---|
| 2405 | + */ |
---|
| 2406 | + return node->parent; |
---|
| 2407 | + else |
---|
| 2408 | + return node; |
---|
| 2409 | +} |
---|
| 2410 | + |
---|
| 2411 | +/** |
---|
| 2412 | + * ice_sched_rm_rl_profile - remove RL profile ID |
---|
| 2413 | + * @pi: port information structure |
---|
| 2414 | + * @layer_num: layer number where profiles are saved |
---|
| 2415 | + * @profile_type: profile type like EIR, CIR, or SRL |
---|
| 2416 | + * @profile_id: profile ID to remove |
---|
| 2417 | + * |
---|
| 2418 | + * This function removes rate limit profile from layer 'layer_num' of type |
---|
| 2419 | + * 'profile_type' and profile ID as 'profile_id'. The caller needs to hold |
---|
| 2420 | + * scheduler lock. |
---|
| 2421 | + */ |
---|
| 2422 | +static enum ice_status |
---|
| 2423 | +ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, |
---|
| 2424 | + u16 profile_id) |
---|
| 2425 | +{ |
---|
| 2426 | + struct ice_aqc_rl_profile_info *rl_prof_elem; |
---|
| 2427 | + enum ice_status status = 0; |
---|
| 2428 | + |
---|
| 2429 | + if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) |
---|
| 2430 | + return ICE_ERR_PARAM; |
---|
| 2431 | + /* Check the existing list for RL profile */ |
---|
| 2432 | + list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], |
---|
| 2433 | + list_entry) |
---|
| 2434 | + if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) == |
---|
| 2435 | + profile_type && |
---|
| 2436 | + le16_to_cpu(rl_prof_elem->profile.profile_id) == |
---|
| 2437 | + profile_id) { |
---|
| 2438 | + if (rl_prof_elem->prof_id_ref) |
---|
| 2439 | + rl_prof_elem->prof_id_ref--; |
---|
| 2440 | + |
---|
| 2441 | + /* Remove old profile ID from database */ |
---|
| 2442 | + status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem); |
---|
| 2443 | + if (status && status != ICE_ERR_IN_USE) |
---|
| 2444 | + ice_debug(pi->hw, ICE_DBG_SCHED, |
---|
| 2445 | + "Remove rl profile failed\n"); |
---|
| 2446 | + break; |
---|
| 2447 | + } |
---|
| 2448 | + if (status == ICE_ERR_IN_USE) |
---|
| 2449 | + status = 0; |
---|
| 2450 | + return status; |
---|
| 2451 | +} |
---|
| 2452 | + |
---|
| 2453 | +/** |
---|
| 2454 | + * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default |
---|
| 2455 | + * @pi: port information structure |
---|
| 2456 | + * @node: pointer to node structure |
---|
| 2457 | + * @rl_type: rate limit type min, max, or shared |
---|
| 2458 | + * @layer_num: layer number where RL profiles are saved |
---|
| 2459 | + * |
---|
| 2460 | + * This function configures node element's BW rate limit profile ID of |
---|
| 2461 | + * type CIR, EIR, or SRL to default. This function needs to be called |
---|
| 2462 | + * with the scheduler lock held. |
---|
| 2463 | + */ |
---|
| 2464 | +static enum ice_status |
---|
| 2465 | +ice_sched_set_node_bw_dflt(struct ice_port_info *pi, |
---|
| 2466 | + struct ice_sched_node *node, |
---|
| 2467 | + enum ice_rl_type rl_type, u8 layer_num) |
---|
| 2468 | +{ |
---|
| 2469 | + enum ice_status status; |
---|
| 2470 | + struct ice_hw *hw; |
---|
| 2471 | + u8 profile_type; |
---|
| 2472 | + u16 rl_prof_id; |
---|
| 2473 | + u16 old_id; |
---|
| 2474 | + |
---|
| 2475 | + hw = pi->hw; |
---|
| 2476 | + switch (rl_type) { |
---|
| 2477 | + case ICE_MIN_BW: |
---|
| 2478 | + profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR; |
---|
| 2479 | + rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; |
---|
| 2480 | + break; |
---|
| 2481 | + case ICE_MAX_BW: |
---|
| 2482 | + profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR; |
---|
| 2483 | + rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID; |
---|
| 2484 | + break; |
---|
| 2485 | + case ICE_SHARED_BW: |
---|
| 2486 | + profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL; |
---|
| 2487 | + /* No SRL is configured for default case */ |
---|
| 2488 | + rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID; |
---|
| 2489 | + break; |
---|
| 2490 | + default: |
---|
| 2491 | + return ICE_ERR_PARAM; |
---|
| 2492 | + } |
---|
| 2493 | + /* Save existing RL prof ID for later clean up */ |
---|
| 2494 | + old_id = ice_sched_get_node_rl_prof_id(node, rl_type); |
---|
| 2495 | + /* Configure BW scheduling parameters */ |
---|
| 2496 | + status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); |
---|
| 2497 | + if (status) |
---|
| 2498 | + return status; |
---|
| 2499 | + |
---|
| 2500 | + /* Remove stale RL profile ID */ |
---|
| 2501 | + if (old_id == ICE_SCHED_DFLT_RL_PROF_ID || |
---|
| 2502 | + old_id == ICE_SCHED_INVAL_PROF_ID) |
---|
| 2503 | + return 0; |
---|
| 2504 | + |
---|
| 2505 | + return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id); |
---|
| 2506 | +} |
---|
| 2507 | + |
---|
| 2508 | +/** |
---|
| 2509 | + * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness |
---|
| 2510 | + * @pi: port information structure |
---|
| 2511 | + * @node: pointer to node structure |
---|
| 2512 | + * @layer_num: layer number where rate limit profiles are saved |
---|
| 2513 | + * @rl_type: rate limit type min, max, or shared |
---|
| 2514 | + * @bw: bandwidth value |
---|
| 2515 | + * |
---|
| 2516 | + * This function prepares node element's bandwidth to SRL or EIR exclusively. |
---|
| 2517 | + * EIR BW and Shared BW profiles are mutually exclusive and hence only one of |
---|
| 2518 | + * them may be set for any given element. This function needs to be called |
---|
| 2519 | + * with the scheduler lock held. |
---|
| 2520 | + */ |
---|
| 2521 | +static enum ice_status |
---|
| 2522 | +ice_sched_set_eir_srl_excl(struct ice_port_info *pi, |
---|
| 2523 | + struct ice_sched_node *node, |
---|
| 2524 | + u8 layer_num, enum ice_rl_type rl_type, u32 bw) |
---|
| 2525 | +{ |
---|
| 2526 | + if (rl_type == ICE_SHARED_BW) { |
---|
| 2527 | + /* an SRL node is passed in this case; it may be a different node */ |
---|
| 2528 | + if (bw == ICE_SCHED_DFLT_BW) |
---|
| 2529 | + /* SRL being removed, ice_sched_cfg_node_bw_lmt() |
---|
| 2530 | + * enables EIR to default. EIR is not set in this |
---|
| 2531 | + * case, so no additional action is required. |
---|
| 2532 | + */ |
---|
| 2533 | + return 0; |
---|
| 2534 | + |
---|
| 2535 | + /* SRL being configured, set EIR to default here. |
---|
| 2536 | + * ice_sched_cfg_node_bw_lmt() disables EIR when it |
---|
| 2537 | + * configures SRL |
---|
| 2538 | + */ |
---|
| 2539 | + return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW, |
---|
| 2540 | + layer_num); |
---|
| 2541 | + } else if (rl_type == ICE_MAX_BW && |
---|
| 2542 | + node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) { |
---|
| 2543 | + /* Remove Shared profile. Set default shared BW call |
---|
| 2544 | + * removes shared profile for a node. |
---|
| 2545 | + */ |
---|
| 2546 | + return ice_sched_set_node_bw_dflt(pi, node, |
---|
| 2547 | + ICE_SHARED_BW, |
---|
| 2548 | + layer_num); |
---|
| 2549 | + } |
---|
| 2550 | + return 0; |
---|
| 2551 | +} |
---|
| 2552 | + |
---|
| 2553 | +/** |
---|
| 2554 | + * ice_sched_set_node_bw - set node's bandwidth |
---|
| 2555 | + * @pi: port information structure |
---|
| 2556 | + * @node: tree node |
---|
| 2557 | + * @rl_type: rate limit type min, max, or shared |
---|
| 2558 | + * @bw: bandwidth in Kbps - Kilo bits per sec |
---|
| 2559 | + * @layer_num: layer number |
---|
| 2560 | + * |
---|
| 2561 | + * This function adds a new profile corresponding to the requested BW, configures |
---|
| 2562 | + * the node's RL profile ID of type CIR, EIR, or SRL, and removes the old profile |
---|
| 2563 | + * ID from the local database. The caller must hold the scheduler lock. |
---|
| 2564 | + */ |
---|
| 2565 | +static enum ice_status |
---|
| 2566 | +ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node, |
---|
| 2567 | + enum ice_rl_type rl_type, u32 bw, u8 layer_num) |
---|
| 2568 | +{ |
---|
| 2569 | + struct ice_aqc_rl_profile_info *rl_prof_info; |
---|
| 2570 | + enum ice_status status = ICE_ERR_PARAM; |
---|
| 2571 | + struct ice_hw *hw = pi->hw; |
---|
| 2572 | + u16 old_id, rl_prof_id; |
---|
| 2573 | + |
---|
| 2574 | + rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num); |
---|
| 2575 | + if (!rl_prof_info) |
---|
| 2576 | + return status; |
---|
| 2577 | + |
---|
| 2578 | + rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id); |
---|
| 2579 | + |
---|
| 2580 | + /* Save existing RL prof ID for later clean up */ |
---|
| 2581 | + old_id = ice_sched_get_node_rl_prof_id(node, rl_type); |
---|
| 2582 | + /* Configure BW scheduling parameters */ |
---|
| 2583 | + status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id); |
---|
| 2584 | + if (status) |
---|
| 2585 | + return status; |
---|
| 2586 | + |
---|
| 2587 | + /* New changes have been applied */ |
---|
| 2588 | + /* Increment the profile ID reference count */ |
---|
| 2589 | + rl_prof_info->prof_id_ref++; |
---|
| 2590 | + |
---|
| 2591 | + /* Check for old ID removal */ |
---|
| 2592 | + if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) || |
---|
| 2593 | + old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id) |
---|
| 2594 | + return 0; |
---|
| 2595 | + |
---|
| 2596 | + return ice_sched_rm_rl_profile(pi, layer_num, |
---|
| 2597 | + rl_prof_info->profile.flags & |
---|
| 2598 | + ICE_AQC_RL_PROFILE_TYPE_M, old_id); |
---|
| 2599 | +} |
---|
| 2600 | + |
---|
| 2601 | +/** |
---|
| 2602 | + * ice_sched_set_node_bw_lmt - set node's BW limit |
---|
| 2603 | + * @pi: port information structure |
---|
| 2604 | + * @node: tree node |
---|
| 2605 | + * @rl_type: rate limit type min, max, or shared |
---|
| 2606 | + * @bw: bandwidth in Kbps - Kilo bits per sec |
---|
| 2607 | + * |
---|
| 2608 | + * It updates the node's BW limit parameters, such as the BW RL profile ID of |
---|
| 2609 | + * type CIR, EIR, or SRL. The caller needs to hold the scheduler lock. |
---|
| 2610 | + */ |
---|
| 2611 | +static enum ice_status |
---|
| 2612 | +ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node, |
---|
| 2613 | + enum ice_rl_type rl_type, u32 bw) |
---|
| 2614 | +{ |
---|
| 2615 | + struct ice_sched_node *cfg_node = node; |
---|
| 2616 | + enum ice_status status; |
---|
| 2617 | + |
---|
| 2618 | + struct ice_hw *hw; |
---|
| 2619 | + u8 layer_num; |
---|
| 2620 | + |
---|
| 2621 | + if (!pi) |
---|
| 2622 | + return ICE_ERR_PARAM; |
---|
| 2623 | + hw = pi->hw; |
---|
| 2624 | + /* Remove unused RL profile IDs from HW and SW DB */ |
---|
| 2625 | + ice_sched_rm_unused_rl_prof(pi); |
---|
| 2626 | + layer_num = ice_sched_get_rl_prof_layer(pi, rl_type, |
---|
| 2627 | + node->tx_sched_layer); |
---|
| 2628 | + if (layer_num >= hw->num_tx_sched_layers) |
---|
| 2629 | + return ICE_ERR_PARAM; |
---|
| 2630 | + |
---|
| 2631 | + if (rl_type == ICE_SHARED_BW) { |
---|
| 2632 | + /* SRL node may be different */ |
---|
| 2633 | + cfg_node = ice_sched_get_srl_node(node, layer_num); |
---|
| 2634 | + if (!cfg_node) |
---|
| 2635 | + return ICE_ERR_CFG; |
---|
| 2636 | + } |
---|
| 2637 | + /* EIR BW and Shared BW profiles are mutually exclusive and |
---|
| 2638 | + * hence only one of them may be set for any given element |
---|
| 2639 | + */ |
---|
| 2640 | + status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type, |
---|
| 2641 | + bw); |
---|
| 2642 | + if (status) |
---|
| 2643 | + return status; |
---|
| 2644 | + if (bw == ICE_SCHED_DFLT_BW) |
---|
| 2645 | + return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type, |
---|
| 2646 | + layer_num); |
---|
| 2647 | + return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num); |
---|
| 2648 | +} |
---|
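
To see the EIR/SRL exclusiveness from a caller's perspective, here is a minimal sketch (not part of the patch): the example_set_srl_then_eir() helper and the Kbps values are hypothetical, and the scheduler lock is assumed to be held, as ice_sched_set_node_bw_lmt() requires.

```c
/* Hypothetical helper, for illustration only: apply a shared (SRL) limit and
 * then a max (EIR) limit on the same node. The first call resets EIR to its
 * default before programming the SRL; the second call removes the shared
 * profile before programming EIR, so the two never coexist on one element.
 * Assumes pi and node are valid and pi->sched_lock is already held.
 */
static enum ice_status
example_set_srl_then_eir(struct ice_port_info *pi, struct ice_sched_node *node)
{
	enum ice_status status;

	/* 500000 Kbps (500 Mbps) shared limit - the value is illustrative */
	status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, 500000);
	if (status)
		return status;

	/* EIR on the node is back at its default at this point */
	return ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, 1000000);
}
```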
| 2649 | + |
---|
| 2650 | +/** |
---|
| 2651 | + * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default |
---|
| 2652 | + * @pi: port information structure |
---|
| 2653 | + * @node: pointer to node structure |
---|
| 2654 | + * @rl_type: rate limit type min, max, or shared |
---|
| 2655 | + * |
---|
| 2656 | + * This function configures node element's BW rate limit profile ID of |
---|
| 2657 | + * type CIR, EIR, or SRL to default. This function needs to be called |
---|
| 2658 | + * with the scheduler lock held. |
---|
| 2659 | + */ |
---|
| 2660 | +static enum ice_status |
---|
| 2661 | +ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi, |
---|
| 2662 | + struct ice_sched_node *node, |
---|
| 2663 | + enum ice_rl_type rl_type) |
---|
| 2664 | +{ |
---|
| 2665 | + return ice_sched_set_node_bw_lmt(pi, node, rl_type, |
---|
| 2666 | + ICE_SCHED_DFLT_BW); |
---|
| 2667 | +} |
---|
| 2668 | + |
---|
| 2669 | +/** |
---|
| 2670 | + * ice_sched_validate_srl_node - Check node for SRL applicability |
---|
| 2671 | + * @node: sched node to configure |
---|
| 2672 | + * @sel_layer: selected SRL layer |
---|
| 2673 | + * |
---|
| 2674 | + * This function checks if the SRL can be applied to a selected layer node on |
---|
| 2675 | + * behalf of the requested node (first argument). This function needs to be |
---|
| 2676 | + * called with scheduler lock held. |
---|
| 2677 | + */ |
---|
| 2678 | +static enum ice_status |
---|
| 2679 | +ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer) |
---|
| 2680 | +{ |
---|
| 2681 | + /* SRL profiles are not available on all layers. Check if the |
---|
| 2682 | + * SRL profile can be applied to a node above or below the |
---|
| 2683 | + * requested node. SRL configuration is possible only if the |
---|
| 2684 | + * selected layer's node has a single child. |
---|
| 2685 | + */ |
---|
| 2686 | + if (sel_layer == node->tx_sched_layer || |
---|
| 2687 | + ((sel_layer == node->tx_sched_layer + 1) && |
---|
| 2688 | + node->num_children == 1) || |
---|
| 2689 | + ((sel_layer == node->tx_sched_layer - 1) && |
---|
| 2690 | + (node->parent && node->parent->num_children == 1))) |
---|
| 2691 | + return 0; |
---|
| 2692 | + |
---|
| 2693 | + return ICE_ERR_CFG; |
---|
| 2694 | +} |
---|
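
The layer rule above can be read as: limiting the adjacent node is only allowed when that is equivalent to limiting the requested node. The sketch below shows how a caller might combine this check with the shared-limit path; example_apply_srl() is a hypothetical name, and the flow merely mirrors the pattern used later in ice_sched_set_q_bw_lmt().

```c
/* Hypothetical helper, for illustration only. The check passes when the SRL
 * layer is the node's own layer, or is tx_sched_layer + 1 and the node has a
 * single child, or is tx_sched_layer - 1 and the node is its parent's only
 * child - i.e. the adjacent node's shared limit effectively limits this node.
 * Assumes the scheduler lock is held.
 */
static enum ice_status
example_apply_srl(struct ice_port_info *pi, struct ice_sched_node *node, u32 bw)
{
	u8 srl_layer;

	srl_layer = ice_sched_get_rl_prof_layer(pi, ICE_SHARED_BW,
						node->tx_sched_layer);
	if (srl_layer >= pi->hw->num_tx_sched_layers)
		return ICE_ERR_PARAM;
	if (ice_sched_validate_srl_node(node, srl_layer))
		return ICE_ERR_CFG;	/* SRL cannot stand in for this node */
	return ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, bw);
}
```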
| 2695 | + |
---|
| 2696 | +/** |
---|
| 2697 | + * ice_sched_save_q_bw - save queue node's BW information |
---|
| 2698 | + * @q_ctx: queue context structure |
---|
| 2699 | + * @rl_type: rate limit type min, max, or shared |
---|
| 2700 | + * @bw: bandwidth in Kbps - Kilo bits per sec |
---|
| 2701 | + * |
---|
| 2702 | + * Save the BW information of a queue type node for post-replay use. |
---|
| 2703 | + */ |
---|
| 2704 | +static enum ice_status |
---|
| 2705 | +ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw) |
---|
| 2706 | +{ |
---|
| 2707 | + switch (rl_type) { |
---|
| 2708 | + case ICE_MIN_BW: |
---|
| 2709 | + ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw); |
---|
| 2710 | + break; |
---|
| 2711 | + case ICE_MAX_BW: |
---|
| 2712 | + ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw); |
---|
| 2713 | + break; |
---|
| 2714 | + case ICE_SHARED_BW: |
---|
| 2715 | + ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw); |
---|
| 2716 | + break; |
---|
| 2717 | + default: |
---|
| 2718 | + return ICE_ERR_PARAM; |
---|
| 2719 | + } |
---|
| 2720 | + return 0; |
---|
| 2721 | +} |
---|
| 2722 | + |
---|
| 2723 | +/** |
---|
| 2724 | + * ice_sched_set_q_bw_lmt - sets queue BW limit |
---|
| 2725 | + * @pi: port information structure |
---|
| 2726 | + * @vsi_handle: sw VSI handle |
---|
| 2727 | + * @tc: traffic class |
---|
| 2728 | + * @q_handle: software queue handle |
---|
| 2729 | + * @rl_type: min, max, or shared |
---|
| 2730 | + * @bw: bandwidth in Kbps |
---|
| 2731 | + * |
---|
| 2732 | + * This function sets BW limit of queue scheduling node. |
---|
| 2733 | + */ |
---|
| 2734 | +static enum ice_status |
---|
| 2735 | +ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
---|
| 2736 | + u16 q_handle, enum ice_rl_type rl_type, u32 bw) |
---|
| 2737 | +{ |
---|
| 2738 | + enum ice_status status = ICE_ERR_PARAM; |
---|
| 2739 | + struct ice_sched_node *node; |
---|
| 2740 | + struct ice_q_ctx *q_ctx; |
---|
| 2741 | + |
---|
| 2742 | + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) |
---|
| 2743 | + return ICE_ERR_PARAM; |
---|
| 2744 | + mutex_lock(&pi->sched_lock); |
---|
| 2745 | + q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle); |
---|
| 2746 | + if (!q_ctx) |
---|
| 2747 | + goto exit_q_bw_lmt; |
---|
| 2748 | + node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); |
---|
| 2749 | + if (!node) { |
---|
| 2750 | + ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n"); |
---|
| 2751 | + goto exit_q_bw_lmt; |
---|
| 2752 | + } |
---|
| 2753 | + |
---|
| 2754 | + /* Return error if it is not a leaf node */ |
---|
| 2755 | + if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) |
---|
| 2756 | + goto exit_q_bw_lmt; |
---|
| 2757 | + |
---|
| 2758 | + /* SRL bandwidth layer selection */ |
---|
| 2759 | + if (rl_type == ICE_SHARED_BW) { |
---|
| 2760 | + u8 sel_layer; /* selected layer */ |
---|
| 2761 | + |
---|
| 2762 | + sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type, |
---|
| 2763 | + node->tx_sched_layer); |
---|
| 2764 | + if (sel_layer >= pi->hw->num_tx_sched_layers) { |
---|
| 2765 | + status = ICE_ERR_PARAM; |
---|
| 2766 | + goto exit_q_bw_lmt; |
---|
| 2767 | + } |
---|
| 2768 | + status = ice_sched_validate_srl_node(node, sel_layer); |
---|
| 2769 | + if (status) |
---|
| 2770 | + goto exit_q_bw_lmt; |
---|
| 2771 | + } |
---|
| 2772 | + |
---|
| 2773 | + if (bw == ICE_SCHED_DFLT_BW) |
---|
| 2774 | + status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type); |
---|
| 2775 | + else |
---|
| 2776 | + status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw); |
---|
| 2777 | + |
---|
| 2778 | + if (!status) |
---|
| 2779 | + status = ice_sched_save_q_bw(q_ctx, rl_type, bw); |
---|
| 2780 | + |
---|
| 2781 | +exit_q_bw_lmt: |
---|
| 2782 | + mutex_unlock(&pi->sched_lock); |
---|
| 2783 | + return status; |
---|
| 2784 | +} |
---|
| 2785 | + |
---|
| 2786 | +/** |
---|
| 2787 | + * ice_cfg_q_bw_lmt - configure queue BW limit |
---|
| 2788 | + * @pi: port information structure |
---|
| 2789 | + * @vsi_handle: sw VSI handle |
---|
| 2790 | + * @tc: traffic class |
---|
| 2791 | + * @q_handle: software queue handle |
---|
| 2792 | + * @rl_type: min, max, or shared |
---|
| 2793 | + * @bw: bandwidth in Kbps |
---|
| 2794 | + * |
---|
| 2795 | + * This function configures BW limit of queue scheduling node. |
---|
| 2796 | + */ |
---|
| 2797 | +enum ice_status |
---|
| 2798 | +ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
---|
| 2799 | + u16 q_handle, enum ice_rl_type rl_type, u32 bw) |
---|
| 2800 | +{ |
---|
| 2801 | + return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, |
---|
| 2802 | + bw); |
---|
| 2803 | +} |
---|
| 2804 | + |
---|
| 2805 | +/** |
---|
| 2806 | + * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit |
---|
| 2807 | + * @pi: port information structure |
---|
| 2808 | + * @vsi_handle: sw VSI handle |
---|
| 2809 | + * @tc: traffic class |
---|
| 2810 | + * @q_handle: software queue handle |
---|
| 2811 | + * @rl_type: min, max, or shared |
---|
| 2812 | + * |
---|
| 2813 | + * This function configures BW default limit of queue scheduling node. |
---|
| 2814 | + */ |
---|
| 2815 | +enum ice_status |
---|
| 2816 | +ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc, |
---|
| 2817 | + u16 q_handle, enum ice_rl_type rl_type) |
---|
| 2818 | +{ |
---|
| 2819 | + return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type, |
---|
| 2820 | + ICE_SCHED_DFLT_BW); |
---|
| 2821 | +} |
---|
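
Since ice_cfg_q_bw_lmt() and ice_cfg_q_bw_dflt_lmt() are the exported entry points, a short usage sketch may help; the TC/queue handles and the 100000 Kbps rate below are made-up values, not taken from the patch.

```c
/* Hypothetical caller, for illustration only: cap queue 0 of TC 0 on a VSI
 * to 100 Mbps and later restore the default (unlimited) max rate. The
 * vsi_handle, tc and q_handle values would normally come from the VSI and
 * ring being configured.
 */
static enum ice_status
example_cap_queue(struct ice_port_info *pi, u16 vsi_handle)
{
	enum ice_status status;

	status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW, 100000);
	if (status)
		return status;

	/* ... later, remove the cap again ... */
	return ice_cfg_q_bw_dflt_lmt(pi, vsi_handle, 0, 0, ICE_MAX_BW);
}
```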
| 2822 | + |
---|
| 2823 | +/** |
---|
| 2824 | + * ice_cfg_rl_burst_size - Set burst size value |
---|
| 2825 | + * @hw: pointer to the HW struct |
---|
| 2826 | + * @bytes: burst size in bytes |
---|
| 2827 | + * |
---|
| 2828 | + * This function configures/sets the burst size to the requested new value. |
---|
| 2829 | + * The new burst size value is used for future rate limit calls. It doesn't |
---|
| 2830 | + * change the existing or previously created RL profiles. |
---|
| 2831 | + */ |
---|
| 2832 | +enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes) |
---|
| 2833 | +{ |
---|
| 2834 | + u16 burst_size_to_prog; |
---|
| 2835 | + |
---|
| 2836 | + if (bytes < ICE_MIN_BURST_SIZE_ALLOWED || |
---|
| 2837 | + bytes > ICE_MAX_BURST_SIZE_ALLOWED) |
---|
| 2838 | + return ICE_ERR_PARAM; |
---|
| 2839 | + if (ice_round_to_num(bytes, 64) <= |
---|
| 2840 | + ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) { |
---|
| 2841 | + /* 64 byte granularity case */ |
---|
| 2842 | + /* Disable MSB granularity bit */ |
---|
| 2843 | + burst_size_to_prog = ICE_64_BYTE_GRANULARITY; |
---|
| 2844 | + /* round number to nearest 64 byte granularity */ |
---|
| 2845 | + bytes = ice_round_to_num(bytes, 64); |
---|
| 2846 | + /* The value is in 64 byte chunks */ |
---|
| 2847 | + burst_size_to_prog |= (u16)(bytes / 64); |
---|
| 2848 | + } else { |
---|
| 2849 | + /* k bytes granularity case */ |
---|
| 2850 | + /* Enable MSB granularity bit */ |
---|
| 2851 | + burst_size_to_prog = ICE_KBYTE_GRANULARITY; |
---|
| 2852 | + /* round number to nearest 1024 granularity */ |
---|
| 2853 | + bytes = ice_round_to_num(bytes, 1024); |
---|
| 2854 | + /* check rounding doesn't go beyond allowed */ |
---|
| 2855 | + if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY) |
---|
| 2856 | + bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY; |
---|
| 2857 | + /* The value is in k bytes */ |
---|
| 2858 | + burst_size_to_prog |= (u16)(bytes / 1024); |
---|
| 2859 | + } |
---|
| 2860 | + hw->max_burst_size = burst_size_to_prog; |
---|
| 2861 | + return 0; |
---|
| 2862 | +} |
---|
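
A worked example may help here; the 10000 byte figure below is purely illustrative and assumes the request lies within the 64 byte granularity range.

```c
/* Illustration only: program a hypothetical 10000 byte burst size. Since the
 * code rounds to the nearest multiple, 10000 becomes 9984 bytes (156 * 64)
 * and hw->max_burst_size holds 156 chunks with the MSB granularity bit clear.
 * Requests beyond the 64 byte ceiling are instead rounded to the nearest
 * 1024 bytes and stored in KB chunks with the MSB bit set.
 */
static enum ice_status example_set_burst(struct ice_hw *hw)
{
	return ice_cfg_rl_burst_size(hw, 10000);
}
```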
| 2863 | + |
---|
| 2864 | +/** |
---|
| 2865 | + * ice_sched_replay_node_prio - re-configure node priority |
---|
| 2866 | + * @hw: pointer to the HW struct |
---|
| 2867 | + * @node: sched node to configure |
---|
| 2868 | + * @priority: priority value |
---|
| 2869 | + * |
---|
| 2870 | + * This function configures node element's priority value. It |
---|
| 2871 | + * needs to be called with scheduler lock held. |
---|
| 2872 | + */ |
---|
| 2873 | +static enum ice_status |
---|
| 2874 | +ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node, |
---|
| 2875 | + u8 priority) |
---|
| 2876 | +{ |
---|
| 2877 | + struct ice_aqc_txsched_elem_data buf; |
---|
| 2878 | + struct ice_aqc_txsched_elem *data; |
---|
| 2879 | + enum ice_status status; |
---|
| 2880 | + |
---|
| 2881 | + buf = node->info; |
---|
| 2882 | + data = &buf.data; |
---|
| 2883 | + data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC; |
---|
| 2884 | + data->generic = priority; |
---|
| 2885 | + |
---|
| 2886 | + /* Configure element */ |
---|
| 2887 | + status = ice_sched_update_elem(hw, node, &buf); |
---|
| 2888 | + return status; |
---|
| 2889 | +} |
---|
| 2890 | + |
---|
| 2891 | +/** |
---|
| 2892 | + * ice_sched_replay_node_bw - replay node(s) BW |
---|
| 2893 | + * @hw: pointer to the HW struct |
---|
| 2894 | + * @node: sched node to configure |
---|
| 2895 | + * @bw_t_info: BW type information |
---|
| 2896 | + * |
---|
| 2897 | + * This function restores node's BW from bw_t_info. The caller needs |
---|
| 2898 | + * to hold the scheduler lock. |
---|
| 2899 | + */ |
---|
| 2900 | +static enum ice_status |
---|
| 2901 | +ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node, |
---|
| 2902 | + struct ice_bw_type_info *bw_t_info) |
---|
| 2903 | +{ |
---|
| 2904 | + struct ice_port_info *pi = hw->port_info; |
---|
| 2905 | + enum ice_status status = ICE_ERR_PARAM; |
---|
| 2906 | + u16 bw_alloc; |
---|
| 2907 | + |
---|
| 2908 | + if (!node) |
---|
| 2909 | + return status; |
---|
| 2910 | + if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT)) |
---|
| 2911 | + return 0; |
---|
| 2912 | + if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) { |
---|
| 2913 | + status = ice_sched_replay_node_prio(hw, node, |
---|
| 2914 | + bw_t_info->generic); |
---|
| 2915 | + if (status) |
---|
| 2916 | + return status; |
---|
| 2917 | + } |
---|
| 2918 | + if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) { |
---|
| 2919 | + status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW, |
---|
| 2920 | + bw_t_info->cir_bw.bw); |
---|
| 2921 | + if (status) |
---|
| 2922 | + return status; |
---|
| 2923 | + } |
---|
| 2924 | + if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) { |
---|
| 2925 | + bw_alloc = bw_t_info->cir_bw.bw_alloc; |
---|
| 2926 | + status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW, |
---|
| 2927 | + bw_alloc); |
---|
| 2928 | + if (status) |
---|
| 2929 | + return status; |
---|
| 2930 | + } |
---|
| 2931 | + if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) { |
---|
| 2932 | + status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW, |
---|
| 2933 | + bw_t_info->eir_bw.bw); |
---|
| 2934 | + if (status) |
---|
| 2935 | + return status; |
---|
| 2936 | + } |
---|
| 2937 | + if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) { |
---|
| 2938 | + bw_alloc = bw_t_info->eir_bw.bw_alloc; |
---|
| 2939 | + status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW, |
---|
| 2940 | + bw_alloc); |
---|
| 2941 | + if (status) |
---|
| 2942 | + return status; |
---|
| 2943 | + } |
---|
| 2944 | + if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap)) |
---|
| 2945 | + status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW, |
---|
| 2946 | + bw_t_info->shared_bw); |
---|
| 2947 | + return status; |
---|
| 2948 | +} |
---|
| 2949 | + |
---|
| 2950 | +/** |
---|
| 2951 | + * ice_sched_replay_q_bw - replay queue type node BW |
---|
| 2952 | + * @pi: port information structure |
---|
| 2953 | + * @q_ctx: queue context structure |
---|
| 2954 | + * |
---|
| 2955 | + * This function replays the bandwidth of a queue type node. It needs to be |
---|
| 2956 | + * called with the scheduler lock held. |
---|
| 2957 | + */ |
---|
| 2958 | +enum ice_status |
---|
| 2959 | +ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx) |
---|
| 2960 | +{ |
---|
| 2961 | + struct ice_sched_node *q_node; |
---|
| 2962 | + |
---|
| 2963 | + /* The following also checks the presence of the node in the tree */ |
---|
| 2964 | + q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid); |
---|
| 2965 | + if (!q_node) |
---|
| 2966 | + return ICE_ERR_PARAM; |
---|
| 2967 | + return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info); |
---|
| 2968 | +} |
---|
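
Finally, a sketch of how the replay entry point might be driven after a reset; the example_replay_tc_queues() helper, the q_ctx array, and num_q are hypothetical, and the scheduler lock is assumed to be held, as ice_sched_replay_q_bw() requires.

```c
/* Hypothetical helper, for illustration only: walk the saved queue contexts
 * of one VSI/TC after a rebuild and restore each queue's BW settings from
 * the bw_t_info captured earlier by ice_sched_save_q_bw(). Assumes the
 * scheduler lock is held.
 */
static enum ice_status
example_replay_tc_queues(struct ice_port_info *pi, struct ice_q_ctx *q_ctx,
			 u16 num_q)
{
	enum ice_status status = 0;
	u16 i;

	for (i = 0; i < num_q; i++) {
		status = ice_sched_replay_q_bw(pi, &q_ctx[i]);
		if (status)
			break;
	}
	return status;
}
```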