1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Intel Corporation. */
3
4#include "ice_sched.h"
5
6/**
7 * ice_sched_add_root_node - Insert the Tx scheduler root node in SW DB
8 * @pi: port information structure
9 * @info: Scheduler element information from firmware
10 *
11 * This function inserts the root node of the scheduling tree topology
12 * to the SW DB.
13 */
14static enum ice_status
15ice_sched_add_root_node(struct ice_port_info *pi,
16 struct ice_aqc_txsched_elem_data *info)
17{
18 struct ice_sched_node *root;
19 struct ice_hw *hw;
20
21 if (!pi)
22 return ICE_ERR_PARAM;
23
24 hw = pi->hw;
25
26 root = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*root), GFP_KERNEL);
27 if (!root)
28 return ICE_ERR_NO_MEMORY;
29
30 /* coverity[suspicious_sizeof] */
31 root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0],
32 sizeof(*root), GFP_KERNEL);
33 if (!root->children) {
34 devm_kfree(ice_hw_to_dev(hw), root);
35 return ICE_ERR_NO_MEMORY;
36 }
37
38 memcpy(&root->info, info, sizeof(*info));
39 pi->root = root;
40 return 0;
41}
42
43/**
44 * ice_sched_find_node_by_teid - Find the Tx scheduler node in SW DB
45 * @start_node: pointer to the starting ice_sched_node struct in a sub-tree
46 * @teid: node TEID to search
47 *
48 * This function searches for a node matching the TEID in the scheduling tree
49 * from the SW DB. The search is recursive and stops once it reaches the
50 * maximum supported layer.
51 *
52 * This function needs to be called when holding the port_info->sched_lock
53 */
54struct ice_sched_node *
55ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
56{
57 u16 i;
58
59 /* The TEID is the same as that of the start_node */
60 if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
61 return start_node;
62
63 /* The node has no children or is at the max layer */
64 if (!start_node->num_children ||
65 start_node->tx_sched_layer >= ICE_AQC_TOPO_MAX_LEVEL_NUM ||
66 start_node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF)
67 return NULL;
68
69 /* Check if TEID matches to any of the children nodes */
70 for (i = 0; i < start_node->num_children; i++)
71 if (ICE_TXSCHED_GET_NODE_TEID(start_node->children[i]) == teid)
72 return start_node->children[i];
73
74 /* Search within each child's sub-tree */
75 for (i = 0; i < start_node->num_children; i++) {
76 struct ice_sched_node *tmp;
77
78 tmp = ice_sched_find_node_by_teid(start_node->children[i],
79 teid);
80 if (tmp)
81 return tmp;
82 }
83
84 return NULL;
85}
86
87/**
88 * ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
89 * @hw: pointer to the HW struct
90 * @cmd_opc: cmd opcode
91 * @elems_req: number of elements to request
92 * @buf: pointer to buffer
93 * @buf_size: buffer size in bytes
94 * @elems_resp: returns total number of elements in response
95 * @cd: pointer to command details structure or NULL
96 *
97 * This function sends a scheduling elements cmd (cmd_opc)
98 */
99static enum ice_status
100ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
101 u16 elems_req, void *buf, u16 buf_size,
102 u16 *elems_resp, struct ice_sq_cd *cd)
103{
104 struct ice_aqc_sched_elem_cmd *cmd;
105 struct ice_aq_desc desc;
106 enum ice_status status;
107
108 cmd = &desc.params.sched_elem_cmd;
109 ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
110 cmd->num_elem_req = cpu_to_le16(elems_req);
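	/* indirect command: the attached buffer carries data for FW to read */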
111 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
112 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
113 if (!status && elems_resp)
114 *elems_resp = le16_to_cpu(cmd->num_elem_resp);
115
116 return status;
117}
118
119/**
120 * ice_aq_query_sched_elems - query scheduler elements
121 * @hw: pointer to the HW struct
122 * @elems_req: number of elements to query
123 * @buf: pointer to buffer
124 * @buf_size: buffer size in bytes
125 * @elems_ret: returns total number of elements returned
126 * @cd: pointer to command details structure or NULL
127 *
128 * Query scheduling elements (0x0404)
129 */
130enum ice_status
131ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
132 struct ice_aqc_get_elem *buf, u16 buf_size,
133 u16 *elems_ret, struct ice_sq_cd *cd)
134{
135 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_get_sched_elems,
136 elems_req, (void *)buf, buf_size,
137 elems_ret, cd);
138}
139
140/**
141 * ice_sched_add_node - Insert the Tx scheduler node in SW DB
142 * @pi: port information structure
143 * @layer: Scheduler layer of the node
144 * @info: Scheduler element information from firmware
145 *
146 * This function inserts a scheduler node to the SW DB.
147 */
148enum ice_status
149ice_sched_add_node(struct ice_port_info *pi, u8 layer,
150 struct ice_aqc_txsched_elem_data *info)
151{
152 struct ice_sched_node *parent;
153 struct ice_aqc_get_elem elem;
154 struct ice_sched_node *node;
155 enum ice_status status;
156 struct ice_hw *hw;
157
158 if (!pi)
159 return ICE_ERR_PARAM;
160
161 hw = pi->hw;
162
163 /* A valid parent node should be there */
164 parent = ice_sched_find_node_by_teid(pi->root,
165 le32_to_cpu(info->parent_teid));
166 if (!parent) {
167 ice_debug(hw, ICE_DBG_SCHED,
168 "Parent Node not found for parent_teid=0x%x\n",
169 le32_to_cpu(info->parent_teid));
170 return ICE_ERR_PARAM;
171 }
172
173 /* query the current node information from FW before adding it
174 * to the SW DB
175 */
176 status = ice_sched_query_elem(hw, le32_to_cpu(info->node_teid), &elem);
177 if (status)
178 return status;
179
180 node = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*node), GFP_KERNEL);
181 if (!node)
182 return ICE_ERR_NO_MEMORY;
183 if (hw->max_children[layer]) {
184 /* coverity[suspicious_sizeof] */
185 node->children = devm_kcalloc(ice_hw_to_dev(hw),
186 hw->max_children[layer],
187 sizeof(*node), GFP_KERNEL);
188 if (!node->children) {
189 devm_kfree(ice_hw_to_dev(hw), node);
190 return ICE_ERR_NO_MEMORY;
191 }
192 }
193
194 node->in_use = true;
195 node->parent = parent;
196 node->tx_sched_layer = layer;
197 parent->children[parent->num_children++] = node;
198 memcpy(&node->info, &elem.generic[0], sizeof(node->info));
199 return 0;
200}
201
202/**
203 * ice_aq_delete_sched_elems - delete scheduler elements
204 * @hw: pointer to the HW struct
205 * @grps_req: number of groups to delete
206 * @buf: pointer to buffer
207 * @buf_size: buffer size in bytes
208 * @grps_del: returns total number of elements deleted
209 * @cd: pointer to command details structure or NULL
210 *
211 * Delete scheduling elements (0x040F)
212 */
213static enum ice_status
214ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
215 struct ice_aqc_delete_elem *buf, u16 buf_size,
216 u16 *grps_del, struct ice_sq_cd *cd)
217{
218 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_delete_sched_elems,
219 grps_req, (void *)buf, buf_size,
220 grps_del, cd);
221}
222
223/**
224 * ice_sched_remove_elems - remove nodes from HW
225 * @hw: pointer to the HW struct
226 * @parent: pointer to the parent node
227 * @num_nodes: number of nodes
228 * @node_teids: array of node teids to be deleted
229 *
230 * This function removes nodes from HW
231 */
232static enum ice_status
233ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
234 u16 num_nodes, u32 *node_teids)
235{
236 struct ice_aqc_delete_elem *buf;
237 u16 i, num_groups_removed = 0;
238 enum ice_status status;
239 u16 buf_size;
240
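	/* the delete buffer already holds one TEID; add room for the rest */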
241 buf_size = sizeof(*buf) + sizeof(u32) * (num_nodes - 1);
242 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
243 if (!buf)
244 return ICE_ERR_NO_MEMORY;
245
246 buf->hdr.parent_teid = parent->info.node_teid;
247 buf->hdr.num_elems = cpu_to_le16(num_nodes);
248 for (i = 0; i < num_nodes; i++)
249 buf->teid[i] = cpu_to_le32(node_teids[i]);
250
251 status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
252 &num_groups_removed, NULL);
253 if (status || num_groups_removed != 1)
254 ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
255 hw->adminq.sq_last_status);
256
257 devm_kfree(ice_hw_to_dev(hw), buf);
258 return status;
259}
260
261/**
262 * ice_sched_get_first_node - get the first node of the given layer
263 * @pi: port information structure
264 * @parent: pointer to the base node of the subtree
265 * @layer: layer number
266 *
267 * This function retrieves the first node of the given layer from the subtree
268 */
269static struct ice_sched_node *
270ice_sched_get_first_node(struct ice_port_info *pi,
271 struct ice_sched_node *parent, u8 layer)
272{
273 return pi->sib_head[parent->tc_num][layer];
274}
275
276/**
277 * ice_sched_get_tc_node - get pointer to TC node
278 * @pi: port information structure
279 * @tc: TC number
280 *
281 * This function returns the TC node pointer
282 */
283struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc)
284{
285 u8 i;
286
287 if (!pi || !pi->root)
288 return NULL;
289 for (i = 0; i < pi->root->num_children; i++)
290 if (pi->root->children[i]->tc_num == tc)
291 return pi->root->children[i];
292 return NULL;
293}
294
295/**
296 * ice_free_sched_node - Free a Tx scheduler node from SW DB
297 * @pi: port information structure
298 * @node: pointer to the ice_sched_node struct
299 *
300 * This function frees up a node from SW DB as well as from HW
301 *
302 * This function needs to be called with the port_info->sched_lock held
303 */
304void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
305{
306 struct ice_sched_node *parent;
307 struct ice_hw *hw = pi->hw;
308 u8 i, j;
309
310 /* Free the children before freeing up the parent node
311 * The parent array is updated below and that shifts the nodes
312 * in the array. So always pick the first child if num children > 0
313 */
314 while (node->num_children)
315 ice_free_sched_node(pi, node->children[0]);
316
317 /* Leaf, TC and root nodes can't be deleted by SW */
318 if (node->tx_sched_layer >= hw->sw_entry_point_layer &&
319 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
320 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
321 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
322 u32 teid = le32_to_cpu(node->info.node_teid);
323
324 ice_sched_remove_elems(hw, node->parent, 1, &teid);
325 }
326 parent = node->parent;
327 /* root has no parent */
328 if (parent) {
329 struct ice_sched_node *p;
330
331 /* update the parent */
332 for (i = 0; i < parent->num_children; i++)
333 if (parent->children[i] == node) {
334 for (j = i + 1; j < parent->num_children; j++)
335 parent->children[j - 1] =
336 parent->children[j];
337 parent->num_children--;
338 break;
339 }
340
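		/* unlink the node from the sibling list of its layer */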
341 p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
342 while (p) {
343 if (p->sibling == node) {
344 p->sibling = node->sibling;
345 break;
346 }
347 p = p->sibling;
348 }
349
350 /* update the sibling head if head is getting removed */
351 if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
352 pi->sib_head[node->tc_num][node->tx_sched_layer] =
353 node->sibling;
354 }
355
356 /* leaf nodes have no children */
357 if (node->children)
358 devm_kfree(ice_hw_to_dev(hw), node->children);
359 devm_kfree(ice_hw_to_dev(hw), node);
360}
361
362/**
363 * ice_aq_get_dflt_topo - gets default scheduler topology
364 * @hw: pointer to the HW struct
365 * @lport: logical port number
366 * @buf: pointer to buffer
367 * @buf_size: buffer size in bytes
368 * @num_branches: returns total number of queue to port branches
369 * @cd: pointer to command details structure or NULL
370 *
371 * Get default scheduler topology (0x0400)
372 */
373static enum ice_status
374ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
375 struct ice_aqc_get_topo_elem *buf, u16 buf_size,
376 u8 *num_branches, struct ice_sq_cd *cd)
377{
378 struct ice_aqc_get_topo *cmd;
379 struct ice_aq_desc desc;
380 enum ice_status status;
381
382 cmd = &desc.params.get_topo;
383 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
384 cmd->port_num = lport;
385 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
386 if (!status && num_branches)
387 *num_branches = cmd->num_branches;
388
389 return status;
390}
391
392/**
393 * ice_aq_add_sched_elems - adds scheduling elements
394 * @hw: pointer to the HW struct
395 * @grps_req: the number of groups that are requested to be added
396 * @buf: pointer to buffer
397 * @buf_size: buffer size in bytes
398 * @grps_added: returns total number of groups added
399 * @cd: pointer to command details structure or NULL
400 *
401 * Add scheduling elements (0x0401)
402 */
403static enum ice_status
404ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
405 struct ice_aqc_add_elem *buf, u16 buf_size,
406 u16 *grps_added, struct ice_sq_cd *cd)
407{
408 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_add_sched_elems,
409 grps_req, (void *)buf, buf_size,
410 grps_added, cd);
411}
412
413/**
414 * ice_aq_cfg_sched_elems - configures scheduler elements
415 * @hw: pointer to the HW struct
416 * @elems_req: number of elements to configure
417 * @buf: pointer to buffer
418 * @buf_size: buffer size in bytes
419 * @elems_cfgd: returns total number of elements configured
420 * @cd: pointer to command details structure or NULL
421 *
422 * Configure scheduling elements (0x0403)
423 */
424static enum ice_status
425ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
426 struct ice_aqc_conf_elem *buf, u16 buf_size,
427 u16 *elems_cfgd, struct ice_sq_cd *cd)
428{
429 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_cfg_sched_elems,
430 elems_req, (void *)buf, buf_size,
431 elems_cfgd, cd);
432}
433
434/**
435 * ice_aq_suspend_sched_elems - suspend scheduler elements
436 * @hw: pointer to the HW struct
437 * @elems_req: number of elements to suspend
438 * @buf: pointer to buffer
439 * @buf_size: buffer size in bytes
440 * @elems_ret: returns total number of elements suspended
441 * @cd: pointer to command details structure or NULL
442 *
443 * Suspend scheduling elements (0x0409)
444 */
445static enum ice_status
446ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req,
447 struct ice_aqc_suspend_resume_elem *buf,
448 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
449{
450 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_suspend_sched_elems,
451 elems_req, (void *)buf, buf_size,
452 elems_ret, cd);
453}
454
455/**
456 * ice_aq_resume_sched_elems - resume scheduler elements
457 * @hw: pointer to the HW struct
458 * @elems_req: number of elements to resume
459 * @buf: pointer to buffer
460 * @buf_size: buffer size in bytes
461 * @elems_ret: returns total number of elements resumed
462 * @cd: pointer to command details structure or NULL
463 *
464 * Resume scheduling elements (0x040A)
465 */
466static enum ice_status
467ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req,
468 struct ice_aqc_suspend_resume_elem *buf,
469 u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
470{
471 return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_resume_sched_elems,
472 elems_req, (void *)buf, buf_size,
473 elems_ret, cd);
474}
475
476/**
477 * ice_aq_query_sched_res - query scheduler resource
478 * @hw: pointer to the HW struct
479 * @buf_size: buffer size in bytes
480 * @buf: pointer to buffer
481 * @cd: pointer to command details structure or NULL
482 *
483 * Query scheduler resource allocation (0x0412)
484 */
485static enum ice_status
486ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
487 struct ice_aqc_query_txsched_res_resp *buf,
488 struct ice_sq_cd *cd)
489{
490 struct ice_aq_desc desc;
491
492 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
493 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
494}
495
496/**
497 * ice_sched_suspend_resume_elems - suspend or resume HW nodes
498 * @hw: pointer to the HW struct
499 * @num_nodes: number of nodes
500 * @node_teids: array of node teids to be suspended or resumed
501 * @suspend: true means suspend / false means resume
502 *
503 * This function suspends or resumes HW nodes
504 */
505static enum ice_status
506ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
507 bool suspend)
508{
509 struct ice_aqc_suspend_resume_elem *buf;
510 u16 i, buf_size, num_elem_ret = 0;
511 enum ice_status status;
512
513 buf_size = sizeof(*buf) * num_nodes;
514 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
515 if (!buf)
516 return ICE_ERR_NO_MEMORY;
517
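	/* fill in one TEID per suspend/resume element */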
518 for (i = 0; i < num_nodes; i++)
519 buf->teid[i] = cpu_to_le32(node_teids[i]);
520
521 if (suspend)
522 status = ice_aq_suspend_sched_elems(hw, num_nodes, buf,
523 buf_size, &num_elem_ret,
524 NULL);
525 else
526 status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
527 buf_size, &num_elem_ret,
528 NULL);
529 if (status || num_elem_ret != num_nodes)
530 ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
531
532 devm_kfree(ice_hw_to_dev(hw), buf);
533 return status;
534}
535
536/**
537 * ice_alloc_lan_q_ctx - allocate LAN queue contexts for the given VSI and TC
538 * @hw: pointer to the HW struct
539 * @vsi_handle: VSI handle
540 * @tc: TC number
541 * @new_numqs: number of queues
542 */
543static enum ice_status
544ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
545{
546 struct ice_vsi_ctx *vsi_ctx;
547 struct ice_q_ctx *q_ctx;
548
549 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
550 if (!vsi_ctx)
551 return ICE_ERR_PARAM;
552 /* allocate LAN queue contexts */
553 if (!vsi_ctx->lan_q_ctx[tc]) {
554 vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
555 new_numqs,
556 sizeof(*q_ctx),
557 GFP_KERNEL);
558 if (!vsi_ctx->lan_q_ctx[tc])
559 return ICE_ERR_NO_MEMORY;
560 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
561 return 0;
562 }
563 /* the number of queues increased, update the queue contexts */
564 if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
565 u16 prev_num = vsi_ctx->num_lan_q_entries[tc];
566
567 q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
568 sizeof(*q_ctx), GFP_KERNEL);
569 if (!q_ctx)
570 return ICE_ERR_NO_MEMORY;
571 memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
572 prev_num * sizeof(*q_ctx));
573 devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
574 vsi_ctx->lan_q_ctx[tc] = q_ctx;
575 vsi_ctx->num_lan_q_entries[tc] = new_numqs;
576 }
577 return 0;
578}
579
580/**
581 * ice_aq_rl_profile - performs a rate limiting task
582 * @hw: pointer to the HW struct
583 * @opcode: opcode for add, query, or remove profile(s)
584 * @num_profiles: the number of profiles
585 * @buf: pointer to buffer
586 * @buf_size: buffer size in bytes
587 * @num_processed: number of processed add or remove profile(s) to return
588 * @cd: pointer to command details structure
589 *
590 * RL profile function to add, query, or remove profile(s)
591 */
592static enum ice_status
593ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
594 u16 num_profiles, struct ice_aqc_rl_profile_generic_elem *buf,
595 u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
596{
597 struct ice_aqc_rl_profile *cmd;
598 struct ice_aq_desc desc;
599 enum ice_status status;
600
601 cmd = &desc.params.rl_profile;
602
603 ice_fill_dflt_direct_cmd_desc(&desc, opcode);
604 desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
605 cmd->num_profiles = cpu_to_le16(num_profiles);
606 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
607 if (!status && num_processed)
608 *num_processed = le16_to_cpu(cmd->num_processed);
609 return status;
610}
611
612/**
613 * ice_aq_add_rl_profile - adds rate limiting profile(s)
614 * @hw: pointer to the HW struct
615 * @num_profiles: the number of profile(s) to be added
616 * @buf: pointer to buffer
617 * @buf_size: buffer size in bytes
618 * @num_profiles_added: returns total number of profiles added
619 * @cd: pointer to command details structure
620 *
621 * Add RL profile (0x0410)
622 */
623static enum ice_status
624ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
625 struct ice_aqc_rl_profile_generic_elem *buf,
626 u16 buf_size, u16 *num_profiles_added,
627 struct ice_sq_cd *cd)
628{
629 return ice_aq_rl_profile(hw, ice_aqc_opc_add_rl_profiles,
630 num_profiles, buf,
631 buf_size, num_profiles_added, cd);
632}
633
634/**
635 * ice_aq_remove_rl_profile - removes RL profile(s)
636 * @hw: pointer to the HW struct
637 * @num_profiles: the number of profile(s) to remove
638 * @buf: pointer to buffer
639 * @buf_size: buffer size in bytes
640 * @num_profiles_removed: returns total number of profiles removed
641 * @cd: pointer to command details structure or NULL
642 *
643 * Remove RL profile (0x0415)
644 */
645static enum ice_status
646ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
647 struct ice_aqc_rl_profile_generic_elem *buf,
648 u16 buf_size, u16 *num_profiles_removed,
649 struct ice_sq_cd *cd)
650{
651 return ice_aq_rl_profile(hw, ice_aqc_opc_remove_rl_profiles,
652 num_profiles, buf,
653 buf_size, num_profiles_removed, cd);
654}
655
656/**
657 * ice_sched_del_rl_profile - remove RL profile
658 * @hw: pointer to the HW struct
659 * @rl_info: rate limit profile information
660 *
661 * If the profile ID is not referenced anymore, it removes the profile ID with
662 * its associated parameters from the HW DB and locally. The caller needs to
663 * hold the scheduler lock.
664 */
665static enum ice_status
666ice_sched_del_rl_profile(struct ice_hw *hw,
667 struct ice_aqc_rl_profile_info *rl_info)
668{
669 struct ice_aqc_rl_profile_generic_elem *buf;
670 u16 num_profiles_removed;
671 enum ice_status status;
672 u16 num_profiles = 1;
673
674 if (rl_info->prof_id_ref != 0)
675 return ICE_ERR_IN_USE;
676
677 /* Safe to remove profile ID */
678 buf = (struct ice_aqc_rl_profile_generic_elem *)
679 &rl_info->profile;
680 status = ice_aq_remove_rl_profile(hw, num_profiles, buf, sizeof(*buf),
681 &num_profiles_removed, NULL);
682 if (status || num_profiles_removed != num_profiles)
683 return ICE_ERR_CFG;
684
685 /* Delete stale entry now */
686 list_del(&rl_info->list_entry);
687 devm_kfree(ice_hw_to_dev(hw), rl_info);
688 return status;
689}
690
691/**
692 * ice_sched_clear_rl_prof - clears RL prof entries
693 * @pi: port information structure
694 *
695 * This function removes all RL profiles from HW as well as from SW DB.
696 */
697static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
698{
699 u16 ln;
700
701 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
702 struct ice_aqc_rl_profile_info *rl_prof_elem;
703 struct ice_aqc_rl_profile_info *rl_prof_tmp;
704
705 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
706 &pi->rl_prof_list[ln], list_entry) {
707 struct ice_hw *hw = pi->hw;
708 enum ice_status status;
709
710 rl_prof_elem->prof_id_ref = 0;
711 status = ice_sched_del_rl_profile(hw, rl_prof_elem);
712 if (status) {
713 ice_debug(hw, ICE_DBG_SCHED,
714 "Remove rl profile failed\n");
715 /* On error, free mem required */
716 list_del(&rl_prof_elem->list_entry);
717 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
718 }
719 }
720 }
721}
722
723/**
724 * ice_sched_clear_agg - clears the aggregator related information
725 * @hw: pointer to the hardware structure
726 *
727 * This function removes the aggregator list and frees up aggregator related
728 * memory previously allocated.
729 */
730void ice_sched_clear_agg(struct ice_hw *hw)
731{
732 struct ice_sched_agg_info *agg_info;
733 struct ice_sched_agg_info *atmp;
734
735 list_for_each_entry_safe(agg_info, atmp, &hw->agg_list, list_entry) {
736 struct ice_sched_agg_vsi_info *agg_vsi_info;
737 struct ice_sched_agg_vsi_info *vtmp;
738
739 list_for_each_entry_safe(agg_vsi_info, vtmp,
740 &agg_info->agg_vsi_list, list_entry) {
741 list_del(&agg_vsi_info->list_entry);
742 devm_kfree(ice_hw_to_dev(hw), agg_vsi_info);
743 }
744 list_del(&agg_info->list_entry);
745 devm_kfree(ice_hw_to_dev(hw), agg_info);
746 }
747}
748
749/**
750 * ice_sched_clear_tx_topo - clears the scheduler tree nodes
751 * @pi: port information structure
752 *
753 * This function removes all the nodes from HW as well as from SW DB.
754 */
755static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
756{
757 if (!pi)
758 return;
759 /* remove RL profiles related lists */
760 ice_sched_clear_rl_prof(pi);
761 if (pi->root) {
762 ice_free_sched_node(pi, pi->root);
763 pi->root = NULL;
764 }
765}
766
767/**
768 * ice_sched_clear_port - clear the scheduler elements from SW DB for a port
769 * @pi: port information structure
770 *
771 * Cleanup scheduling elements from SW DB
772 */
773void ice_sched_clear_port(struct ice_port_info *pi)
774{
775 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
776 return;
777
778 pi->port_state = ICE_SCHED_PORT_STATE_INIT;
779 mutex_lock(&pi->sched_lock);
780 ice_sched_clear_tx_topo(pi);
781 mutex_unlock(&pi->sched_lock);
782 mutex_destroy(&pi->sched_lock);
783}
784
785/**
786 * ice_sched_cleanup_all - cleanup scheduler elements from SW DB for all ports
787 * @hw: pointer to the HW struct
788 *
789 * Cleanup scheduling elements from SW DB for all the ports
790 */
791void ice_sched_cleanup_all(struct ice_hw *hw)
792{
793 if (!hw)
794 return;
795
796 if (hw->layer_info) {
797 devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
798 hw->layer_info = NULL;
799 }
800
801 ice_sched_clear_port(hw->port_info);
802
803 hw->num_tx_sched_layers = 0;
804 hw->num_tx_sched_phys_layers = 0;
805 hw->flattened_layers = 0;
806 hw->max_cgds = 0;
807}
808
809/**
810 * ice_sched_add_elems - add nodes to HW and SW DB
811 * @pi: port information structure
812 * @tc_node: pointer to the branch node
813 * @parent: pointer to the parent node
814 * @layer: layer number to add nodes
815 * @num_nodes: number of nodes
816 * @num_nodes_added: pointer to num nodes added
817 * @first_node_teid: if new nodes are added then return the TEID of first node
818 *
819 * This function adds nodes to HW as well as to SW DB for a given layer
820 */
821static enum ice_status
822ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
823 struct ice_sched_node *parent, u8 layer, u16 num_nodes,
824 u16 *num_nodes_added, u32 *first_node_teid)
825{
826 struct ice_sched_node *prev, *new_node;
827 struct ice_aqc_add_elem *buf;
828 u16 i, num_groups_added = 0;
829 enum ice_status status = 0;
830 struct ice_hw *hw = pi->hw;
831 size_t buf_size;
832 u32 teid;
833
834 buf_size = struct_size(buf, generic, num_nodes - 1);
835 buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
836 if (!buf)
837 return ICE_ERR_NO_MEMORY;
838
839 buf->hdr.parent_teid = parent->info.node_teid;
840 buf->hdr.num_elems = cpu_to_le16(num_nodes);
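	/* request generic SE nodes with the default RL profile and BW weight */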
841 for (i = 0; i < num_nodes; i++) {
842 buf->generic[i].parent_teid = parent->info.node_teid;
843 buf->generic[i].data.elem_type = ICE_AQC_ELEM_TYPE_SE_GENERIC;
844 buf->generic[i].data.valid_sections =
845 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
846 ICE_AQC_ELEM_VALID_EIR;
847 buf->generic[i].data.generic = 0;
848 buf->generic[i].data.cir_bw.bw_profile_idx =
849 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
850 buf->generic[i].data.cir_bw.bw_alloc =
851 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
852 buf->generic[i].data.eir_bw.bw_profile_idx =
853 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
854 buf->generic[i].data.eir_bw.bw_alloc =
855 cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
856 }
857
858 status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
859 &num_groups_added, NULL);
860 if (status || num_groups_added != 1) {
861 ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
862 hw->adminq.sq_last_status);
863 devm_kfree(ice_hw_to_dev(hw), buf);
864 return ICE_ERR_CFG;
865 }
866
867 *num_nodes_added = num_nodes;
868 /* add nodes to the SW DB */
869 for (i = 0; i < num_nodes; i++) {
870 status = ice_sched_add_node(pi, layer, &buf->generic[i]);
871 if (status) {
872 ice_debug(hw, ICE_DBG_SCHED,
873 "add nodes in SW DB failed status =%d\n",
874 status);
875 break;
876 }
877
878 teid = le32_to_cpu(buf->generic[i].node_teid);
879 new_node = ice_sched_find_node_by_teid(parent, teid);
880 if (!new_node) {
881 ice_debug(hw, ICE_DBG_SCHED,
882 "Node is missing for teid =%d\n", teid);
883 break;
884 }
885
886 new_node->sibling = NULL;
887 new_node->tc_num = tc_node->tc_num;
888
889 /* add it to previous node sibling pointer */
890 /* Note: siblings are not linked across branches */
891 prev = ice_sched_get_first_node(pi, tc_node, layer);
892 if (prev && prev != new_node) {
893 while (prev->sibling)
894 prev = prev->sibling;
895 prev->sibling = new_node;
896 }
897
898 /* initialize the sibling head */
899 if (!pi->sib_head[tc_node->tc_num][layer])
900 pi->sib_head[tc_node->tc_num][layer] = new_node;
901
902 if (i == 0)
903 *first_node_teid = teid;
904 }
905
906 devm_kfree(ice_hw_to_dev(hw), buf);
907 return status;
908}
909
910/**
911 * ice_sched_add_nodes_to_layer - Add nodes to a given layer
912 * @pi: port information structure
913 * @tc_node: pointer to TC node
914 * @parent: pointer to parent node
915 * @layer: layer number to add nodes
916 * @num_nodes: number of nodes to be added
917 * @first_node_teid: pointer to the first node TEID
918 * @num_nodes_added: pointer to number of nodes added
919 *
920 * This function adds nodes to a given layer.
921 */
922static enum ice_status
923ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
924 struct ice_sched_node *tc_node,
925 struct ice_sched_node *parent, u8 layer,
926 u16 num_nodes, u32 *first_node_teid,
927 u16 *num_nodes_added)
928{
929 u32 *first_teid_ptr = first_node_teid;
930 u16 new_num_nodes, max_child_nodes;
931 enum ice_status status = 0;
932 struct ice_hw *hw = pi->hw;
933 u16 num_added = 0;
934 u32 temp;
935
936 *num_nodes_added = 0;
937
938 if (!num_nodes)
939 return status;
940
941 if (!parent || layer < hw->sw_entry_point_layer)
942 return ICE_ERR_PARAM;
943
944 /* max children per node per layer */
945 max_child_nodes = hw->max_children[parent->tx_sched_layer];
946
947 /* current number of children + required nodes exceed max children ? */
948 if ((parent->num_children + num_nodes) > max_child_nodes) {
949 /* Fail if the parent is a TC node */
950 if (parent == tc_node)
951 return ICE_ERR_CFG;
952
953 /* utilize all the spaces if the parent is not full */
954 if (parent->num_children < max_child_nodes) {
955 new_num_nodes = max_child_nodes - parent->num_children;
956 /* this recursion is intentional, and won't go
957 * more than 2 calls deep
958 */
959 status = ice_sched_add_nodes_to_layer(pi, tc_node,
960 parent, layer,
961 new_num_nodes,
962 first_node_teid,
963 &num_added);
964 if (status)
965 return status;
966
967 *num_nodes_added += num_added;
968 }
969 /* Don't modify the first node TEID memory if the first node was
970 * added already in the above call. Instead send some temp
971 * memory for all other recursive calls.
972 */
973 if (num_added)
974 first_teid_ptr = &temp;
975
976 new_num_nodes = num_nodes - num_added;
977
978 /* This parent is full, try the next sibling */
979 parent = parent->sibling;
980
981 /* this recursion is intentional, for 1024 queues
982 * per VSI, it goes max of 16 iterations.
983 * 1024 / 8 = 128 layer 8 nodes
984 * 128 / 8 = 16 (add 8 nodes per iteration)
985 */
986 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
987 layer, new_num_nodes,
988 first_teid_ptr,
989 &num_added);
990 *num_nodes_added += num_added;
991 return status;
992 }
993
994 status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
995 num_nodes_added, first_node_teid);
996 return status;
997}
998
999/**
1000 * ice_sched_get_qgrp_layer - get the current queue group layer number
1001 * @hw: pointer to the HW struct
1002 *
1003 * This function returns the current queue group layer number
1004 */
1005static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
1006{
1007 /* It's always total layers - 1, the array is 0 relative so -2 */
1008 return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
1009}
1010
1011/**
1012 * ice_sched_get_vsi_layer - get the current VSI layer number
1013 * @hw: pointer to the HW struct
1014 *
1015 * This function returns the current VSI layer number
1016 */
1017static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
1018{
1019 /* Num Layers VSI layer
1020 * 9 6
1021 * 7 4
1022 * 5 or less sw_entry_point_layer
1023 */
1024 /* calculate the VSI layer based on number of layers. */
1025 if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
1026 u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
1027
1028 if (layer > hw->sw_entry_point_layer)
1029 return layer;
1030 }
1031 return hw->sw_entry_point_layer;
1032}
1033
1034/**
1035 * ice_rm_dflt_leaf_node - remove the default leaf node in the tree
1036 * @pi: port information structure
1037 *
1038 * This function removes the leaf node that was created by the FW
1039 * during initialization
1040 */
1041static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
1042{
1043 struct ice_sched_node *node;
1044
1045 node = pi->root;
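	/* walk down the first-child chain to reach the deepest node */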
1046 while (node) {
1047 if (!node->num_children)
1048 break;
1049 node = node->children[0];
1050 }
1051 if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
1052 u32 teid = le32_to_cpu(node->info.node_teid);
1053 enum ice_status status;
1054
1055 /* remove the default leaf node */
1056 status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
1057 if (!status)
1058 ice_free_sched_node(pi, node);
1059 }
1060}
1061
1062/**
1063 * ice_sched_rm_dflt_nodes - free the default nodes in the tree
1064 * @pi: port information structure
1065 *
1066 * This function frees all the nodes except root and TC that were created by
1067 * the FW during initialization
1068 */
1069static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
1070{
1071 struct ice_sched_node *node;
1072
1073 ice_rm_dflt_leaf_node(pi);
1074
1075 /* remove the default nodes except TC and root nodes */
1076 node = pi->root;
1077 while (node) {
1078 if (node->tx_sched_layer >= pi->hw->sw_entry_point_layer &&
1079 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_TC &&
1080 node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT) {
1081 ice_free_sched_node(pi, node);
1082 break;
1083 }
1084
1085 if (!node->num_children)
1086 break;
1087 node = node->children[0];
1088 }
1089}
1090
1091/**
1092 * ice_sched_init_port - Initialize scheduler by querying information from FW
1093 * @pi: port information structure
1094 *
1095 * This function is the initial call to find the total number of Tx scheduler
1096 * resources and the default topology created by firmware, and to store that
1097 * information in the SW DB.
1098 */
1099enum ice_status ice_sched_init_port(struct ice_port_info *pi)
1100{
1101 struct ice_aqc_get_topo_elem *buf;
1102 enum ice_status status;
1103 struct ice_hw *hw;
1104 u8 num_branches;
1105 u16 num_elems;
1106 u8 i, j;
1107
1108 if (!pi)
1109 return ICE_ERR_PARAM;
1110 hw = pi->hw;
1111
1112 /* Query the Default Topology from FW */
1113 buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
1114 if (!buf)
1115 return ICE_ERR_NO_MEMORY;
1116
1117 /* Query default scheduling tree topology */
1118 status = ice_aq_get_dflt_topo(hw, pi->lport, buf, ICE_AQ_MAX_BUF_LEN,
1119 &num_branches, NULL);
1120 if (status)
1121 goto err_init_port;
1122
1123 /* num_branches should be between 1-8 */
1124 if (num_branches < 1 || num_branches > ICE_TXSCHED_MAX_BRANCHES) {
1125 ice_debug(hw, ICE_DBG_SCHED, "num_branches unexpected %d\n",
1126 num_branches);
1127 status = ICE_ERR_PARAM;
1128 goto err_init_port;
1129 }
1130
1131 /* get the number of elements on the default/first branch */
1132 num_elems = le16_to_cpu(buf[0].hdr.num_elems);
1133
1134 /* num_elems should always be between 1-9 */
1135 if (num_elems < 1 || num_elems > ICE_AQC_TOPO_MAX_LEVEL_NUM) {
1136 ice_debug(hw, ICE_DBG_SCHED, "num_elems unexpected %d\n",
1137 num_elems);
1138 status = ICE_ERR_PARAM;
1139 goto err_init_port;
1140 }
1141
1142 /* If the last node is a leaf node then the index of the queue group
1143 * layer is two less than the number of elements.
1144 */
1145 if (num_elems > 2 && buf[0].generic[num_elems - 1].data.elem_type ==
1146 ICE_AQC_ELEM_TYPE_LEAF)
1147 pi->last_node_teid =
1148 le32_to_cpu(buf[0].generic[num_elems - 2].node_teid);
1149 else
1150 pi->last_node_teid =
1151 le32_to_cpu(buf[0].generic[num_elems - 1].node_teid);
1152
1153 /* Insert the Tx Sched root node */
1154 status = ice_sched_add_root_node(pi, &buf[0].generic[0]);
1155 if (status)
1156 goto err_init_port;
1157
1158 /* Parse the default tree and cache the information */
1159 for (i = 0; i < num_branches; i++) {
1160 num_elems = le16_to_cpu(buf[i].hdr.num_elems);
1161
1162 /* Skip root element as already inserted */
1163 for (j = 1; j < num_elems; j++) {
1164 /* update the sw entry point */
1165 if (buf[0].generic[j].data.elem_type ==
1166 ICE_AQC_ELEM_TYPE_ENTRY_POINT)
1167 hw->sw_entry_point_layer = j;
1168
1169 status = ice_sched_add_node(pi, j, &buf[i].generic[j]);
1170 if (status)
1171 goto err_init_port;
1172 }
1173 }
1174
1175 /* Remove the default nodes. */
1176 if (pi->root)
1177 ice_sched_rm_dflt_nodes(pi);
1178
1179 /* initialize the port for handling the scheduler tree */
1180 pi->port_state = ICE_SCHED_PORT_STATE_READY;
1181 mutex_init(&pi->sched_lock);
1182 for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
1183 INIT_LIST_HEAD(&pi->rl_prof_list[i]);
1184
1185err_init_port:
1186 if (status && pi->root) {
1187 ice_free_sched_node(pi, pi->root);
1188 pi->root = NULL;
1189 }
1190
1191 devm_kfree(ice_hw_to_dev(hw), buf);
1192 return status;
1193}
1194
1195/**
1196 * ice_sched_query_res_alloc - query the FW for num of logical sched layers
1197 * @hw: pointer to the HW struct
1198 *
1199 * query FW for allocated scheduler resources and store in HW struct
1200 */
1201enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
1202{
1203 struct ice_aqc_query_txsched_res_resp *buf;
1204 enum ice_status status = 0;
1205 __le16 max_sibl;
1206 u16 i;
1207
1208 if (hw->layer_info)
1209 return status;
1210
1211 buf = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*buf), GFP_KERNEL);
1212 if (!buf)
1213 return ICE_ERR_NO_MEMORY;
1214
1215 status = ice_aq_query_sched_res(hw, sizeof(*buf), buf, NULL);
1216 if (status)
1217 goto sched_query_out;
1218
1219 hw->num_tx_sched_layers = le16_to_cpu(buf->sched_props.logical_levels);
1220 hw->num_tx_sched_phys_layers =
1221 le16_to_cpu(buf->sched_props.phys_levels);
1222 hw->flattened_layers = buf->sched_props.flattening_bitmap;
1223 hw->max_cgds = buf->sched_props.max_pf_cgds;
1224
1225 /* max sibling group size of current layer refers to the max children
1226 * of the below layer node.
1227 * layer 1 node max children will be layer 2 max sibling group size
1228 * layer 2 node max children will be layer 3 max sibling group size
1229 * and so on. This array will be populated from root (index 0) to
1230 * qgroup layer 7. Leaf node has no children.
1231 */
1232 for (i = 0; i < hw->num_tx_sched_layers - 1; i++) {
1233 max_sibl = buf->layer_props[i + 1].max_sibl_grp_sz;
1234 hw->max_children[i] = le16_to_cpu(max_sibl);
1235 }
1236
1237 hw->layer_info = devm_kmemdup(ice_hw_to_dev(hw), buf->layer_props,
1238 (hw->num_tx_sched_layers *
1239 sizeof(*hw->layer_info)),
1240 GFP_KERNEL);
1241 if (!hw->layer_info) {
1242 status = ICE_ERR_NO_MEMORY;
1243 goto sched_query_out;
1244 }
1245
1246sched_query_out:
1247 devm_kfree(ice_hw_to_dev(hw), buf);
1248 return status;
1249}
1250
1251/**
1252 * ice_sched_find_node_in_subtree - Find node in part of base node subtree
1253 * @hw: pointer to the HW struct
1254 * @base: pointer to the base node
1255 * @node: pointer to the node to search
1256 *
1257 * This function checks whether a given node is part of the base node
1258 * subtree or not
1259 */
1260static bool
1261ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
1262 struct ice_sched_node *node)
1263{
1264 u8 i;
1265
1266 for (i = 0; i < base->num_children; i++) {
1267 struct ice_sched_node *child = base->children[i];
1268
1269 if (node == child)
1270 return true;
1271
1272 if (child->tx_sched_layer > node->tx_sched_layer)
1273 return false;
1274
1275 /* this recursion is intentional, and won't go
1276 * more than 8 calls deep
1277 */
1278 if (ice_sched_find_node_in_subtree(hw, child, node))
1279 return true;
1280 }
1281 return false;
1282}
1283
1284/**
1285 * ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
1286 * @pi: port information structure
1287 * @vsi_handle: software VSI handle
1288 * @tc: branch number
1289 * @owner: LAN or RDMA
1290 *
1291 * This function retrieves a free LAN or RDMA queue group node
1292 */
1293struct ice_sched_node *
1294ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
1295 u8 owner)
1296{
1297 struct ice_sched_node *vsi_node, *qgrp_node = NULL;
1298 struct ice_vsi_ctx *vsi_ctx;
1299 u16 max_children;
1300 u8 qgrp_layer;
1301
1302 qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
1303 max_children = pi->hw->max_children[qgrp_layer];
1304
1305 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1306 if (!vsi_ctx)
1307 return NULL;
1308 vsi_node = vsi_ctx->sched.vsi_node[tc];
1309 /* bail out if the VSI node does not exist */
1310 if (!vsi_node)
1311 goto lan_q_exit;
1312
1313 /* get the first queue group node from VSI sub-tree */
1314 qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
1315 while (qgrp_node) {
1316 /* make sure the qgroup node is part of the VSI subtree */
1317 if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
1318 if (qgrp_node->num_children < max_children &&
1319 qgrp_node->owner == owner)
1320 break;
1321 qgrp_node = qgrp_node->sibling;
1322 }
1323
1324lan_q_exit:
1325 return qgrp_node;
1326}
1327
1328/**
1329 * ice_sched_get_vsi_node - Get a VSI node based on VSI ID
1330 * @hw: pointer to the HW struct
1331 * @tc_node: pointer to the TC node
1332 * @vsi_handle: software VSI handle
1333 *
1334 * This function retrieves a VSI node for a given VSI ID from a given
1335 * TC branch
1336 */
1337static struct ice_sched_node *
1338ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
1339 u16 vsi_handle)
1340{
1341 struct ice_sched_node *node;
1342 u8 vsi_layer;
1343
1344 vsi_layer = ice_sched_get_vsi_layer(hw);
1345 node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);
1346
1347 /* Check whether it already exists */
1348 while (node) {
1349 if (node->vsi_handle == vsi_handle)
1350 return node;
1351 node = node->sibling;
1352 }
1353
1354 return node;
1355}
1356
1357/**
1358 * ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
1359 * @hw: pointer to the HW struct
1360 * @num_qs: number of queues
1361 * @num_nodes: num nodes array
1362 *
1363 * This function calculates the number of VSI child nodes based on the
1364 * number of queues.
1365 */
1366static void
1367ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
1368{
1369 u16 num = num_qs;
1370 u8 i, qgl, vsil;
1371
1372 qgl = ice_sched_get_qgrp_layer(hw);
1373 vsil = ice_sched_get_vsi_layer(hw);
1374
1375 /* calculate num nodes from queue group to VSI layer */
1376 for (i = qgl; i > vsil; i--) {
1377 /* round to the next integer if there is a remainder */
1378 num = DIV_ROUND_UP(num, hw->max_children[i]);
1379
1380 /* need at least one node */
1381 num_nodes[i] = num ? num : 1;
1382 }
1383}
1384
1385/**
1386 * ice_sched_add_vsi_child_nodes - add VSI child nodes to tree
1387 * @pi: port information structure
1388 * @vsi_handle: software VSI handle
1389 * @tc_node: pointer to the TC node
1390 * @num_nodes: pointer to the num nodes that needs to be added per layer
1391 * @owner: node owner (LAN or RDMA)
1392 *
1393 * This function adds the VSI child nodes to tree. It gets called for
1394 * LAN and RDMA separately.
1395 */
1396static enum ice_status
1397ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1398 struct ice_sched_node *tc_node, u16 *num_nodes,
1399 u8 owner)
1400{
1401 struct ice_sched_node *parent, *node;
1402 struct ice_hw *hw = pi->hw;
1403 enum ice_status status;
1404 u32 first_node_teid;
1405 u16 num_added = 0;
1406 u8 i, qgl, vsil;
1407
1408 qgl = ice_sched_get_qgrp_layer(hw);
1409 vsil = ice_sched_get_vsi_layer(hw);
1410 parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
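	/* add child nodes layer by layer, from below the VSI layer down to the
	 * queue group layer
	 */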
1411 for (i = vsil + 1; i <= qgl; i++) {
1412 if (!parent)
1413 return ICE_ERR_CFG;
1414
1415 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
1416 num_nodes[i],
1417 &first_node_teid,
1418 &num_added);
1419 if (status || num_nodes[i] != num_added)
1420 return ICE_ERR_CFG;
1421
1422 /* The newly added node can be a new parent for the next
1423 * layer nodes
1424 */
1425 if (num_added) {
1426 parent = ice_sched_find_node_by_teid(tc_node,
1427 first_node_teid);
1428 node = parent;
1429 while (node) {
1430 node->owner = owner;
1431 node = node->sibling;
1432 }
1433 } else {
1434 parent = parent->children[0];
1435 }
1436 }
1437
1438 return 0;
1439}
1440
1441/**
1442 * ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
1443 * @hw: pointer to the HW struct
1444 * @tc_node: pointer to TC node
1445 * @num_nodes: pointer to num nodes array
1446 *
1447 * This function calculates the number of support nodes needed to add this
1448 * VSI into the Tx tree, including the VSI, its parent and intermediate nodes
1449 * in below layers
1450 */
1451static void
1452ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
1453 struct ice_sched_node *tc_node, u16 *num_nodes)
1454{
1455 struct ice_sched_node *node;
1456 u8 vsil;
1457 int i;
1458
1459 vsil = ice_sched_get_vsi_layer(hw);
1460 for (i = vsil; i >= hw->sw_entry_point_layer; i--)
1461 /* Add intermediate nodes if TC has no children and
1462 * need at least one node for VSI
1463 */
1464 if (!tc_node->num_children || i == vsil) {
1465 num_nodes[i]++;
1466 } else {
1467 /* If the intermediate nodes have reached max
1468 * children, then add a new one.
1469 */
1470 node = ice_sched_get_first_node(hw->port_info, tc_node,
1471 (u8)i);
1472 /* scan all the siblings */
1473 while (node) {
1474 if (node->num_children < hw->max_children[i])
1475 break;
1476 node = node->sibling;
1477 }
1478
1479 /* tree has one intermediate node to add this new VSI.
1480 * So no need to calculate supported nodes for below
1481 * layers.
1482 */
1483 if (node)
1484 break;
1485 /* all the nodes are full, allocate a new one */
1486 num_nodes[i]++;
1487 }
1488}
1489
1490/**
1491 * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
1492 * @pi: port information structure
1493 * @vsi_handle: software VSI handle
1494 * @tc_node: pointer to TC node
1495 * @num_nodes: pointer to num nodes array
1496 *
1497 * This function adds the VSI support nodes into the Tx tree, including the
1498 * VSI, its parent and intermediate nodes in below layers
1499 */
1500static enum ice_status
1501ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
1502 struct ice_sched_node *tc_node, u16 *num_nodes)
1503{
1504 struct ice_sched_node *parent = tc_node;
1505 enum ice_status status;
1506 u32 first_node_teid;
1507 u16 num_added = 0;
1508 u8 i, vsil;
1509
1510 if (!pi)
1511 return ICE_ERR_PARAM;
1512
1513 vsil = ice_sched_get_vsi_layer(pi->hw);
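	/* add one node per layer from the SW entry point layer down to the
	 * VSI layer
	 */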
1514 for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
1515 status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
1516 i, num_nodes[i],
1517 &first_node_teid,
1518 &num_added);
1519 if (status || num_nodes[i] != num_added)
1520 return ICE_ERR_CFG;
1521
1522 /* The newly added node can be a new parent for the next
1523 * layer nodes
1524 */
1525 if (num_added)
1526 parent = ice_sched_find_node_by_teid(tc_node,
1527 first_node_teid);
1528 else
1529 parent = parent->children[0];
1530
1531 if (!parent)
1532 return ICE_ERR_CFG;
1533
1534 if (i == vsil)
1535 parent->vsi_handle = vsi_handle;
1536 }
1537
1538 return 0;
1539}
1540
1541/**
1542 * ice_sched_add_vsi_to_topo - add a new VSI into tree
1543 * @pi: port information structure
1544 * @vsi_handle: software VSI handle
1545 * @tc: TC number
1546 *
1547 * This function adds a new VSI into scheduler tree
1548 */
1549static enum ice_status
1550ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
1551{
1552 u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1553 struct ice_sched_node *tc_node;
1554 struct ice_hw *hw = pi->hw;
1555
1556 tc_node = ice_sched_get_tc_node(pi, tc);
1557 if (!tc_node)
1558 return ICE_ERR_PARAM;
1559
1560 /* calculate number of supported nodes needed for this VSI */
1561 ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
1562
1563 /* add VSI supported nodes to TC subtree */
1564 return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
1565 num_nodes);
1566}
1567
1568/**
1569 * ice_sched_update_vsi_child_nodes - update VSI child nodes
1570 * @pi: port information structure
1571 * @vsi_handle: software VSI handle
1572 * @tc: TC number
1573 * @new_numqs: new number of max queues
1574 * @owner: owner of this subtree
1575 *
1576 * This function updates the VSI child nodes based on the number of queues
1577 */
1578static enum ice_status
1579ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
1580 u8 tc, u16 new_numqs, u8 owner)
1581{
1582 u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
1583 struct ice_sched_node *vsi_node;
1584 struct ice_sched_node *tc_node;
1585 struct ice_vsi_ctx *vsi_ctx;
1586 enum ice_status status = 0;
1587 struct ice_hw *hw = pi->hw;
1588 u16 prev_numqs;
1589
1590 tc_node = ice_sched_get_tc_node(pi, tc);
1591 if (!tc_node)
1592 return ICE_ERR_CFG;
1593
1594 vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1595 if (!vsi_node)
1596 return ICE_ERR_CFG;
1597
1598 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1599 if (!vsi_ctx)
1600 return ICE_ERR_PARAM;
1601
1602 prev_numqs = vsi_ctx->sched.max_lanq[tc];
1603 /* number of queues is unchanged or less than the previous number */
1604 if (new_numqs <= prev_numqs)
1605 return status;
1606 status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
1607 if (status)
1608 return status;
1609
1610 if (new_numqs)
1611 ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
1612 /* Always keep the maximum queue configuration. Update the
1613 * tree only if number of queues > previous number of queues. This may
1614 * leave some extra nodes in the tree if number of queues < previous
1615 * number but that wouldn't harm anything. Removing those extra nodes
1616 * may complicate the code if those nodes are part of SRL or
1617 * individually rate limited.
1618 */
1619 status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
1620 new_num_nodes, owner);
1621 if (status)
1622 return status;
1623 vsi_ctx->sched.max_lanq[tc] = new_numqs;
1624
1625 return 0;
1626}
1627
1628/**
1629 * ice_sched_cfg_vsi - configure the new/existing VSI
1630 * @pi: port information structure
1631 * @vsi_handle: software VSI handle
1632 * @tc: TC number
1633 * @maxqs: max number of queues
1634 * @owner: LAN or RDMA
1635 * @enable: TC enabled or disabled
1636 *
1637 * This function adds/updates VSI nodes based on the number of queues. If TC is
1638 * enabled and the VSI is in a suspended state then resume the VSI. If TC is
1639 * disabled then suspend the VSI if it is not already suspended.
1640 */
1641enum ice_status
1642ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
1643 u8 owner, bool enable)
1644{
1645 struct ice_sched_node *vsi_node, *tc_node;
1646 struct ice_vsi_ctx *vsi_ctx;
1647 enum ice_status status = 0;
1648 struct ice_hw *hw = pi->hw;
1649
1650 ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
1651 tc_node = ice_sched_get_tc_node(pi, tc);
1652 if (!tc_node)
1653 return ICE_ERR_PARAM;
1654 vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
1655 if (!vsi_ctx)
1656 return ICE_ERR_PARAM;
1657 vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1658
1659 /* suspend the VSI if TC is not enabled */
1660 if (!enable) {
1661 if (vsi_node && vsi_node->in_use) {
1662 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1663
1664 status = ice_sched_suspend_resume_elems(hw, 1, &teid,
1665 true);
1666 if (!status)
1667 vsi_node->in_use = false;
1668 }
1669 return status;
1670 }
1671
1672 /* TC is enabled, if it is a new VSI then add it to the tree */
1673 if (!vsi_node) {
1674 status = ice_sched_add_vsi_to_topo(pi, vsi_handle, tc);
1675 if (status)
1676 return status;
1677
1678 vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
1679 if (!vsi_node)
1680 return ICE_ERR_CFG;
1681
1682 vsi_ctx->sched.vsi_node[tc] = vsi_node;
1683 vsi_node->in_use = true;
1684 /* invalidate the max queues whenever VSI gets added first time
1685 * into the scheduler tree (boot or after reset). We need to
1686 * recreate the child nodes all the time in these cases.
1687 */
1688 vsi_ctx->sched.max_lanq[tc] = 0;
1689 }
1690
1691 /* update the VSI child nodes */
1692 status = ice_sched_update_vsi_child_nodes(pi, vsi_handle, tc, maxqs,
1693 owner);
1694 if (status)
1695 return status;
1696
1697 /* TC is enabled, resume the VSI if it is in the suspend state */
1698 if (!vsi_node->in_use) {
1699 u32 teid = le32_to_cpu(vsi_node->info.node_teid);
1700
1701 status = ice_sched_suspend_resume_elems(hw, 1, &teid, false);
1702 if (!status)
1703 vsi_node->in_use = true;
1704 }
1705
1706 return status;
1707}
1708
1709/**
1710 * ice_sched_rm_agg_vsi_info - remove aggregator related VSI info entry
1711 * @pi: port information structure
1712 * @vsi_handle: software VSI handle
1713 *
1714 * This function removes single aggregator VSI info entry from
1715 * aggregator list.
1716 */
1717static void
1718ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
1719{
1720 struct ice_sched_agg_info *agg_info;
1721 struct ice_sched_agg_info *atmp;
1722
1723 list_for_each_entry_safe(agg_info, atmp, &pi->hw->agg_list,
1724 list_entry) {
1725 struct ice_sched_agg_vsi_info *agg_vsi_info;
1726 struct ice_sched_agg_vsi_info *vtmp;
1727
1728 list_for_each_entry_safe(agg_vsi_info, vtmp,
1729 &agg_info->agg_vsi_list, list_entry)
1730 if (agg_vsi_info->vsi_handle == vsi_handle) {
1731 list_del(&agg_vsi_info->list_entry);
1732 devm_kfree(ice_hw_to_dev(pi->hw),
1733 agg_vsi_info);
1734 return;
1735 }
1736 }
1737}
1738
1739/**
1740 * ice_sched_is_leaf_node_present - check for a leaf node in the sub-tree
1741 * @node: pointer to the sub-tree node
1742 *
1743 * This function checks for a leaf node presence in a given sub-tree node.
1744 */
1745static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
1746{
1747 u8 i;
1748
1749 for (i = 0; i < node->num_children; i++)
1750 if (ice_sched_is_leaf_node_present(node->children[i]))
1751 return true;
1752 /* check for a leaf node */
1753 return (node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF);
1754}
1755
1756/**
1757 * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
1758 * @pi: port information structure
1759 * @vsi_handle: software VSI handle
1760 * @owner: LAN or RDMA
1761 *
1762 * This function removes the VSI and its LAN or RDMA children nodes from the
1763 * scheduler tree.
1764 */
1765static enum ice_status
1766ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
1767{
1768 enum ice_status status = ICE_ERR_PARAM;
1769 struct ice_vsi_ctx *vsi_ctx;
1770 u8 i;
1771
1772 ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
1773 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
1774 return status;
1775 mutex_lock(&pi->sched_lock);
1776 vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
1777 if (!vsi_ctx)
1778 goto exit_sched_rm_vsi_cfg;
1779
1780 ice_for_each_traffic_class(i) {
1781 struct ice_sched_node *vsi_node, *tc_node;
1782 u8 j = 0;
1783
1784 tc_node = ice_sched_get_tc_node(pi, i);
1785 if (!tc_node)
1786 continue;
1787
1788 vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
1789 if (!vsi_node)
1790 continue;
1791
1792 if (ice_sched_is_leaf_node_present(vsi_node)) {
1793 ice_debug(pi->hw, ICE_DBG_SCHED,
1794 "VSI has leaf nodes in TC %d\n", i);
1795 status = ICE_ERR_IN_USE;
1796 goto exit_sched_rm_vsi_cfg;
1797 }
1798 while (j < vsi_node->num_children) {
1799 if (vsi_node->children[j]->owner == owner) {
1800 ice_free_sched_node(pi, vsi_node->children[j]);
1801
1802 /* reset the counter again since the num
1803 * children will be updated after node removal
1804 */
1805 j = 0;
1806 } else {
1807 j++;
1808 }
1809 }
1810 /* remove the VSI if it has no children */
1811 if (!vsi_node->num_children) {
1812 ice_free_sched_node(pi, vsi_node);
1813 vsi_ctx->sched.vsi_node[i] = NULL;
1814
1815 /* clean up aggregator related VSI info if any */
1816 ice_sched_rm_agg_vsi_info(pi, vsi_handle);
1817 }
1818 if (owner == ICE_SCHED_NODE_OWNER_LAN)
1819 vsi_ctx->sched.max_lanq[i] = 0;
1820 }
1821 status = 0;
1822
1823exit_sched_rm_vsi_cfg:
1824 mutex_unlock(&pi->sched_lock);
1825 return status;
1826}
1827
1828/**
1829 * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
1830 * @pi: port information structure
1831 * @vsi_handle: software VSI handle
1832 *
1833 * This function clears the VSI and its LAN children nodes from the scheduler
1834 * tree for all TCs.
1835 */
1836enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
1837{
1838 return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
1839}
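
/* Illustrative sketch (not part of the driver): a caller would typically
 * remove the VSI's Tx queues first, since the function above fails with
 * ICE_ERR_IN_USE while leaf (queue) nodes are still present, and then do
 * something like:
 *
 *	status = ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
 *	if (status)
 *		dev_dbg(dev, "failed to remove VSI LAN sched cfg\n");
 *
 * The vsi and dev variables above are assumed for illustration only.
 */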
1840
1841/**
1842 * ice_sched_rm_unused_rl_prof - remove unused RL profile
1843 * @pi: port information structure
1844 *
1845 * This function removes unused rate limit profiles from the HW and
1846 * SW DB. The caller needs to hold scheduler lock.
1847 */
1848static void ice_sched_rm_unused_rl_prof(struct ice_port_info *pi)
1849{
1850 u16 ln;
1851
1852 for (ln = 0; ln < pi->hw->num_tx_sched_layers; ln++) {
1853 struct ice_aqc_rl_profile_info *rl_prof_elem;
1854 struct ice_aqc_rl_profile_info *rl_prof_tmp;
1855
1856 list_for_each_entry_safe(rl_prof_elem, rl_prof_tmp,
1857 &pi->rl_prof_list[ln], list_entry) {
1858 if (!ice_sched_del_rl_profile(pi->hw, rl_prof_elem))
1859 ice_debug(pi->hw, ICE_DBG_SCHED,
1860 "Removed rl profile\n");
1861 }
1862 }
1863}
1864
1865/**
1866 * ice_sched_update_elem - update element
1867 * @hw: pointer to the HW struct
1868 * @node: pointer to node
1869 * @info: node info to update
1870 *
1871 * This function updates both the HW DB and the local SW DB for the node. It
1872 * updates the node's scheduling parameters from the info argument's data
1873 * buffer (info->data) and returns success, or an error if configuring the
1874 * sched element fails. The caller needs to hold the scheduler lock.
1875 */
1876static enum ice_status
1877ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
1878 struct ice_aqc_txsched_elem_data *info)
1879{
1880 struct ice_aqc_conf_elem buf;
1881 enum ice_status status;
1882 u16 elem_cfgd = 0;
1883 u16 num_elems = 1;
1884
1885 buf.generic[0] = *info;
1886	/* Parent TEID is a reserved field in this AQ call */
1887 buf.generic[0].parent_teid = 0;
1888	/* Element type is a reserved field in this AQ call */
1889 buf.generic[0].data.elem_type = 0;
1890	/* The flags field is reserved in this AQ call */
1891 buf.generic[0].data.flags = 0;
1892
1893 /* Update HW DB */
1894 /* Configure element node */
1895 status = ice_aq_cfg_sched_elems(hw, num_elems, &buf, sizeof(buf),
1896 &elem_cfgd, NULL);
1897 if (status || elem_cfgd != num_elems) {
1898 ice_debug(hw, ICE_DBG_SCHED, "Config sched elem error\n");
1899 return ICE_ERR_CFG;
1900 }
1901
1902 /* Config success case */
1903 /* Now update local SW DB */
1904 /* Only copy the data portion of info buffer */
1905 node->info.data = info->data;
1906 return status;
1907}
1908
1909/**
1910 * ice_sched_cfg_node_bw_alloc - configure node BW weight/alloc params
1911 * @hw: pointer to the HW struct
1912 * @node: sched node to configure
1913 * @rl_type: rate limit type CIR, EIR, or shared
1914 * @bw_alloc: BW weight/allocation
1915 *
1916 * This function configures node element's BW allocation.
1917 */
1918static enum ice_status
1919ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
1920 enum ice_rl_type rl_type, u8 bw_alloc)
1921{
1922 struct ice_aqc_txsched_elem_data buf;
1923 struct ice_aqc_txsched_elem *data;
1924 enum ice_status status;
1925
1926 buf = node->info;
1927 data = &buf.data;
1928 if (rl_type == ICE_MIN_BW) {
1929 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
1930 data->cir_bw.bw_alloc = cpu_to_le16(bw_alloc);
1931 } else if (rl_type == ICE_MAX_BW) {
1932 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
1933 data->eir_bw.bw_alloc = cpu_to_le16(bw_alloc);
1934 } else {
1935 return ICE_ERR_PARAM;
1936 }
1937
1938 /* Configure element */
1939 status = ice_sched_update_elem(hw, node, &buf);
1940 return status;
1941}
1942
1943/**
1944 * ice_set_clear_cir_bw - set or clear CIR BW
1945 * @bw_t_info: bandwidth type information structure
1946 * @bw: bandwidth in Kbps - Kilo bits per sec
1947 *
1948 * Save or clear CIR bandwidth (BW) in the passed param bw_t_info.
1949 */
1950static void
1951ice_set_clear_cir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
1952{
1953 if (bw == ICE_SCHED_DFLT_BW) {
1954 clear_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
1955 bw_t_info->cir_bw.bw = 0;
1956 } else {
1957 /* Save type of BW information */
1958 set_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap);
1959 bw_t_info->cir_bw.bw = bw;
1960 }
1961}
1962
1963/**
1964 * ice_set_clear_eir_bw - set or clear EIR BW
1965 * @bw_t_info: bandwidth type information structure
1966 * @bw: bandwidth in Kbps - Kilo bits per sec
1967 *
1968 * Save or clear EIR bandwidth (BW) in the passed param bw_t_info.
1969 */
1970static void
1971ice_set_clear_eir_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
1972{
1973 if (bw == ICE_SCHED_DFLT_BW) {
1974 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
1975 bw_t_info->eir_bw.bw = 0;
1976 } else {
1977 /* EIR BW and Shared BW profiles are mutually exclusive and
1978 * hence only one of them may be set for any given element.
1979 * First clear earlier saved shared BW information.
1980 */
1981 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
1982 bw_t_info->shared_bw = 0;
1983 /* save EIR BW information */
1984 set_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
1985 bw_t_info->eir_bw.bw = bw;
1986 }
1987}
1988
1989/**
1990 * ice_set_clear_shared_bw - set or clear shared BW
1991 * @bw_t_info: bandwidth type information structure
1992 * @bw: bandwidth in Kbps - Kilo bits per sec
1993 *
1994 * Save or clear shared bandwidth (BW) in the passed param bw_t_info.
1995 */
1996static void
1997ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
1998{
1999 if (bw == ICE_SCHED_DFLT_BW) {
2000 clear_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2001 bw_t_info->shared_bw = 0;
2002 } else {
2003 /* EIR BW and Shared BW profiles are mutually exclusive and
2004 * hence only one of them may be set for any given element.
2005 * First clear earlier saved EIR BW information.
2006 */
2007 clear_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap);
2008 bw_t_info->eir_bw.bw = 0;
2009 /* save shared BW information */
2010 set_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap);
2011 bw_t_info->shared_bw = bw;
2012 }
2013}
2014
2015/**
2016 * ice_sched_calc_wakeup - calculate RL profile wakeup parameter
2017 * @bw: bandwidth in Kbps
2018 *
2019 * This function calculates the wakeup parameter of RL profile.
2020 */
2021static u16 ice_sched_calc_wakeup(s32 bw)
2022{
2023 s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
2024 s32 wakeup_f_int;
2025 u16 wakeup = 0;
2026
2027 /* Get the wakeup integer value */
2028 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2029 wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
2030 if (wakeup_int > 63) {
2031 wakeup = (u16)((1 << 15) | wakeup_int);
2032 } else {
2033 /* Calculate fraction value up to 4 decimals
2034 * Convert Integer value to a constant multiplier
2035 */
2036 wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
2037 wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
2038 ICE_RL_PROF_FREQUENCY,
2039 bytes_per_sec);
2040
2041 /* Get Fraction value */
2042 wakeup_f = wakeup_a - wakeup_b;
2043
2044 /* Round up the Fractional value via Ceil(Fractional value) */
2045 if (wakeup_f > div64_long(ICE_RL_PROF_MULTIPLIER, 2))
2046 wakeup_f += 1;
2047
2048 wakeup_f_int = (s32)div64_long(wakeup_f * ICE_RL_PROF_FRACTION,
2049 ICE_RL_PROF_MULTIPLIER);
2050 wakeup |= (u16)(wakeup_int << 9);
2051 wakeup |= (u16)(0x1ff & wakeup_f_int);
2052 }
2053
2054 return wakeup;
2055}
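
/* Layout of the wakeup word produced above, derived from the code for
 * reference: if the integer part exceeds 63, bit 15 is set and the integer
 * value occupies the low bits as-is; otherwise bit 15 is clear, bits 14:9
 * hold the integer part and bits 8:0 hold the fractional part scaled by
 * ICE_RL_PROF_FRACTION.
 */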
2056
2057/**
2058 * ice_sched_bw_to_rl_profile - convert BW to profile parameters
2059 * @bw: bandwidth in Kbps
2060 * @profile: profile parameters to return
2061 *
2062 * This function converts the BW to profile structure format.
2063 */
2064static enum ice_status
2065ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
2066{
2067 enum ice_status status = ICE_ERR_PARAM;
2068 s64 bytes_per_sec, ts_rate, mv_tmp;
2069 bool found = false;
2070 s32 encode = 0;
2071 s64 mv = 0;
2072 s32 i;
2073
2074	/* BW settings range is from 0.5 Mb/sec to 100 Gb/sec */
2075 if (bw < ICE_SCHED_MIN_BW || bw > ICE_SCHED_MAX_BW)
2076 return status;
2077
2078 /* Bytes per second from Kbps */
2079 bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
2080
2081	/* encode is a 6-bit field, but only 5 bits are actually useful */
2082 for (i = 0; i < 64; i++) {
2083 u64 pow_result = BIT_ULL(i);
2084
2085 ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
2086 pow_result * ICE_RL_PROF_TS_MULTIPLIER);
2087 if (ts_rate <= 0)
2088 continue;
2089
2090 /* Multiplier value */
2091 mv_tmp = div64_long(bytes_per_sec * ICE_RL_PROF_MULTIPLIER,
2092 ts_rate);
2093
2094 /* Round to the nearest ICE_RL_PROF_MULTIPLIER */
2095 mv = round_up_64bit(mv_tmp, ICE_RL_PROF_MULTIPLIER);
2096
2097		/* Stop at the first multiplier value greater than the
2098		 * given accuracy (in bytes)
2099		 */
2100 if (mv > ICE_RL_PROF_ACCURACY_BYTES) {
2101 encode = i;
2102 found = true;
2103 break;
2104 }
2105 }
2106 if (found) {
2107 u16 wm;
2108
2109 wm = ice_sched_calc_wakeup(bw);
2110 profile->rl_multiply = cpu_to_le16(mv);
2111 profile->wake_up_calc = cpu_to_le16(wm);
2112 profile->rl_encode = cpu_to_le16(encode);
2113 status = 0;
2114 } else {
2115 status = ICE_ERR_DOES_NOT_EXIST;
2116 }
2117
2118 return status;
2119}
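
/* In short, the loop above searches for the smallest encode value i for
 * which, derived from the code for reference:
 *
 *	ts_rate = ICE_RL_PROF_FREQUENCY / (2^i * ICE_RL_PROF_TS_MULTIPLIER)
 *	mv      = round_up_64bit(bytes_per_sec * ICE_RL_PROF_MULTIPLIER /
 *				 ts_rate, ICE_RL_PROF_MULTIPLIER)
 *
 * exceeds ICE_RL_PROF_ACCURACY_BYTES. The resulting multiplier, wakeup and
 * encode values are then returned in the profile element.
 */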
2120
2121/**
2122 * ice_sched_add_rl_profile - add RL profile
2123 * @pi: port information structure
2124 * @rl_type: type of rate limit BW - min, max, or shared
2125 * @bw: bandwidth in Kbps - Kilo bits per sec
2126 * @layer_num: specifies in which layer to create profile
2127 *
2128 * This function first checks the existing list for a matching BW
2129 * parameter. If one exists, it returns the associated profile; otherwise
2130 * it creates a new rate limit profile for the requested BW, adds it to the
2131 * HW DB and the local list, and returns the new profile or NULL on error.
2132 * The caller needs to hold the scheduler lock.
2133 */
2134static struct ice_aqc_rl_profile_info *
2135ice_sched_add_rl_profile(struct ice_port_info *pi,
2136 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
2137{
2138 struct ice_aqc_rl_profile_generic_elem *buf;
2139 struct ice_aqc_rl_profile_info *rl_prof_elem;
2140 u16 profiles_added = 0, num_profiles = 1;
2141 enum ice_status status;
2142 struct ice_hw *hw;
2143 u8 profile_type;
2144
2145 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
2146 return NULL;
2147 switch (rl_type) {
2148 case ICE_MIN_BW:
2149 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
2150 break;
2151 case ICE_MAX_BW:
2152 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
2153 break;
2154 case ICE_SHARED_BW:
2155 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
2156 break;
2157 default:
2158 return NULL;
2159 }
2160
2161 if (!pi)
2162 return NULL;
2163 hw = pi->hw;
2164 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
2165 list_entry)
2166 if (rl_prof_elem->profile.flags == profile_type &&
2167 rl_prof_elem->bw == bw)
2168 /* Return existing profile ID info */
2169 return rl_prof_elem;
2170
2171 /* Create new profile ID */
2172 rl_prof_elem = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rl_prof_elem),
2173 GFP_KERNEL);
2174
2175 if (!rl_prof_elem)
2176 return NULL;
2177
2178 status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
2179 if (status)
2180 goto exit_add_rl_prof;
2181
2182 rl_prof_elem->bw = bw;
2183 /* layer_num is zero relative, and fw expects level from 1 to 9 */
2184 rl_prof_elem->profile.level = layer_num + 1;
2185 rl_prof_elem->profile.flags = profile_type;
2186 rl_prof_elem->profile.max_burst_size = cpu_to_le16(hw->max_burst_size);
2187
2188 /* Create new entry in HW DB */
2189 buf = (struct ice_aqc_rl_profile_generic_elem *)
2190 &rl_prof_elem->profile;
2191 status = ice_aq_add_rl_profile(hw, num_profiles, buf, sizeof(*buf),
2192 &profiles_added, NULL);
2193 if (status || profiles_added != num_profiles)
2194 goto exit_add_rl_prof;
2195
2196 /* Good entry - add in the list */
2197 rl_prof_elem->prof_id_ref = 0;
2198 list_add(&rl_prof_elem->list_entry, &pi->rl_prof_list[layer_num]);
2199 return rl_prof_elem;
2200
2201exit_add_rl_prof:
2202 devm_kfree(ice_hw_to_dev(hw), rl_prof_elem);
2203 return NULL;
2204}
2205
2206/**
2207 * ice_sched_cfg_node_bw_lmt - configure node sched params
2208 * @hw: pointer to the HW struct
2209 * @node: sched node to configure
2210 * @rl_type: rate limit type CIR, EIR, or shared
2211 * @rl_prof_id: rate limit profile ID
2212 *
2213 * This function configures node element's BW limit.
2214 */
2215static enum ice_status
2216ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
2217 enum ice_rl_type rl_type, u16 rl_prof_id)
2218{
2219 struct ice_aqc_txsched_elem_data buf;
2220 struct ice_aqc_txsched_elem *data;
2221
2222 buf = node->info;
2223 data = &buf.data;
2224 switch (rl_type) {
2225 case ICE_MIN_BW:
2226 data->valid_sections |= ICE_AQC_ELEM_VALID_CIR;
2227 data->cir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
2228 break;
2229 case ICE_MAX_BW:
2230 /* EIR BW and Shared BW profiles are mutually exclusive and
2231 * hence only one of them may be set for any given element
2232 */
2233 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
2234 return ICE_ERR_CFG;
2235 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2236 data->eir_bw.bw_profile_idx = cpu_to_le16(rl_prof_id);
2237 break;
2238 case ICE_SHARED_BW:
2239 /* Check for removing shared BW */
2240 if (rl_prof_id == ICE_SCHED_NO_SHARED_RL_PROF_ID) {
2241 /* remove shared profile */
2242 data->valid_sections &= ~ICE_AQC_ELEM_VALID_SHARED;
2243 data->srl_id = 0; /* clear SRL field */
2244
2245 /* enable back EIR to default profile */
2246 data->valid_sections |= ICE_AQC_ELEM_VALID_EIR;
2247 data->eir_bw.bw_profile_idx =
2248 cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
2249 break;
2250 }
2251 /* EIR BW and Shared BW profiles are mutually exclusive and
2252 * hence only one of them may be set for any given element
2253 */
2254 if ((data->valid_sections & ICE_AQC_ELEM_VALID_EIR) &&
2255 (le16_to_cpu(data->eir_bw.bw_profile_idx) !=
2256 ICE_SCHED_DFLT_RL_PROF_ID))
2257 return ICE_ERR_CFG;
2258 /* EIR BW is set to default, disable it */
2259 data->valid_sections &= ~ICE_AQC_ELEM_VALID_EIR;
2260 /* Okay to enable shared BW now */
2261 data->valid_sections |= ICE_AQC_ELEM_VALID_SHARED;
2262 data->srl_id = cpu_to_le16(rl_prof_id);
2263 break;
2264 default:
2265 /* Unknown rate limit type */
2266 return ICE_ERR_PARAM;
2267 }
2268
2269 /* Configure element */
2270 return ice_sched_update_elem(hw, node, &buf);
2271}
2272
2273/**
2274 * ice_sched_get_node_rl_prof_id - get node's rate limit profile ID
2275 * @node: sched node
2276 * @rl_type: rate limit type
2277 *
2278 * If an existing profile matches, this function returns the corresponding
2279 * rate limit profile ID; otherwise it returns an invalid ID as an error.
2280 */
2281static u16
2282ice_sched_get_node_rl_prof_id(struct ice_sched_node *node,
2283 enum ice_rl_type rl_type)
2284{
2285 u16 rl_prof_id = ICE_SCHED_INVAL_PROF_ID;
2286 struct ice_aqc_txsched_elem *data;
2287
2288 data = &node->info.data;
2289 switch (rl_type) {
2290 case ICE_MIN_BW:
2291 if (data->valid_sections & ICE_AQC_ELEM_VALID_CIR)
2292 rl_prof_id = le16_to_cpu(data->cir_bw.bw_profile_idx);
2293 break;
2294 case ICE_MAX_BW:
2295 if (data->valid_sections & ICE_AQC_ELEM_VALID_EIR)
2296 rl_prof_id = le16_to_cpu(data->eir_bw.bw_profile_idx);
2297 break;
2298 case ICE_SHARED_BW:
2299 if (data->valid_sections & ICE_AQC_ELEM_VALID_SHARED)
2300 rl_prof_id = le16_to_cpu(data->srl_id);
2301 break;
2302 default:
2303 break;
2304 }
2305
2306 return rl_prof_id;
2307}
2308
2309/**
2310 * ice_sched_get_rl_prof_layer - selects rate limit profile creation layer
2311 * @pi: port information structure
2312 * @rl_type: type of rate limit BW - min, max, or shared
2313 * @layer_index: layer index
2314 *
2315 * This function returns requested profile creation layer.
2316 */
2317static u8
2318ice_sched_get_rl_prof_layer(struct ice_port_info *pi, enum ice_rl_type rl_type,
2319 u8 layer_index)
2320{
2321 struct ice_hw *hw = pi->hw;
2322
2323 if (layer_index >= hw->num_tx_sched_layers)
2324 return ICE_SCHED_INVAL_LAYER_NUM;
2325 switch (rl_type) {
2326 case ICE_MIN_BW:
2327 if (hw->layer_info[layer_index].max_cir_rl_profiles)
2328 return layer_index;
2329 break;
2330 case ICE_MAX_BW:
2331 if (hw->layer_info[layer_index].max_eir_rl_profiles)
2332 return layer_index;
2333 break;
2334 case ICE_SHARED_BW:
2335 /* if current layer doesn't support SRL profile creation
2336 * then try a layer up or down.
2337 */
2338 if (hw->layer_info[layer_index].max_srl_profiles)
2339 return layer_index;
2340 else if (layer_index < hw->num_tx_sched_layers - 1 &&
2341 hw->layer_info[layer_index + 1].max_srl_profiles)
2342 return layer_index + 1;
2343 else if (layer_index > 0 &&
2344 hw->layer_info[layer_index - 1].max_srl_profiles)
2345 return layer_index - 1;
2346 break;
2347 default:
2348 break;
2349 }
2350 return ICE_SCHED_INVAL_LAYER_NUM;
2351}
2352
2353/**
2354 * ice_sched_get_srl_node - get shared rate limit node
2355 * @node: tree node
2356 * @srl_layer: shared rate limit layer
2357 *
2358 * This function returns SRL node to be used for shared rate limit purpose.
2359 * The caller needs to hold scheduler lock.
2360 */
2361static struct ice_sched_node *
2362ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
2363{
2364 if (srl_layer > node->tx_sched_layer)
2365 return node->children[0];
2366 else if (srl_layer < node->tx_sched_layer)
2367		/* A node can't be created without a parent, so every node
2368		 * except the root has a valid parent.
2369		 */
2370 return node->parent;
2371 else
2372 return node;
2373}
2374
2375/**
2376 * ice_sched_rm_rl_profile - remove RL profile ID
2377 * @pi: port information structure
2378 * @layer_num: layer number where profiles are saved
2379 * @profile_type: profile type like EIR, CIR, or SRL
2380 * @profile_id: profile ID to remove
2381 *
2382 * This function removes the rate limit profile of type 'profile_type' with
2383 * ID 'profile_id' from layer 'layer_num'. The caller needs to hold the
2384 * scheduler lock.
2385 */
2386static enum ice_status
2387ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
2388 u16 profile_id)
2389{
2390 struct ice_aqc_rl_profile_info *rl_prof_elem;
2391 enum ice_status status = 0;
2392
2393 if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
2394 return ICE_ERR_PARAM;
2395 /* Check the existing list for RL profile */
2396 list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
2397 list_entry)
2398 if (rl_prof_elem->profile.flags == profile_type &&
2399 le16_to_cpu(rl_prof_elem->profile.profile_id) ==
2400 profile_id) {
2401 if (rl_prof_elem->prof_id_ref)
2402 rl_prof_elem->prof_id_ref--;
2403
2404 /* Remove old profile ID from database */
2405 status = ice_sched_del_rl_profile(pi->hw, rl_prof_elem);
2406 if (status && status != ICE_ERR_IN_USE)
2407 ice_debug(pi->hw, ICE_DBG_SCHED,
2408 "Remove rl profile failed\n");
2409 break;
2410 }
2411 if (status == ICE_ERR_IN_USE)
2412 status = 0;
2413 return status;
2414}
2415
2416/**
2417 * ice_sched_set_node_bw_dflt - set node's bandwidth limit to default
2418 * @pi: port information structure
2419 * @node: pointer to node structure
2420 * @rl_type: rate limit type min, max, or shared
2421 * @layer_num: layer number where RL profiles are saved
2422 *
2423 * This function configures node element's BW rate limit profile ID of
2424 * type CIR, EIR, or SRL to default. This function needs to be called
2425 * with the scheduler lock held.
2426 */
2427static enum ice_status
2428ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
2429 struct ice_sched_node *node,
2430 enum ice_rl_type rl_type, u8 layer_num)
2431{
2432 enum ice_status status;
2433 struct ice_hw *hw;
2434 u8 profile_type;
2435 u16 rl_prof_id;
2436 u16 old_id;
2437
2438 hw = pi->hw;
2439 switch (rl_type) {
2440 case ICE_MIN_BW:
2441 profile_type = ICE_AQC_RL_PROFILE_TYPE_CIR;
2442 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
2443 break;
2444 case ICE_MAX_BW:
2445 profile_type = ICE_AQC_RL_PROFILE_TYPE_EIR;
2446 rl_prof_id = ICE_SCHED_DFLT_RL_PROF_ID;
2447 break;
2448 case ICE_SHARED_BW:
2449 profile_type = ICE_AQC_RL_PROFILE_TYPE_SRL;
2450 /* No SRL is configured for default case */
2451 rl_prof_id = ICE_SCHED_NO_SHARED_RL_PROF_ID;
2452 break;
2453 default:
2454 return ICE_ERR_PARAM;
2455 }
2456 /* Save existing RL prof ID for later clean up */
2457 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
2458 /* Configure BW scheduling parameters */
2459 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
2460 if (status)
2461 return status;
2462
2463 /* Remove stale RL profile ID */
2464 if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
2465 old_id == ICE_SCHED_INVAL_PROF_ID)
2466 return 0;
2467
2468 return ice_sched_rm_rl_profile(pi, layer_num, profile_type, old_id);
2469}
2470
2471/**
2472 * ice_sched_set_eir_srl_excl - set EIR/SRL exclusiveness
2473 * @pi: port information structure
2474 * @node: pointer to node structure
2475 * @layer_num: layer number where rate limit profiles are saved
2476 * @rl_type: rate limit type min, max, or shared
2477 * @bw: bandwidth value
2478 *
2479 * This function prepares the node element's bandwidth to use SRL or EIR exclusively.
2480 * EIR BW and Shared BW profiles are mutually exclusive and hence only one of
2481 * them may be set for any given element. This function needs to be called
2482 * with the scheduler lock held.
2483 */
2484static enum ice_status
2485ice_sched_set_eir_srl_excl(struct ice_port_info *pi,
2486 struct ice_sched_node *node,
2487 u8 layer_num, enum ice_rl_type rl_type, u32 bw)
2488{
2489 if (rl_type == ICE_SHARED_BW) {
2490 /* SRL node passed in this case, it may be different node */
2491 if (bw == ICE_SCHED_DFLT_BW)
2492 /* SRL being removed, ice_sched_cfg_node_bw_lmt()
2493 * enables EIR to default. EIR is not set in this
2494 * case, so no additional action is required.
2495 */
2496 return 0;
2497
2498 /* SRL being configured, set EIR to default here.
2499 * ice_sched_cfg_node_bw_lmt() disables EIR when it
2500 * configures SRL
2501 */
2502 return ice_sched_set_node_bw_dflt(pi, node, ICE_MAX_BW,
2503 layer_num);
2504 } else if (rl_type == ICE_MAX_BW &&
2505 node->info.data.valid_sections & ICE_AQC_ELEM_VALID_SHARED) {
2506 /* Remove Shared profile. Set default shared BW call
2507 * removes shared profile for a node.
2508 */
2509 return ice_sched_set_node_bw_dflt(pi, node,
2510 ICE_SHARED_BW,
2511 layer_num);
2512 }
2513 return 0;
2514}
2515
2516/**
2517 * ice_sched_set_node_bw - set node's bandwidth
2518 * @pi: port information structure
2519 * @node: tree node
2520 * @rl_type: rate limit type min, max, or shared
2521 * @bw: bandwidth in Kbps - Kilo bits per sec
2522 * @layer_num: layer number
2523 *
2524 * This function adds a new profile corresponding to the requested BW, configures
2525 * the node's RL profile ID of type CIR, EIR, or SRL, and removes the old profile
2526 * ID from the local database. The caller needs to hold the scheduler lock.
2527 */
2528static enum ice_status
2529ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
2530 enum ice_rl_type rl_type, u32 bw, u8 layer_num)
2531{
2532 struct ice_aqc_rl_profile_info *rl_prof_info;
2533 enum ice_status status = ICE_ERR_PARAM;
2534 struct ice_hw *hw = pi->hw;
2535 u16 old_id, rl_prof_id;
2536
2537 rl_prof_info = ice_sched_add_rl_profile(pi, rl_type, bw, layer_num);
2538 if (!rl_prof_info)
2539 return status;
2540
2541 rl_prof_id = le16_to_cpu(rl_prof_info->profile.profile_id);
2542
2543 /* Save existing RL prof ID for later clean up */
2544 old_id = ice_sched_get_node_rl_prof_id(node, rl_type);
2545 /* Configure BW scheduling parameters */
2546 status = ice_sched_cfg_node_bw_lmt(hw, node, rl_type, rl_prof_id);
2547 if (status)
2548 return status;
2549
2550	/* New changes have been applied */
2551 /* Increment the profile ID reference count */
2552 rl_prof_info->prof_id_ref++;
2553
2554 /* Check for old ID removal */
2555 if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
2556 old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
2557 return 0;
2558
2559 return ice_sched_rm_rl_profile(pi, layer_num,
2560 rl_prof_info->profile.flags,
2561 old_id);
2562}
2563
2564/**
2565 * ice_sched_set_node_bw_lmt - set node's BW limit
2566 * @pi: port information structure
2567 * @node: tree node
2568 * @rl_type: rate limit type min, max, or shared
2569 * @bw: bandwidth in Kbps - Kilo bits per sec
2570 *
2571 * It updates node's BW limit parameters like BW RL profile ID of type CIR,
2572 * EIR, or SRL. The caller needs to hold scheduler lock.
2573 */
2574static enum ice_status
2575ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
2576 enum ice_rl_type rl_type, u32 bw)
2577{
2578 struct ice_sched_node *cfg_node = node;
2579 enum ice_status status;
2580
2581 struct ice_hw *hw;
2582 u8 layer_num;
2583
2584 if (!pi)
2585 return ICE_ERR_PARAM;
2586 hw = pi->hw;
2587 /* Remove unused RL profile IDs from HW and SW DB */
2588 ice_sched_rm_unused_rl_prof(pi);
2589 layer_num = ice_sched_get_rl_prof_layer(pi, rl_type,
2590 node->tx_sched_layer);
2591 if (layer_num >= hw->num_tx_sched_layers)
2592 return ICE_ERR_PARAM;
2593
2594 if (rl_type == ICE_SHARED_BW) {
2595 /* SRL node may be different */
2596 cfg_node = ice_sched_get_srl_node(node, layer_num);
2597 if (!cfg_node)
2598 return ICE_ERR_CFG;
2599 }
2600 /* EIR BW and Shared BW profiles are mutually exclusive and
2601 * hence only one of them may be set for any given element
2602 */
2603 status = ice_sched_set_eir_srl_excl(pi, cfg_node, layer_num, rl_type,
2604 bw);
2605 if (status)
2606 return status;
2607 if (bw == ICE_SCHED_DFLT_BW)
2608 return ice_sched_set_node_bw_dflt(pi, cfg_node, rl_type,
2609 layer_num);
2610 return ice_sched_set_node_bw(pi, cfg_node, rl_type, bw, layer_num);
2611}
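
/* Order of operations above, for reference: unused RL profiles are pruned
 * first, the profile layer is selected for the requested rl_type, the SRL
 * node (which may be the node itself, its parent, or its first child) is
 * resolved for shared BW, EIR/SRL exclusivity is enforced, and finally the
 * node is either reset to the default profile or pointed at a new one.
 */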
2612
2613/**
2614 * ice_sched_set_node_bw_dflt_lmt - set node's BW limit to default
2615 * @pi: port information structure
2616 * @node: pointer to node structure
2617 * @rl_type: rate limit type min, max, or shared
2618 *
2619 * This function configures node element's BW rate limit profile ID of
2620 * type CIR, EIR, or SRL to default. This function needs to be called
2621 * with the scheduler lock held.
2622 */
2623static enum ice_status
2624ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
2625 struct ice_sched_node *node,
2626 enum ice_rl_type rl_type)
2627{
2628 return ice_sched_set_node_bw_lmt(pi, node, rl_type,
2629 ICE_SCHED_DFLT_BW);
2630}
2631
2632/**
2633 * ice_sched_validate_srl_node - Check node for SRL applicability
2634 * @node: sched node to configure
2635 * @sel_layer: selected SRL layer
2636 *
2637 * This function checks if the SRL can be applied to a selected layer node on
2638 * behalf of the requested node (first argument). This function needs to be
2639 * called with scheduler lock held.
2640 */
2641static enum ice_status
2642ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
2643{
2644 /* SRL profiles are not available on all layers. Check if the
2645 * SRL profile can be applied to a node above or below the
2646 * requested node. SRL configuration is possible only if the
2647 * selected layer's node has single child.
2648 */
2649 if (sel_layer == node->tx_sched_layer ||
2650 ((sel_layer == node->tx_sched_layer + 1) &&
2651 node->num_children == 1) ||
2652 ((sel_layer == node->tx_sched_layer - 1) &&
2653 (node->parent && node->parent->num_children == 1)))
2654 return 0;
2655
2656 return ICE_ERR_CFG;
2657}
2658
2659/**
2660 * ice_sched_save_q_bw - save queue node's BW information
2661 * @q_ctx: queue context structure
2662 * @rl_type: rate limit type min, max, or shared
2663 * @bw: bandwidth in Kbps - Kilo bits per sec
2664 *
2665 * Save BW information of queue type node for post replay use.
2666 */
2667static enum ice_status
2668ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
2669{
2670 switch (rl_type) {
2671 case ICE_MIN_BW:
2672 ice_set_clear_cir_bw(&q_ctx->bw_t_info, bw);
2673 break;
2674 case ICE_MAX_BW:
2675 ice_set_clear_eir_bw(&q_ctx->bw_t_info, bw);
2676 break;
2677 case ICE_SHARED_BW:
2678 ice_set_clear_shared_bw(&q_ctx->bw_t_info, bw);
2679 break;
2680 default:
2681 return ICE_ERR_PARAM;
2682 }
2683 return 0;
2684}
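
/* The BW saved here is consumed during replay by ice_sched_replay_q_bw(),
 * which looks the queue node up by TEID and re-applies the stored limits
 * via ice_sched_replay_node_bw().
 */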
2685
2686/**
2687 * ice_sched_set_q_bw_lmt - sets queue BW limit
2688 * @pi: port information structure
2689 * @vsi_handle: sw VSI handle
2690 * @tc: traffic class
2691 * @q_handle: software queue handle
2692 * @rl_type: min, max, or shared
2693 * @bw: bandwidth in Kbps
2694 *
2695 * This function sets BW limit of queue scheduling node.
2696 */
2697static enum ice_status
2698ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2699 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
2700{
2701 enum ice_status status = ICE_ERR_PARAM;
2702 struct ice_sched_node *node;
2703 struct ice_q_ctx *q_ctx;
2704
2705 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
2706 return ICE_ERR_PARAM;
2707 mutex_lock(&pi->sched_lock);
2708 q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handle);
2709 if (!q_ctx)
2710 goto exit_q_bw_lmt;
2711 node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
2712 if (!node) {
2713 ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong q_teid\n");
2714 goto exit_q_bw_lmt;
2715 }
2716
2717 /* Return error if it is not a leaf node */
2718 if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF)
2719 goto exit_q_bw_lmt;
2720
2721 /* SRL bandwidth layer selection */
2722 if (rl_type == ICE_SHARED_BW) {
2723 u8 sel_layer; /* selected layer */
2724
2725 sel_layer = ice_sched_get_rl_prof_layer(pi, rl_type,
2726 node->tx_sched_layer);
2727 if (sel_layer >= pi->hw->num_tx_sched_layers) {
2728 status = ICE_ERR_PARAM;
2729 goto exit_q_bw_lmt;
2730 }
2731 status = ice_sched_validate_srl_node(node, sel_layer);
2732 if (status)
2733 goto exit_q_bw_lmt;
2734 }
2735
2736 if (bw == ICE_SCHED_DFLT_BW)
2737 status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
2738 else
2739 status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
2740
2741 if (!status)
2742 status = ice_sched_save_q_bw(q_ctx, rl_type, bw);
2743
2744exit_q_bw_lmt:
2745 mutex_unlock(&pi->sched_lock);
2746 return status;
2747}
2748
2749/**
2750 * ice_cfg_q_bw_lmt - configure queue BW limit
2751 * @pi: port information structure
2752 * @vsi_handle: sw VSI handle
2753 * @tc: traffic class
2754 * @q_handle: software queue handle
2755 * @rl_type: min, max, or shared
2756 * @bw: bandwidth in Kbps
2757 *
2758 * This function configures BW limit of queue scheduling node.
2759 */
2760enum ice_status
2761ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2762 u16 q_handle, enum ice_rl_type rl_type, u32 bw)
2763{
2764 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
2765 bw);
2766}
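
/* Illustrative sketch (not part of the driver): capping a single Tx queue
 * at 100 Mbps (100000 Kbps) could look like the call below; the vsi_handle
 * and q_handle values are hypothetical, and TC 0 is assumed.
 *
 *	status = ice_cfg_q_bw_lmt(pi, vsi_handle, 0, q_handle,
 *				  ICE_MAX_BW, 100000);
 *
 * Passing ICE_SCHED_DFLT_BW instead (or calling ice_cfg_q_bw_dflt_lmt())
 * restores the default, removing the limit.
 */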
2767
2768/**
2769 * ice_cfg_q_bw_dflt_lmt - configure queue BW default limit
2770 * @pi: port information structure
2771 * @vsi_handle: sw VSI handle
2772 * @tc: traffic class
2773 * @q_handle: software queue handle
2774 * @rl_type: min, max, or shared
2775 *
2776 * This function configures BW default limit of queue scheduling node.
2777 */
2778enum ice_status
2779ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
2780 u16 q_handle, enum ice_rl_type rl_type)
2781{
2782 return ice_sched_set_q_bw_lmt(pi, vsi_handle, tc, q_handle, rl_type,
2783 ICE_SCHED_DFLT_BW);
2784}
2785
2786/**
2787 * ice_cfg_rl_burst_size - Set burst size value
2788 * @hw: pointer to the HW struct
2789 * @bytes: burst size in bytes
2790 *
2791 * This function configures/sets the burst size to the requested new value. The
2792 * new burst size value is used for future rate limit calls. It doesn't change
2793 * the existing or previously created RL profiles.
2794 */
2795enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
2796{
2797 u16 burst_size_to_prog;
2798
2799 if (bytes < ICE_MIN_BURST_SIZE_ALLOWED ||
2800 bytes > ICE_MAX_BURST_SIZE_ALLOWED)
2801 return ICE_ERR_PARAM;
2802 if (ice_round_to_num(bytes, 64) <=
2803 ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY) {
2804 /* 64 byte granularity case */
2805 /* Disable MSB granularity bit */
2806 burst_size_to_prog = ICE_64_BYTE_GRANULARITY;
2807 /* round number to nearest 64 byte granularity */
2808 bytes = ice_round_to_num(bytes, 64);
2809 /* The value is in 64 byte chunks */
2810 burst_size_to_prog |= (u16)(bytes / 64);
2811 } else {
2812 /* k bytes granularity case */
2813 /* Enable MSB granularity bit */
2814 burst_size_to_prog = ICE_KBYTE_GRANULARITY;
2815 /* round number to nearest 1024 granularity */
2816 bytes = ice_round_to_num(bytes, 1024);
2817 /* check rounding doesn't go beyond allowed */
2818 if (bytes > ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY)
2819 bytes = ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY;
2820 /* The value is in k bytes */
2821 burst_size_to_prog |= (u16)(bytes / 1024);
2822 }
2823 hw->max_burst_size = burst_size_to_prog;
2824 return 0;
2825}
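
/* Worked example, derived from the code above for reference: a request of
 * 4096 bytes, assuming it falls within the 64-byte-granularity maximum, is
 * rounded to a multiple of 64 and programmed as 4096 / 64 = 64 chunks with
 * the MSB granularity bit clear. A larger request beyond that maximum is
 * rounded to a multiple of 1024 (capped at the kilobyte-granularity maximum)
 * and programmed as bytes / 1024 chunks with the MSB granularity bit set.
 */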
2826
2827/**
2828 * ice_sched_replay_node_prio - re-configure node priority
2829 * @hw: pointer to the HW struct
2830 * @node: sched node to configure
2831 * @priority: priority value
2832 *
2833 * This function configures node element's priority value. It
2834 * needs to be called with scheduler lock held.
2835 */
2836static enum ice_status
2837ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
2838 u8 priority)
2839{
2840 struct ice_aqc_txsched_elem_data buf;
2841 struct ice_aqc_txsched_elem *data;
2842 enum ice_status status;
2843
2844 buf = node->info;
2845 data = &buf.data;
2846 data->valid_sections |= ICE_AQC_ELEM_VALID_GENERIC;
2847 data->generic = priority;
2848
2849 /* Configure element */
2850 status = ice_sched_update_elem(hw, node, &buf);
2851 return status;
2852}
2853
2854/**
2855 * ice_sched_replay_node_bw - replay node(s) BW
2856 * @hw: pointer to the HW struct
2857 * @node: sched node to configure
2858 * @bw_t_info: BW type information
2859 *
2860 * This function restores node's BW from bw_t_info. The caller needs
2861 * to hold the scheduler lock.
2862 */
2863static enum ice_status
2864ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
2865 struct ice_bw_type_info *bw_t_info)
2866{
2867 struct ice_port_info *pi = hw->port_info;
2868 enum ice_status status = ICE_ERR_PARAM;
2869 u16 bw_alloc;
2870
2871 if (!node)
2872 return status;
2873 if (bitmap_empty(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
2874 return 0;
2875 if (test_bit(ICE_BW_TYPE_PRIO, bw_t_info->bw_t_bitmap)) {
2876 status = ice_sched_replay_node_prio(hw, node,
2877 bw_t_info->generic);
2878 if (status)
2879 return status;
2880 }
2881 if (test_bit(ICE_BW_TYPE_CIR, bw_t_info->bw_t_bitmap)) {
2882 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MIN_BW,
2883 bw_t_info->cir_bw.bw);
2884 if (status)
2885 return status;
2886 }
2887 if (test_bit(ICE_BW_TYPE_CIR_WT, bw_t_info->bw_t_bitmap)) {
2888 bw_alloc = bw_t_info->cir_bw.bw_alloc;
2889 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MIN_BW,
2890 bw_alloc);
2891 if (status)
2892 return status;
2893 }
2894 if (test_bit(ICE_BW_TYPE_EIR, bw_t_info->bw_t_bitmap)) {
2895 status = ice_sched_set_node_bw_lmt(pi, node, ICE_MAX_BW,
2896 bw_t_info->eir_bw.bw);
2897 if (status)
2898 return status;
2899 }
2900 if (test_bit(ICE_BW_TYPE_EIR_WT, bw_t_info->bw_t_bitmap)) {
2901 bw_alloc = bw_t_info->eir_bw.bw_alloc;
2902 status = ice_sched_cfg_node_bw_alloc(hw, node, ICE_MAX_BW,
2903 bw_alloc);
2904 if (status)
2905 return status;
2906 }
2907 if (test_bit(ICE_BW_TYPE_SHARED, bw_t_info->bw_t_bitmap))
2908 status = ice_sched_set_node_bw_lmt(pi, node, ICE_SHARED_BW,
2909 bw_t_info->shared_bw);
2910 return status;
2911}
2912
2913/**
2914 * ice_sched_replay_q_bw - replay queue type node BW
2915 * @pi: port information structure
2916 * @q_ctx: queue context structure
2917 *
2918 * This function replays queue type node bandwidth. This function needs to be
2919 * called with scheduler lock held.
2920 */
2921enum ice_status
2922ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
2923{
2924 struct ice_sched_node *q_node;
2925
2926	/* The following call also checks that the node is present in the tree */
2927 q_node = ice_sched_find_node_by_teid(pi->root, q_ctx->q_teid);
2928 if (!q_node)
2929 return ICE_ERR_PARAM;
2930 return ice_sched_replay_node_bw(pi->hw, q_node, &q_ctx->bw_t_info);
2931}