// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */

#define dev_fmt(fmt) "RateLimiting: " fmt

#include <linux/dev_printk.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/units.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_rl.h"
#include "adf_rl_admin.h"
#include "adf_sysfs_rl.h"
#define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET	0U
#define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET	0U
#define RL_TOKEN_PCIE_SIZE			64
#define RL_TOKEN_ASYM_SIZE			1024
#define RL_CSR_SIZE				4U
#define RL_CAPABILITY_MASK			GENMASK(6, 4)
#define RL_CAPABILITY_VALUE			0x70
#define RL_VALIDATE_NON_ZERO(input)		((input) == 0)
#define ROOT_MASK				GENMASK(1, 0)
#define CLUSTER_MASK				GENMASK(3, 0)
#define LEAF_MASK				GENMASK(5, 0)
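/*
 * ROOT_MASK, CLUSTER_MASK and LEAF_MASK appear to bound the node IDs written
 * to the rate limiting CSRs below: 2 bits for roots, 4 bits for clusters and
 * 6 bits for leaves, i.e. a hierarchy of up to 4 service roots, 16 clusters
 * and 64 leaves (an inference from the mask widths, not taken from hardware
 * documentation).
 */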
static int validate_user_input(struct adf_accel_dev *accel_dev,
			       struct adf_rl_sla_input_data *sla_in,
			       bool is_update)
{
	const unsigned long rp_mask = sla_in->rp_mask;

	if (sla_in->pir < sla_in->cir) {
		dev_notice(&GET_DEV(accel_dev),
			   "PIR must be >= CIR, setting PIR to CIR\n");
		sla_in->pir = sla_in->cir;
	}

	rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE;
	for_each_set_bit(i, &rp_mask, rp_mask_size) {
		if (++cnt > RL_RP_CNT_PER_LEAF_MAX) {
			dev_notice(&GET_DEV(accel_dev),
				   "Too many ring pairs selected for this SLA\n");

	if (sla_in->srv >= ADF_SVC_NONE) {
		dev_notice(&GET_DEV(accel_dev),
			   "Wrong service type\n");

	if (sla_in->type > RL_LEAF) {
		dev_notice(&GET_DEV(accel_dev),

	if (sla_in->parent_id < RL_PARENT_DEFAULT_ID ||
	    sla_in->parent_id >= RL_NODES_CNT_MAX) {
		dev_notice(&GET_DEV(accel_dev),
static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id)

	if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) {
		dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n");

	sla = accel_dev->rate_limiting->sla[sla_id];

	dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n");

	if (sla->type != RL_LEAF) {
		dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n");
/**
 * find_parent() - Find the parent for a new SLA
 * @rl_data: pointer to ratelimiting data
 * @sla_in: pointer to user input data for a new SLA
 *
 * Function returns a pointer to the parent SLA. If the parent ID is provided
 * as input in the user data, then such ID is validated and the parent SLA
 * is returned.
 * Otherwise, it returns the default parent SLA (root or cluster) for the new
 * SLA.
 *
 * Return:
 * * Pointer to the parent SLA object
 * * NULL - when parent cannot be found
 */
static struct rl_sla *find_parent(struct adf_rl *rl_data,
				  struct adf_rl_sla_input_data *sla_in)
{
	int input_parent_id = sla_in->parent_id;
	struct rl_sla *root = NULL;
	struct rl_sla *parent_sla;
	int i;

	if (sla_in->type == RL_ROOT)
		return NULL;

	if (input_parent_id > RL_PARENT_DEFAULT_ID) {
		parent_sla = rl_data->sla[input_parent_id];
		/*
		 * SLA can be a parent if it has the same service as the child
		 * and its type is higher in the hierarchy,
		 * for example the parent type of a LEAF must be a CLUSTER.
		 */
		if (parent_sla && parent_sla->srv == sla_in->srv &&
		    parent_sla->type == sla_in->type - 1)
			return parent_sla;

		return NULL;
	}

	/* If input_parent_id is not valid, get root for this service type. */
	for (i = 0; i < RL_ROOT_MAX; i++) {
		if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) {
			root = rl_data->root[i];
			break;
		}
	}

	/*
	 * If the type of this SLA is cluster, then return the root.
	 * Otherwise, find the default (i.e. first) cluster for this service.
	 */
	if (sla_in->type == RL_CLUSTER)
		return root;

	for (i = 0; i < RL_CLUSTER_MAX; i++) {
		if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root)
			return rl_data->cluster[i];
	}

	return NULL;
}
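/*
 * Example (illustrative, not taken from the original sources): for a new
 * RL_LEAF request with sla_in->parent_id == RL_PARENT_DEFAULT_ID and
 * sla_in->srv == ADF_SVC_SYM, find_parent() falls through to the SYM root and
 * returns the first cluster whose parent is that root, i.e. the default
 * cluster created by initialize_default_nodes().
 */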
static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv)

/**
 * get_sla_arr_of_type() - Returns a pointer to SLA type specific array
 * @rl_data: pointer to ratelimiting data
 * @sla_arr: pointer to variable where requested pointer will be stored
 *
 * Return: Max number of elements allowed for the returned array
 */
static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type,
			       struct rl_sla ***sla_arr)

	*sla_arr = rl_data->leaf;

	*sla_arr = rl_data->cluster;
	return RL_CLUSTER_MAX;

	*sla_arr = rl_data->root;

static bool is_service_enabled(struct adf_accel_dev *accel_dev,
			       enum adf_base_services rl_srv)

	enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv);
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u8 rps_per_bundle = hw_data->num_banks_per_vf;

	for (i = 0; i < rps_per_bundle; i++) {
		if (GET_SRV_TYPE(accel_dev, i) == arb_srv)
			return true;
/**
 * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask
 * @accel_dev: pointer to acceleration device structure
 * @sla: SLA object data where result will be written
 * @rp_mask: bitmask of ring pair IDs
 *
 * Function tries to convert the provided bitmap to an array of IDs. It checks
 * that the RPs aren't already in use, that they are assigned to the SLA's
 * service, and that the number of provided IDs is not too big. If successful,
 * it writes the IDs into sla->ring_pairs_ids and their count into
 * sla->ring_pairs_cnt.
 *
 * Return:
 * * 0 - ok
 * * -EINVAL - ring pairs array cannot be created from provided mask
 */
static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
			  const unsigned long rp_mask)
{
	enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv);
	u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf;
	bool *rp_in_use = accel_dev->rate_limiting->rp_in_use;
	size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids);
	u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks;
	u16 cnt = 0;
	u16 rp_id;

	for_each_set_bit(rp_id, &rp_mask, rp_id_max) {
		if (cnt >= rp_cnt_max) {
			dev_notice(&GET_DEV(accel_dev),
				   "Assigned more ring pairs than supported");
			return -EINVAL;
		}

		if (rp_in_use[rp_id]) {
			dev_notice(&GET_DEV(accel_dev),
				   "RP %u already assigned to other SLA", rp_id);
			return -EINVAL;
		}

		if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) {
			dev_notice(&GET_DEV(accel_dev),
				   "RP %u does not support SLA service", rp_id);
			return -EINVAL;
		}

		sla->ring_pairs_ids[cnt++] = rp_id;
	}

	sla->ring_pairs_cnt = cnt;

	return 0;
}
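/*
 * Example (illustrative, not taken from the original sources): with
 * rp_mask = 0xC and ring pairs 2 and 3 free and owned by the SLA's service,
 * the loop above yields ring_pairs_ids = { 2, 3 } and ring_pairs_cnt = 2.
 */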
static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used)

	for (i = 0; i < sla->ring_pairs_cnt; i++) {
		rp_id = sla->ring_pairs_ids[i];
		rp_in_use[rp_id] = used;

static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev,
			       struct rl_sla *sla, bool clear)

	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 base_offset = hw_data->rl_data.r2l_offset;
	u32 node_id = clear ? 0U : (sla->node_id & LEAF_MASK);

	for (i = 0; i < sla->ring_pairs_cnt; i++) {
		offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]);
		ADF_CSR_WR(pmisc_addr, offset, node_id);

static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev,
				   struct rl_sla *sla, bool clear)

	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 base_offset = hw_data->rl_data.l2c_offset;
	u32 node_id = sla->node_id & LEAF_MASK;
	u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK);

	offset = base_offset + (RL_CSR_SIZE * node_id);
	ADF_CSR_WR(pmisc_addr, offset, parent_id);

static void assign_cluster_to_root(struct adf_accel_dev *accel_dev,
				   struct rl_sla *sla, bool clear)

	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 base_offset = hw_data->rl_data.c2s_offset;
	u32 node_id = sla->node_id & CLUSTER_MASK;
	u32 parent_id = clear ? 0U : (sla->parent->node_id & ROOT_MASK);

	offset = base_offset + (RL_CSR_SIZE * node_id);
	ADF_CSR_WR(pmisc_addr, offset, parent_id);

static void assign_node_to_parent(struct adf_accel_dev *accel_dev,
				  struct rl_sla *sla, bool clear_assignment)

	assign_rps_to_leaf(accel_dev, sla, clear_assignment);
	assign_leaf_to_cluster(accel_dev, sla, clear_assignment);

	assign_cluster_to_root(accel_dev, sla, clear_assignment);
/**
 * can_parent_afford_sla() - Verifies if parent allows to create an SLA
 * @sla_in: pointer to user input data for a new SLA
 * @sla_parent: pointer to parent SLA object
 * @sla_cir: current child CIR value (only for update)
 * @is_update: request is an update
 *
 * The algorithm verifies whether the parent has enough remaining budget to
 * take on a child with the provided parameters. In the update case, the
 * child's current CIR value is returned to the budget first.
 * The PIR value cannot exceed the PIR assigned to the parent.
 *
 * Return:
 * * true - SLA can be created
 * * false - SLA cannot be created
 */
static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in,
				  struct rl_sla *sla_parent, u32 sla_cir,
				  bool is_update)
{
	u32 rem_cir = sla_parent->rem_cir;

	if (is_update)
		rem_cir += sla_cir;

	if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir)
		return false;

	return true;
}
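/*
 * Worked example (illustrative, numbers are made up): a parent with
 * rem_cir = 300 and a child SLA being updated from cir = 100 to cir = 350
 * passes the check above, because on update the child's current CIR is first
 * returned to the parent's budget (300 + 100 = 400 >= 350), assuming its PIR
 * stays within the parent's PIR. The same request would fail for a brand new
 * SLA (350 > 300).
 */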
/**
 * can_node_afford_update() - Verifies if SLA can be updated with input data
 * @sla_in: pointer to user input data for a new SLA
 * @sla: pointer to SLA object selected for update
 *
 * The algorithm verifies whether the new CIR value is big enough to satisfy
 * the currently assigned child SLAs and whether the PIR can be updated.
 *
 * Return:
 * * true - SLA can be updated
 * * false - SLA cannot be updated
 */
static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in,
				   struct rl_sla *sla)
{
	u32 cir_in_use = sla->cir - sla->rem_cir;

	/* new CIR cannot be smaller than the currently consumed value */
	if (cir_in_use > sla_in->cir)
		return false;

	/* PIR of a root/cluster cannot be reduced in a node with assigned children */
	if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0)
		return false;

	return true;
}
static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla,
			     struct adf_rl_sla_input_data *sla_in,

	u32 max_val = rl_data->device_data->scale_ref;
	struct rl_sla *parent = sla->parent;

	if (sla_in->cir > max_val || sla_in->pir > max_val)

		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,

		ret &= can_parent_afford_sla(sla_in, parent, sla->cir,

			ret &= can_node_afford_update(sla_in, sla);

			ret &= can_node_afford_update(sla_in, sla);

static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update)

			sla->parent->rem_cir += old_cir;

		sla->parent->rem_cir -= sla->cir;

			sla->parent->rem_cir += old_cir;
			sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);

			sla->rem_cir = sla->cir;

		sla->parent->rem_cir -= sla->cir;

			sla->rem_cir = sla->cir - (old_cir - sla->rem_cir);

			sla->rem_cir = sla->cir;
/**
 * get_next_free_sla_id() - finds next free ID in the SLA array
 * @rl_data: Pointer to ratelimiting data structure
 *
 * Return:
 * * 0 : RL_NODES_CNT_MAX - correct ID
 * * -ENOSPC - all SLA slots are in use
 */
static int get_next_free_sla_id(struct adf_rl *rl_data)

	while (i < RL_NODES_CNT_MAX && rl_data->sla[i++])
		;

	if (i == RL_NODES_CNT_MAX)
		return -ENOSPC;
/**
 * get_next_free_node_id() - finds next free ID in the array of that node type
 * @rl_data: Pointer to ratelimiting data structure
 * @sla: Pointer to SLA object for which the ID is searched
 *
 * Return:
 * * 0 : RL_[NODE_TYPE]_MAX - correct ID
 * * -ENOSPC - all slots of that type are in use
 */
static int get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla)

	struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev);
	int max_id, i, step, rp_per_leaf;
	struct rl_sla **sla_list;

	rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf;

	/*
	 * Static nodes mapping:
	 * root0 - cluster[0,4,8,12] - leaf[0-15]
	 * root1 - cluster[1,5,9,13] - leaf[16-31]
	 * root2 - cluster[2,6,10,14] - leaf[32-47]
	 */
	i = sla->srv * rp_per_leaf;
	max_id = i + rp_per_leaf;
	sla_list = rl_data->leaf;

	max_id = RL_CLUSTER_MAX;
	sla_list = rl_data->cluster;

	while (i < max_id && sla_list[i])
		i += step;
u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val,
				  enum adf_base_services svc_type)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u64 avail_slice_cycles, allocated_tokens;

	avail_slice_cycles = hw_data->clock_frequency;

	switch (svc_type) {
	case ADF_SVC_ASYM:
		avail_slice_cycles *= device_data->slices.pke_cnt;
		break;
	case ADF_SVC_SYM:
		avail_slice_cycles *= device_data->slices.cph_cnt;
		break;
	case ADF_SVC_DC:
		avail_slice_cycles *= device_data->slices.dcpr_cnt;
		break;
	default:
		break;
	}

	do_div(avail_slice_cycles, device_data->scan_interval);
	allocated_tokens = avail_slice_cycles * sla_val;
	do_div(allocated_tokens, device_data->scale_ref);

	return allocated_tokens;
}
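/*
 * Worked example (illustrative, all numbers are made up): with
 * clock_frequency = 500 MHz, dcpr_cnt = 6, scan_interval = 1000 and
 * scale_ref = 1000, a DC SLA of sla_val = 500 (half of scale_ref) gets
 * (500000000 * 6 / 1000) * 500 / 1000 = 1500000 slice tokens per scan
 * interval.
 */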
u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val,
			       enum adf_base_services svc_type)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u64 allocated_ae_cycles, avail_ae_cycles;

	avail_ae_cycles = hw_data->clock_frequency;
	avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1;
	do_div(avail_ae_cycles, device_data->scan_interval);

	sla_val *= device_data->max_tp[svc_type];
	sla_val /= device_data->scale_ref;

	allocated_ae_cycles = (sla_val * avail_ae_cycles);
	do_div(allocated_ae_cycles, device_data->max_tp[svc_type]);

	return allocated_ae_cycles;
}
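/*
 * The two-step scaling above (multiplying sla_val by max_tp[svc_type] and
 * dividing it back out) works out to roughly avail_ae_cycles * sla_val /
 * scale_ref; the intermediate step only affects integer rounding
 * (descriptive summary, not taken from the original sources).
 */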
u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val,
			    enum adf_base_services svc_type, bool is_bw_out)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
	u64 sla_to_bytes, allocated_bw, sla_scaled;

	sla_to_bytes = sla_val;
	sla_to_bytes *= device_data->max_tp[svc_type];
	do_div(sla_to_bytes, device_data->scale_ref);

	sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE :

	if (svc_type == ADF_SVC_DC && is_bw_out)
		sla_to_bytes *= device_data->slices.dcpr_cnt -
				device_data->dcpr_correction;

	sla_scaled = sla_to_bytes * device_data->pcie_scale_mul;
	do_div(sla_scaled, device_data->pcie_scale_div);
	allocated_bw = sla_scaled;
	do_div(allocated_bw, RL_TOKEN_PCIE_SIZE);
	do_div(allocated_bw, device_data->scan_interval);

	return allocated_bw;
}
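/*
 * Summary (descriptive, not taken from the original sources): the SLA value is
 * first converted to a byte count as its fraction of max_tp[svc_type]
 * (sla_val / scale_ref), ASYM SLAs being sized in RL_TOKEN_ASYM_SIZE units,
 * then scaled by the device specific PCIe factors (pcie_scale_mul /
 * pcie_scale_div) and finally expressed in RL_TOKEN_PCIE_SIZE byte tokens per
 * scan interval. DC output bandwidth additionally accounts for the usable
 * compression slices.
 */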
/**
 * add_new_sla_entry() - creates a new SLA object and fills it with user data
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data for a new SLA
 * @sla_out: Pointer to variable that will contain the address of a new
 *	     SLA object if the operation succeeds
 *
 * Return:
 * * -ENOMEM - memory allocation failed
 * * -EINVAL - invalid user input
 * * -ENOSPC - all available SLAs are in use
 */
static int add_new_sla_entry(struct adf_accel_dev *accel_dev,
			     struct adf_rl_sla_input_data *sla_in,
			     struct rl_sla **sla_out)

	struct adf_rl *rl_data = accel_dev->rate_limiting;

	sla = kzalloc(sizeof(*sla), GFP_KERNEL);

	if (!is_service_enabled(accel_dev, sla_in->srv)) {
		dev_notice(&GET_DEV(accel_dev),
			   "Provided service is not enabled\n");

	sla->srv = sla_in->srv;
	sla->type = sla_in->type;
	ret = get_next_free_node_id(rl_data, sla);

		dev_notice(&GET_DEV(accel_dev),
			   "Exceeded number of available nodes for that service\n");

	ret = get_next_free_sla_id(rl_data);

		dev_notice(&GET_DEV(accel_dev),
683 "Allocated maximum SLAs number\n");
	sla->parent = find_parent(rl_data, sla_in);
	if (!sla->parent && sla->type != RL_ROOT) {
		if (sla_in->parent_id != RL_PARENT_DEFAULT_ID)
			dev_notice(&GET_DEV(accel_dev),
				   "Provided parent ID does not exist or cannot be parent for this SLA.");

			dev_notice(&GET_DEV(accel_dev),
				   "Unable to find parent node for this service. Is service enabled?");

	if (sla->type == RL_LEAF) {
		ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask);
		if (!sla->ring_pairs_cnt || ret) {
			dev_notice(&GET_DEV(accel_dev),
				   "Unable to find ring pairs to assign to the leaf");
static int initialize_default_nodes(struct adf_accel_dev *accel_dev)
{
	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct adf_rl_hw_data *device_data = rl_data->device_data;
	struct adf_rl_sla_input_data sla_in = { };

	/* Init root for each enabled service */
	sla_in.type = RL_ROOT;
	sla_in.parent_id = RL_PARENT_DEFAULT_ID;

	for (i = 0; i < ADF_SVC_NONE; i++) {
		if (!is_service_enabled(accel_dev, i))
			continue;

		sla_in.cir = device_data->scale_ref;
		sla_in.pir = sla_in.cir;

		ret = adf_rl_add_sla(accel_dev, &sla_in);

	/* Init default cluster for each root */
	sla_in.type = RL_CLUSTER;
	for (i = 0; i < ADF_SVC_NONE; i++) {
		if (!rl_data->root[i])
			continue;

		sla_in.cir = rl_data->root[i]->cir;
		sla_in.pir = sla_in.cir;
		sla_in.srv = rl_data->root[i]->srv;

		ret = adf_rl_add_sla(accel_dev, &sla_in);
static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla)

	bool *rp_in_use = rl_data->rp_in_use;
	struct rl_sla **sla_type_arr = NULL;
	int i, sla_id, node_id;

	sla_id = sla->sla_id;
	node_id = sla->node_id;

	for (i = 0; i < sla->ring_pairs_cnt; i++)
		rp_in_use[sla->ring_pairs_ids[i]] = false;

	update_budget(sla, old_cir, true);
	get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
	assign_node_to_parent(rl_data->accel_dev, sla, true);
	adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type);
	mark_rps_usage(sla, rl_data->rp_in_use, false);

	rl_data->sla[sla_id] = NULL;
	sla_type_arr[node_id] = NULL;

/**
 * add_update_sla() - handles the creation and the update of an SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data for a new/updated SLA
 * @is_update: flag to indicate if this is an update or an add operation
 *
 * Return:
 * * -ENOMEM - memory allocation failed
 * * -EINVAL - user input data cannot be used to create SLA
 * * -ENOSPC - all available SLAs are in use
 */
static int add_update_sla(struct adf_accel_dev *accel_dev,
			  struct adf_rl_sla_input_data *sla_in, bool is_update)

	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla **sla_type_arr = NULL;
	struct rl_sla *sla = NULL;

		dev_warn(&GET_DEV(accel_dev),
			 "SLA input data pointer is missing\n");

	mutex_lock(&rl_data->rl_lock);

	/* Input validation */
	ret = validate_user_input(accel_dev, sla_in, is_update);

		ret = validate_sla_id(accel_dev, sla_in->sla_id);

		sla = rl_data->sla[sla_in->sla_id];

		ret = add_new_sla_entry(accel_dev, sla_in, &sla);

	if (!is_enough_budget(rl_data, sla, sla_in, is_update)) {
		dev_notice(&GET_DEV(accel_dev),
			   "Input value exceeds the remaining budget%s\n",
			   is_update ? " or more budget is already in use" : "");

	sla->cir = sla_in->cir;
	sla->pir = sla_in->pir;

	assign_node_to_parent(accel_dev, sla, false);
	ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update);

		dev_notice(&GET_DEV(accel_dev),
			   "Failed to apply an SLA\n");

	update_budget(sla, old_cir, is_update);

	mark_rps_usage(sla, rl_data->rp_in_use, true);
	get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr);
	sla_type_arr[sla->node_id] = sla;
	rl_data->sla[sla->sla_id] = sla;

	sla_in->sla_id = sla->sla_id;

	mutex_unlock(&rl_data->rl_lock);
/**
 * adf_rl_add_sla() - handles the creation of an SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data required to add an SLA
 *
 * Return:
 * * -ENOMEM - memory allocation failed
 * * -EINVAL - invalid user input
 * * -ENOSPC - all available SLAs are in use
 */
int adf_rl_add_sla(struct adf_accel_dev *accel_dev,
		   struct adf_rl_sla_input_data *sla_in)
{
	return add_update_sla(accel_dev, sla_in, false);
}
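/*
 * Example (illustrative only, not taken from the original sources): a caller
 * could request a leaf SLA on ring pairs 0 and 1 of the SYM service with
 *
 *	struct adf_rl_sla_input_data sla_in = { };
 *	int ret;
 *
 *	sla_in.type = RL_LEAF;
 *	sla_in.srv = ADF_SVC_SYM;
 *	sla_in.parent_id = RL_PARENT_DEFAULT_ID;
 *	sla_in.rp_mask = BIT(0) | BIT(1);
 *	sla_in.cir = 100;
 *	sla_in.pir = 200;
 *	ret = adf_rl_add_sla(accel_dev, &sla_in);
 *
 * On success sla_in.sla_id holds the ID assigned to the new SLA; the CIR/PIR
 * numbers are placeholders and must not exceed the device's scale_ref.
 */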
/**
 * adf_rl_update_sla() - handles the update of an SLA
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user input data required to update an SLA
 *
 * Return:
 * * -EINVAL - user input data cannot be used to update SLA
 */
int adf_rl_update_sla(struct adf_accel_dev *accel_dev,
		      struct adf_rl_sla_input_data *sla_in)
{
	return add_update_sla(accel_dev, sla_in, true);
}
/**
 * adf_rl_get_sla() - returns an existing SLA data
 * @accel_dev: pointer to acceleration device structure
 * @sla_in: pointer to user data where SLA info will be stored
 *
 * The sla_id for which data are requested should be set in the sla_id field
 * of the structure pointed to by @sla_in.
 *
 * Return:
 * * -EINVAL - provided sla_id does not exist
 */
int adf_rl_get_sla(struct adf_accel_dev *accel_dev,
		   struct adf_rl_sla_input_data *sla_in)
{
	ret = validate_sla_id(accel_dev, sla_in->sla_id);

	sla = accel_dev->rate_limiting->sla[sla_in->sla_id];
	sla_in->type = sla->type;
	sla_in->srv = sla->srv;
	sla_in->cir = sla->cir;
	sla_in->pir = sla->pir;
	sla_in->rp_mask = 0U;
	if (sla->parent)
		sla_in->parent_id = sla->parent->sla_id;
	else
		sla_in->parent_id = RL_PARENT_DEFAULT_ID;

	for (i = 0; i < sla->ring_pairs_cnt; i++)
		sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]);
/**
 * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR)
 * for the selected service or provided sla_id
 * @accel_dev: pointer to acceleration device structure
 * @srv: service ID for which capability is requested
 * @sla_id: ID of the cluster or root to which we want to assign a new SLA
 *
 * Check if the provided SLA id is valid. If it is, and the service matches
 * the requested service and the type is cluster or root, return the remaining
 * capability.
 * If the provided ID does not match the service or type, return the remaining
 * capacity of the default cluster for that service.
 *
 * Return:
 * * Positive value - correct remaining value
 * * -EINVAL - algorithm cannot find a remaining value for provided data
 */
int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev,
				    enum adf_base_services srv, int sla_id)

	struct adf_rl *rl_data = accel_dev->rate_limiting;
	struct rl_sla *sla = NULL;

	if (srv >= ADF_SVC_NONE)

	if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) {
		sla = rl_data->sla[sla_id];

		if (sla->srv == srv && sla->type <= RL_CLUSTER)

	for (i = 0; i < RL_CLUSTER_MAX; i++) {
		if (!rl_data->cluster[i])
			continue;

		if (rl_data->cluster[i]->srv == srv) {
			sla = rl_data->cluster[i];
/**
 * adf_rl_remove_sla() - removes provided sla_id
 * @accel_dev: pointer to acceleration device structure
 * @sla_id: ID of the SLA to be removed
 *
 * Return:
 * * -EINVAL - wrong sla_id or the SLA still has assigned children
 */
int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id)

	struct adf_rl *rl_data = accel_dev->rate_limiting;

	mutex_lock(&rl_data->rl_lock);
	ret = validate_sla_id(accel_dev, sla_id);

	sla = rl_data->sla[sla_id];

	if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) {
		dev_notice(&GET_DEV(accel_dev),
			   "To remove parent SLA all its children must be removed first");

	clear_sla(rl_data, sla);

	mutex_unlock(&rl_data->rl_lock);

/**
 * adf_rl_remove_sla_all() - removes all SLAs from device
 * @accel_dev: pointer to acceleration device structure
 * @incl_default: set to true if default SLAs also should be removed
 */
void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default)

	struct adf_rl *rl_data = accel_dev->rate_limiting;
	int end_type = incl_default ? RL_ROOT : RL_LEAF;
	struct rl_sla **sla_type_arr = NULL;

	mutex_lock(&rl_data->rl_lock);

	/* Unregister and remove all SLAs */
	for (j = RL_LEAF; j >= end_type; j--) {
		max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr);

		for (i = 0; i < max_id; i++) {
			if (!sla_type_arr[i])
				continue;

			clear_sla(rl_data, sla_type_arr[i]);

	mutex_unlock(&rl_data->rl_lock);

int adf_rl_init(struct adf_accel_dev *accel_dev)

	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data;

	/* Validate device parameters */
	if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) ||
	    RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) {

	rl = kzalloc(sizeof(*rl), GFP_KERNEL);

	mutex_init(&rl->rl_lock);
	rl->device_data = &accel_dev->hw_device->rl_data;
	rl->accel_dev = accel_dev;
	accel_dev->rate_limiting = rl;

int adf_rl_start(struct adf_accel_dev *accel_dev)

	struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data;
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;

	if (!accel_dev->rate_limiting) {

	if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) {
		dev_info(&GET_DEV(accel_dev), "not supported\n");

	ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset,
		   RL_TOKEN_GRANULARITY_PCIEIN_BUCKET);
	ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset,
		   RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET);

	ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices);

		dev_err(&GET_DEV(accel_dev), "initialization failed\n");

	ret = initialize_default_nodes(accel_dev);

		dev_err(&GET_DEV(accel_dev),
			"failed to initialize default SLAs\n");

	ret = adf_sysfs_rl_add(accel_dev);

		dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n");

	adf_sysfs_rl_rm(accel_dev);

	adf_rl_remove_sla_all(accel_dev, true);

	kfree(accel_dev->rate_limiting);
	accel_dev->rate_limiting = NULL;

void adf_rl_stop(struct adf_accel_dev *accel_dev)

	if (!accel_dev->rate_limiting)

	adf_sysfs_rl_rm(accel_dev);
	adf_rl_remove_sla_all(accel_dev, true);

void adf_rl_exit(struct adf_accel_dev *accel_dev)

	if (!accel_dev->rate_limiting)

	kfree(accel_dev->rate_limiting);
	accel_dev->rate_limiting = NULL;