// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Clock Protocol
 *
 * Copyright (C) 2018-2022 ARM Ltd.
 */

#include <linux/module.h>
#include <linux/limits.h>
#include <linux/sort.h>

#include "protocols.h"
#include "notify.h"

/* Updated only after ALL the mandatory features for that version are merged */
#define SCMI_PROTOCOL_SUPPORTED_VERSION         0x30000

enum scmi_clock_protocol_cmd {
        CLOCK_ATTRIBUTES = 0x3,
        CLOCK_DESCRIBE_RATES = 0x4,
        CLOCK_RATE_SET = 0x5,
        CLOCK_RATE_GET = 0x6,
        CLOCK_CONFIG_SET = 0x7,
        CLOCK_NAME_GET = 0x8,
        CLOCK_RATE_NOTIFY = 0x9,
        CLOCK_RATE_CHANGE_REQUESTED_NOTIFY = 0xA,
        CLOCK_CONFIG_GET = 0xB,
        CLOCK_POSSIBLE_PARENTS_GET = 0xC,
        CLOCK_PARENT_SET = 0xD,
        CLOCK_PARENT_GET = 0xE,
        CLOCK_GET_PERMISSIONS = 0xF,
};

#define CLOCK_STATE_CONTROL_ALLOWED     BIT(31)
#define CLOCK_PARENT_CONTROL_ALLOWED    BIT(30)
#define CLOCK_RATE_CONTROL_ALLOWED      BIT(29)

enum clk_state {
        CLK_STATE_DISABLE,
        CLK_STATE_ENABLE,
        CLK_STATE_RESERVED,
        CLK_STATE_UNCHANGED,
};

struct scmi_msg_resp_clock_protocol_attributes {
        __le16 num_clocks;
        u8 max_async_req;
        u8 reserved;
};

struct scmi_msg_resp_clock_attributes {
        __le32 attributes;
#define SUPPORTS_RATE_CHANGED_NOTIF(x)          ((x) & BIT(31))
#define SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(x) ((x) & BIT(30))
#define SUPPORTS_EXTENDED_NAMES(x)              ((x) & BIT(29))
#define SUPPORTS_PARENT_CLOCK(x)                ((x) & BIT(28))
#define SUPPORTS_EXTENDED_CONFIG(x)             ((x) & BIT(27))
#define SUPPORTS_GET_PERMISSIONS(x)             ((x) & BIT(1))
        u8 name[SCMI_SHORT_NAME_MAX_SIZE];
        __le32 clock_enable_latency;
};

struct scmi_msg_clock_possible_parents {
        __le32 id;
        __le32 skip_parents;
};

struct scmi_msg_resp_clock_possible_parents {
        __le32 num_parent_flags;
#define NUM_PARENTS_RETURNED(x)         ((x) & 0xff)
#define NUM_PARENTS_REMAINING(x)        ((x) >> 24)
        __le32 possible_parents[];
};

struct scmi_msg_clock_set_parent {
        __le32 id;
        __le32 parent_id;
};

struct scmi_msg_clock_config_set {
        __le32 id;
        __le32 attributes;
};

/* Valid only from SCMI clock v3.0 */
struct scmi_msg_clock_config_set_v2 {
        __le32 id;
        __le32 attributes;
#define NULL_OEM_TYPE                   0
#define REGMASK_OEM_TYPE_SET            GENMASK(23, 16)
#define REGMASK_CLK_STATE               GENMASK(1, 0)
        __le32 oem_config_val;
};

struct scmi_msg_clock_config_get {
        __le32 id;
        __le32 flags;
#define REGMASK_OEM_TYPE_GET            GENMASK(7, 0)
};

struct scmi_msg_resp_clock_config_get {
        __le32 attributes;
        __le32 config;
#define IS_CLK_ENABLED(x)               le32_get_bits((x), BIT(0))
        __le32 oem_config_val;
};

struct scmi_msg_clock_describe_rates {
        __le32 id;
        __le32 rate_index;
};

struct scmi_msg_resp_clock_describe_rates {
        __le32 num_rates_flags;
#define NUM_RETURNED(x)         ((x) & 0xfff)
#define RATE_DISCRETE(x)        !((x) & BIT(12))
#define NUM_REMAINING(x)        ((x) >> 16)
        struct {
                __le32 value_low;
                __le32 value_high;
        } rate[];
#define RATE_TO_U64(X)          \
({                              \
        typeof(X) x = (X);      \
        le32_to_cpu((x).value_low) | (u64)le32_to_cpu((x).value_high) << 32; \
})
};

struct scmi_clock_set_rate {
        __le32 flags;
#define CLOCK_SET_ASYNC         BIT(0)
#define CLOCK_SET_IGNORE_RESP   BIT(1)
#define CLOCK_SET_ROUND_UP      BIT(2)
#define CLOCK_SET_ROUND_AUTO    BIT(3)
        __le32 id;
        __le32 value_low;
        __le32 value_high;
};

struct scmi_msg_resp_set_rate_complete {
        __le32 id;
        __le32 rate_low;
        __le32 rate_high;
};

struct scmi_msg_clock_rate_notify {
        __le32 clk_id;
        __le32 notify_enable;
};

struct scmi_clock_rate_notify_payld {
        __le32 agent_id;
        __le32 clock_id;
        __le32 rate_low;
        __le32 rate_high;
};

struct clock_info {
        u32 version;
        int num_clocks;
        int max_async_req;
        bool notify_rate_changed_cmd;
        bool notify_rate_change_requested_cmd;
        atomic_t cur_async_req;
        struct scmi_clock_info *clk;
        int (*clock_config_set)(const struct scmi_protocol_handle *ph,
                                u32 clk_id, enum clk_state state,
                                enum scmi_clock_oem_config oem_type,
                                u32 oem_val, bool atomic);
        int (*clock_config_get)(const struct scmi_protocol_handle *ph,
                                u32 clk_id, enum scmi_clock_oem_config oem_type,
                                u32 *attributes, bool *enabled, u32 *oem_val,
                                bool atomic);
};

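/* Map notification event ids onto the corresponding notify-enable commands */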
static enum scmi_clock_protocol_cmd evt_2_cmd[] = {
        CLOCK_RATE_NOTIFY,
        CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
};

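/* Validate a clock domain identifier and return its descriptor */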
static inline struct scmi_clock_info *
scmi_clock_domain_lookup(struct clock_info *ci, u32 clk_id)
{
        if (clk_id >= ci->num_clocks)
                return ERR_PTR(-EINVAL);

        return ci->clk + clk_id;
}

static int
scmi_clock_protocol_attributes_get(const struct scmi_protocol_handle *ph,
                                   struct clock_info *ci)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_clock_protocol_attributes *attr;

        ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES,
                                      0, sizeof(*attr), &t);
        if (ret)
                return ret;

        attr = t->rx.buf;

        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
                ci->num_clocks = le16_to_cpu(attr->num_clocks);
                ci->max_async_req = attr->max_async_req;
        }

        ph->xops->xfer_put(ph, t);

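        /*
         * Check which of the optional rate-change notification commands are
         * implemented by this platform and cache the result.
         */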
        if (!ret) {
                if (!ph->hops->protocol_msg_check(ph, CLOCK_RATE_NOTIFY, NULL))
                        ci->notify_rate_changed_cmd = true;

                if (!ph->hops->protocol_msg_check(ph,
                                                  CLOCK_RATE_CHANGE_REQUESTED_NOTIFY,
                                                  NULL))
                        ci->notify_rate_change_requested_cmd = true;
        }

        return ret;
}

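/*
 * Private data handed to the iterator helpers used for the possible-parents
 * and describe-rates multi-part queries.
 */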
struct scmi_clk_ipriv {
        struct device *dev;
        u32 clk_id;
        struct scmi_clock_info *clk;
};

static void iter_clk_possible_parents_prepare_message(void *message, unsigned int desc_index,
                                                      const void *priv)
{
        struct scmi_msg_clock_possible_parents *msg = message;
        const struct scmi_clk_ipriv *p = priv;

        msg->id = cpu_to_le32(p->clk_id);
        /* Set the number of parents to be skipped/already read */
        msg->skip_parents = cpu_to_le32(desc_index);
}

static int iter_clk_possible_parents_update_state(struct scmi_iterator_state *st,
                                                  const void *response, void *priv)
{
        const struct scmi_msg_resp_clock_possible_parents *r = response;
        struct scmi_clk_ipriv *p = priv;
        struct device *dev = p->dev;
        u32 flags;

        flags = le32_to_cpu(r->num_parent_flags);
        st->num_returned = NUM_PARENTS_RETURNED(flags);
        st->num_remaining = NUM_PARENTS_REMAINING(flags);

        /*
         * num parents is not declared previously anywhere so we
         * assume it's returned+remaining on first call.
         */
        if (!st->max_resources) {
                p->clk->num_parents = st->num_returned + st->num_remaining;
                p->clk->parents = devm_kcalloc(dev, p->clk->num_parents,
                                               sizeof(*p->clk->parents),
                                               GFP_KERNEL);
                if (!p->clk->parents) {
                        p->clk->num_parents = 0;
                        return -ENOMEM;
                }
                st->max_resources = st->num_returned + st->num_remaining;
        }

        return 0;
}

static int iter_clk_possible_parents_process_response(const struct scmi_protocol_handle *ph,
                                                      const void *response,
                                                      struct scmi_iterator_state *st,
                                                      void *priv)
{
        const struct scmi_msg_resp_clock_possible_parents *r = response;
        struct scmi_clk_ipriv *p = priv;

        u32 *parent = &p->clk->parents[st->desc_index + st->loop_idx];

        *parent = le32_to_cpu(r->possible_parents[st->loop_idx]);

        return 0;
}

static int scmi_clock_possible_parents(const struct scmi_protocol_handle *ph, u32 clk_id,
                                       struct scmi_clock_info *clk)
{
        struct scmi_iterator_ops ops = {
                .prepare_message = iter_clk_possible_parents_prepare_message,
                .update_state = iter_clk_possible_parents_update_state,
                .process_response = iter_clk_possible_parents_process_response,
        };

        struct scmi_clk_ipriv ppriv = {
                .clk_id = clk_id,
                .clk = clk,
                .dev = ph->dev,
        };
        void *iter;
        int ret;

        iter = ph->hops->iter_response_init(ph, &ops, 0,
                                            CLOCK_POSSIBLE_PARENTS_GET,
                                            sizeof(struct scmi_msg_clock_possible_parents),
                                            &ppriv);
        if (IS_ERR(iter))
                return PTR_ERR(iter);

        ret = ph->hops->iter_response_run(iter);

        return ret;
}

static int
scmi_clock_get_permissions(const struct scmi_protocol_handle *ph, u32 clk_id,
                           struct scmi_clock_info *clk)
{
        struct scmi_xfer *t;
        u32 perm;
        int ret;

        ret = ph->xops->xfer_get_init(ph, CLOCK_GET_PERMISSIONS,
                                      sizeof(clk_id), sizeof(perm), &t);
        if (ret)
                return ret;

        put_unaligned_le32(clk_id, t->tx.buf);

        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
                perm = get_unaligned_le32(t->rx.buf);

                clk->state_ctrl_forbidden = !(perm & CLOCK_STATE_CONTROL_ALLOWED);
                clk->rate_ctrl_forbidden = !(perm & CLOCK_RATE_CONTROL_ALLOWED);
                clk->parent_ctrl_forbidden = !(perm & CLOCK_PARENT_CONTROL_ALLOWED);
        }

        ph->xops->xfer_put(ph, t);

        return ret;
}

static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph,
                                     u32 clk_id, struct clock_info *cinfo,
                                     u32 version)
{
        int ret;
        u32 attributes;
        struct scmi_xfer *t;
        struct scmi_msg_resp_clock_attributes *attr;
        struct scmi_clock_info *clk = cinfo->clk + clk_id;

        ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
                                      sizeof(clk_id), sizeof(*attr), &t);
        if (ret)
                return ret;

        put_unaligned_le32(clk_id, t->tx.buf);
        attr = t->rx.buf;

        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
                u32 latency = 0;
                attributes = le32_to_cpu(attr->attributes);
                strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
                /*
                 * The clock_enable_latency field is only present since
                 * SCMI v3.1 (clock protocol v2.0).
                 */
                if (PROTOCOL_REV_MAJOR(version) >= 0x2)
                        latency = le32_to_cpu(attr->clock_enable_latency);
                clk->enable_latency = latency ? : U32_MAX;
        }

        ph->xops->xfer_put(ph, t);

        /*
         * If supported overwrite short name with the extended one;
         * on error just carry on and use already provided short name.
         */
        if (!ret && PROTOCOL_REV_MAJOR(version) >= 0x2) {
                if (SUPPORTS_EXTENDED_NAMES(attributes))
                        ph->hops->extended_name_get(ph, CLOCK_NAME_GET, clk_id,
                                                    NULL, clk->name,
                                                    SCMI_MAX_STR_SIZE);

                if (cinfo->notify_rate_changed_cmd &&
                    SUPPORTS_RATE_CHANGED_NOTIF(attributes))
                        clk->rate_changed_notifications = true;
                if (cinfo->notify_rate_change_requested_cmd &&
                    SUPPORTS_RATE_CHANGE_REQUESTED_NOTIF(attributes))
                        clk->rate_change_requested_notifications = true;
                if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
                        if (SUPPORTS_PARENT_CLOCK(attributes))
                                scmi_clock_possible_parents(ph, clk_id, clk);
                        if (SUPPORTS_GET_PERMISSIONS(attributes))
                                scmi_clock_get_permissions(ph, clk_id, clk);
                        if (SUPPORTS_EXTENDED_CONFIG(attributes))
                                clk->extended_config = true;
                }
        }

        return ret;
}

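/* Comparator used to sort the discrete rates list in ascending order */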
static int rate_cmp_func(const void *_r1, const void *_r2)
{
        const u64 *r1 = _r1, *r2 = _r2;

        if (*r1 < *r2)
                return -1;
        else if (*r1 == *r2)
                return 0;
        else
                return 1;
}

static void iter_clk_describe_prepare_message(void *message,
                                              const unsigned int desc_index,
                                              const void *priv)
{
        struct scmi_msg_clock_describe_rates *msg = message;
        const struct scmi_clk_ipriv *p = priv;

        msg->id = cpu_to_le32(p->clk_id);
        /* Set the number of rates to be skipped/already read */
        msg->rate_index = cpu_to_le32(desc_index);
}

static int
iter_clk_describe_update_state(struct scmi_iterator_state *st,
                               const void *response, void *priv)
{
        u32 flags;
        struct scmi_clk_ipriv *p = priv;
        const struct scmi_msg_resp_clock_describe_rates *r = response;

        flags = le32_to_cpu(r->num_rates_flags);
        st->num_remaining = NUM_REMAINING(flags);
        st->num_returned = NUM_RETURNED(flags);
        p->clk->rate_discrete = RATE_DISCRETE(flags);

        /* Warn about out of spec replies ... */
        if (!p->clk->rate_discrete &&
            (st->num_returned != 3 || st->num_remaining != 0)) {
                dev_warn(p->dev,
                         "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n",
                         p->clk->name, st->num_returned, st->num_remaining,
                         st->rx_len);

                /*
                 * A known quirk: a triplet is returned but num_returned != 3
                 * Check for a safe payload size and fix.
                 */
                if (st->num_returned != 3 && st->num_remaining == 0 &&
                    st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) {
                        st->num_returned = 3;
                        st->num_remaining = 0;
                } else {
                        dev_err(p->dev,
                                "Cannot fix out-of-spec reply !\n");
                        return -EPROTO;
                }
        }

        return 0;
}

static int
iter_clk_describe_process_response(const struct scmi_protocol_handle *ph,
                                   const void *response,
                                   struct scmi_iterator_state *st, void *priv)
{
        int ret = 0;
        struct scmi_clk_ipriv *p = priv;
        const struct scmi_msg_resp_clock_describe_rates *r = response;

        if (!p->clk->rate_discrete) {
                switch (st->desc_index + st->loop_idx) {
                case 0:
                        p->clk->range.min_rate = RATE_TO_U64(r->rate[0]);
                        break;
                case 1:
                        p->clk->range.max_rate = RATE_TO_U64(r->rate[1]);
                        break;
                case 2:
                        p->clk->range.step_size = RATE_TO_U64(r->rate[2]);
                        break;
                default:
                        ret = -EINVAL;
                        break;
                }
        } else {
                u64 *rate = &p->clk->list.rates[st->desc_index + st->loop_idx];

                *rate = RATE_TO_U64(r->rate[st->loop_idx]);
                p->clk->list.num_rates++;
        }

        return ret;
}

static int
scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id,
                              struct scmi_clock_info *clk)
{
        int ret;
        void *iter;
        struct scmi_iterator_ops ops = {
                .prepare_message = iter_clk_describe_prepare_message,
                .update_state = iter_clk_describe_update_state,
                .process_response = iter_clk_describe_process_response,
        };
        struct scmi_clk_ipriv cpriv = {
                .clk_id = clk_id,
                .clk = clk,
                .dev = ph->dev,
        };

        iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES,
                                            CLOCK_DESCRIBE_RATES,
                                            sizeof(struct scmi_msg_clock_describe_rates),
                                            &cpriv);
        if (IS_ERR(iter))
                return PTR_ERR(iter);

        ret = ph->hops->iter_response_run(iter);
        if (ret)
                return ret;

        if (!clk->rate_discrete) {
                dev_dbg(ph->dev, "Min %llu Max %llu Step %llu Hz\n",
                        clk->range.min_rate, clk->range.max_rate,
                        clk->range.step_size);
        } else if (clk->list.num_rates) {
                sort(clk->list.rates, clk->list.num_rates,
                     sizeof(clk->list.rates[0]), rate_cmp_func, NULL);
        }

        return ret;
}

static int
scmi_clock_rate_get(const struct scmi_protocol_handle *ph,
                    u32 clk_id, u64 *value)
{
        int ret;
        struct scmi_xfer *t;

        ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_GET,
                                      sizeof(__le32), sizeof(u64), &t);
        if (ret)
                return ret;

        put_unaligned_le32(clk_id, t->tx.buf);

        ret = ph->xops->do_xfer(ph, t);
        if (!ret)
                *value = get_unaligned_le64(t->rx.buf);

        ph->xops->xfer_put(ph, t);
        return ret;
}

static int scmi_clock_rate_set(const struct scmi_protocol_handle *ph,
                               u32 clk_id, u64 rate)
{
        int ret;
        u32 flags = 0;
        struct scmi_xfer *t;
        struct scmi_clock_set_rate *cfg;
        struct clock_info *ci = ph->get_priv(ph);
        struct scmi_clock_info *clk;

        clk = scmi_clock_domain_lookup(ci, clk_id);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        if (clk->rate_ctrl_forbidden)
                return -EACCES;

        ret = ph->xops->xfer_get_init(ph, CLOCK_RATE_SET, sizeof(*cfg), 0, &t);
        if (ret)
                return ret;

        if (ci->max_async_req &&
            atomic_inc_return(&ci->cur_async_req) < ci->max_async_req)
                flags |= CLOCK_SET_ASYNC;

        cfg = t->tx.buf;
        cfg->flags = cpu_to_le32(flags);
        cfg->id = cpu_to_le32(clk_id);
        cfg->value_low = cpu_to_le32(rate & 0xffffffff);
        cfg->value_high = cpu_to_le32(rate >> 32);

        if (flags & CLOCK_SET_ASYNC) {
                ret = ph->xops->do_xfer_with_response(ph, t);
                if (!ret) {
                        struct scmi_msg_resp_set_rate_complete *resp;

                        resp = t->rx.buf;
                        if (le32_to_cpu(resp->id) == clk_id)
                                dev_dbg(ph->dev,
                                        "Clk ID %d set async to %llu\n", clk_id,
                                        get_unaligned_le64(&resp->rate_low));
                        else
                                ret = -EPROTO;
                }
        } else {
                ret = ph->xops->do_xfer(ph, t);
        }

        if (ci->max_async_req)
                atomic_dec(&ci->cur_async_req);

        ph->xops->xfer_put(ph, t);
        return ret;
}

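/*
 * CONFIG_SET flavour used for clock protocol versions prior to v3.0: only
 * the clock enable state can be changed, OEM-specific configs are ignored.
 */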
static int
scmi_clock_config_set(const struct scmi_protocol_handle *ph, u32 clk_id,
                      enum clk_state state,
                      enum scmi_clock_oem_config __unused0, u32 __unused1,
                      bool atomic)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_clock_config_set *cfg;

        if (state >= CLK_STATE_RESERVED)
                return -EINVAL;

        ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
                                      sizeof(*cfg), 0, &t);
        if (ret)
                return ret;

        t->hdr.poll_completion = atomic;

        cfg = t->tx.buf;
        cfg->id = cpu_to_le32(clk_id);
        cfg->attributes = cpu_to_le32(state);

        ret = ph->xops->do_xfer(ph, t);

        ph->xops->xfer_put(ph, t);
        return ret;
}

static int
scmi_clock_set_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
                      u32 parent_id)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_clock_set_parent *cfg;
        struct clock_info *ci = ph->get_priv(ph);
        struct scmi_clock_info *clk;

        clk = scmi_clock_domain_lookup(ci, clk_id);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        if (parent_id >= clk->num_parents)
                return -EINVAL;

        if (clk->parent_ctrl_forbidden)
                return -EACCES;

        ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_SET,
                                      sizeof(*cfg), 0, &t);
        if (ret)
                return ret;

        t->hdr.poll_completion = false;

        cfg = t->tx.buf;
        cfg->id = cpu_to_le32(clk_id);
        cfg->parent_id = cpu_to_le32(clk->parents[parent_id]);

        ret = ph->xops->do_xfer(ph, t);

        ph->xops->xfer_put(ph, t);

        return ret;
}

static int
scmi_clock_get_parent(const struct scmi_protocol_handle *ph, u32 clk_id,
                      u32 *parent_id)
{
        int ret;
        struct scmi_xfer *t;

        ret = ph->xops->xfer_get_init(ph, CLOCK_PARENT_GET,
                                      sizeof(__le32), sizeof(u32), &t);
        if (ret)
                return ret;

        put_unaligned_le32(clk_id, t->tx.buf);

        ret = ph->xops->do_xfer(ph, t);
        if (!ret)
                *parent_id = get_unaligned_le32(t->rx.buf);

        ph->xops->xfer_put(ph, t);
        return ret;
}

/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_set_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
                         enum clk_state state,
                         enum scmi_clock_oem_config oem_type, u32 oem_val,
                         bool atomic)
{
        int ret;
        u32 attrs;
        struct scmi_xfer *t;
        struct scmi_msg_clock_config_set_v2 *cfg;

        if (state == CLK_STATE_RESERVED ||
            (!oem_type && state == CLK_STATE_UNCHANGED))
                return -EINVAL;

        ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_SET,
                                      sizeof(*cfg), 0, &t);
        if (ret)
                return ret;

        t->hdr.poll_completion = atomic;

        attrs = FIELD_PREP(REGMASK_OEM_TYPE_SET, oem_type) |
                 FIELD_PREP(REGMASK_CLK_STATE, state);

        cfg = t->tx.buf;
        cfg->id = cpu_to_le32(clk_id);
        cfg->attributes = cpu_to_le32(attrs);
        /* Clear in any case */
        cfg->oem_config_val = cpu_to_le32(0);
        if (oem_type)
                cfg->oem_config_val = cpu_to_le32(oem_val);

        ret = ph->xops->do_xfer(ph, t);

        ph->xops->xfer_put(ph, t);
        return ret;
}

static int scmi_clock_enable(const struct scmi_protocol_handle *ph, u32 clk_id,
                             bool atomic)
{
        struct clock_info *ci = ph->get_priv(ph);
        struct scmi_clock_info *clk;

        clk = scmi_clock_domain_lookup(ci, clk_id);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        if (clk->state_ctrl_forbidden)
                return -EACCES;

        return ci->clock_config_set(ph, clk_id, CLK_STATE_ENABLE,
                                    NULL_OEM_TYPE, 0, atomic);
}

static int scmi_clock_disable(const struct scmi_protocol_handle *ph, u32 clk_id,
                              bool atomic)
{
        struct clock_info *ci = ph->get_priv(ph);
        struct scmi_clock_info *clk;

        clk = scmi_clock_domain_lookup(ci, clk_id);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        if (clk->state_ctrl_forbidden)
                return -EACCES;

        return ci->clock_config_set(ph, clk_id, CLK_STATE_DISABLE,
                                    NULL_OEM_TYPE, 0, atomic);
}

/* For SCMI clock v3.0 and onwards */
static int
scmi_clock_config_get_v2(const struct scmi_protocol_handle *ph, u32 clk_id,
                         enum scmi_clock_oem_config oem_type, u32 *attributes,
                         bool *enabled, u32 *oem_val, bool atomic)
{
        int ret;
        u32 flags;
        struct scmi_xfer *t;
        struct scmi_msg_clock_config_get *cfg;

        ret = ph->xops->xfer_get_init(ph, CLOCK_CONFIG_GET,
                                      sizeof(*cfg), 0, &t);
        if (ret)
                return ret;

        t->hdr.poll_completion = atomic;

        flags = FIELD_PREP(REGMASK_OEM_TYPE_GET, oem_type);

        cfg = t->tx.buf;
        cfg->id = cpu_to_le32(clk_id);
        cfg->flags = cpu_to_le32(flags);

        ret = ph->xops->do_xfer(ph, t);
        if (!ret) {
                struct scmi_msg_resp_clock_config_get *resp = t->rx.buf;

                if (attributes)
                        *attributes = le32_to_cpu(resp->attributes);

                if (enabled)
                        *enabled = IS_CLK_ENABLED(resp->config);

                if (oem_val && oem_type)
                        *oem_val = le32_to_cpu(resp->oem_config_val);
        }

        ph->xops->xfer_put(ph, t);

        return ret;
}

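/*
 * CONFIG_GET fallback for clock protocol versions prior to v3.0: only the
 * enabled state can be reported, read back from bit[0] of the
 * CLOCK_ATTRIBUTES response.
 */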
static int
scmi_clock_config_get(const struct scmi_protocol_handle *ph, u32 clk_id,
                      enum scmi_clock_oem_config oem_type, u32 *attributes,
                      bool *enabled, u32 *oem_val, bool atomic)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_resp_clock_attributes *resp;

        if (!enabled)
                return -EINVAL;

        ret = ph->xops->xfer_get_init(ph, CLOCK_ATTRIBUTES,
                                      sizeof(clk_id), sizeof(*resp), &t);
        if (ret)
                return ret;

        t->hdr.poll_completion = atomic;
        put_unaligned_le32(clk_id, t->tx.buf);
        resp = t->rx.buf;

        ret = ph->xops->do_xfer(ph, t);
        if (!ret)
                *enabled = IS_CLK_ENABLED(resp->attributes);

        ph->xops->xfer_put(ph, t);

        return ret;
}

static int scmi_clock_state_get(const struct scmi_protocol_handle *ph,
                                u32 clk_id, bool *enabled, bool atomic)
{
        struct clock_info *ci = ph->get_priv(ph);

        return ci->clock_config_get(ph, clk_id, NULL_OEM_TYPE, NULL,
                                    enabled, NULL, atomic);
}

static int scmi_clock_config_oem_set(const struct scmi_protocol_handle *ph,
                                     u32 clk_id,
                                     enum scmi_clock_oem_config oem_type,
                                     u32 oem_val, bool atomic)
{
        struct clock_info *ci = ph->get_priv(ph);
        struct scmi_clock_info *clk;

        clk = scmi_clock_domain_lookup(ci, clk_id);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        if (!clk->extended_config)
                return -EOPNOTSUPP;

        return ci->clock_config_set(ph, clk_id, CLK_STATE_UNCHANGED,
                                    oem_type, oem_val, atomic);
}

static int scmi_clock_config_oem_get(const struct scmi_protocol_handle *ph,
                                     u32 clk_id,
                                     enum scmi_clock_oem_config oem_type,
                                     u32 *oem_val, u32 *attributes, bool atomic)
{
        struct clock_info *ci = ph->get_priv(ph);
        struct scmi_clock_info *clk;

        clk = scmi_clock_domain_lookup(ci, clk_id);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        if (!clk->extended_config)
                return -EOPNOTSUPP;

        return ci->clock_config_get(ph, clk_id, oem_type, attributes,
                                    NULL, oem_val, atomic);
}

static int scmi_clock_count_get(const struct scmi_protocol_handle *ph)
{
        struct clock_info *ci = ph->get_priv(ph);

        return ci->num_clocks;
}

static const struct scmi_clock_info *
scmi_clock_info_get(const struct scmi_protocol_handle *ph, u32 clk_id)
{
        struct scmi_clock_info *clk;
        struct clock_info *ci = ph->get_priv(ph);

        clk = scmi_clock_domain_lookup(ci, clk_id);
        if (IS_ERR(clk))
                return NULL;

        if (!clk->name[0])
                return NULL;

        return clk;
}

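/* Operations exposed to SCMI users (e.g. the clk-scmi clock provider driver) */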
static const struct scmi_clk_proto_ops clk_proto_ops = {
        .count_get = scmi_clock_count_get,
        .info_get = scmi_clock_info_get,
        .rate_get = scmi_clock_rate_get,
        .rate_set = scmi_clock_rate_set,
        .enable = scmi_clock_enable,
        .disable = scmi_clock_disable,
        .state_get = scmi_clock_state_get,
        .config_oem_get = scmi_clock_config_oem_get,
        .config_oem_set = scmi_clock_config_oem_set,
        .parent_set = scmi_clock_set_parent,
        .parent_get = scmi_clock_get_parent,
};

static bool scmi_clk_notify_supported(const struct scmi_protocol_handle *ph,
                                      u8 evt_id, u32 src_id)
{
        bool supported;
        struct scmi_clock_info *clk;
        struct clock_info *ci = ph->get_priv(ph);

        if (evt_id >= ARRAY_SIZE(evt_2_cmd))
                return false;

        clk = scmi_clock_domain_lookup(ci, src_id);
        if (IS_ERR(clk))
                return false;

        if (evt_id == SCMI_EVENT_CLOCK_RATE_CHANGED)
                supported = clk->rate_changed_notifications;
        else
                supported = clk->rate_change_requested_notifications;

        return supported;
}

static int scmi_clk_rate_notify(const struct scmi_protocol_handle *ph,
                                u32 clk_id, int message_id, bool enable)
{
        int ret;
        struct scmi_xfer *t;
        struct scmi_msg_clock_rate_notify *notify;

        ret = ph->xops->xfer_get_init(ph, message_id, sizeof(*notify), 0, &t);
        if (ret)
                return ret;

        notify = t->tx.buf;
        notify->clk_id = cpu_to_le32(clk_id);
        notify->notify_enable = enable ? cpu_to_le32(BIT(0)) : 0;

        ret = ph->xops->do_xfer(ph, t);

        ph->xops->xfer_put(ph, t);
        return ret;
}

static int scmi_clk_set_notify_enabled(const struct scmi_protocol_handle *ph,
                                       u8 evt_id, u32 src_id, bool enable)
{
        int ret, cmd_id;

        if (evt_id >= ARRAY_SIZE(evt_2_cmd))
                return -EINVAL;

        cmd_id = evt_2_cmd[evt_id];
        ret = scmi_clk_rate_notify(ph, src_id, cmd_id, enable);
        if (ret)
                pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
                         evt_id, src_id, ret);

        return ret;
}

static void *scmi_clk_fill_custom_report(const struct scmi_protocol_handle *ph,
                                         u8 evt_id, ktime_t timestamp,
                                         const void *payld, size_t payld_sz,
                                         void *report, u32 *src_id)
{
        const struct scmi_clock_rate_notify_payld *p = payld;
        struct scmi_clock_rate_notif_report *r = report;

        if (sizeof(*p) != payld_sz ||
            (evt_id != SCMI_EVENT_CLOCK_RATE_CHANGED &&
             evt_id != SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED))
                return NULL;

        r->timestamp = timestamp;
        r->agent_id = le32_to_cpu(p->agent_id);
        r->clock_id = le32_to_cpu(p->clock_id);
        r->rate = get_unaligned_le64(&p->rate_low);
        *src_id = r->clock_id;

        return r;
}

static int scmi_clk_get_num_sources(const struct scmi_protocol_handle *ph)
{
        struct clock_info *ci = ph->get_priv(ph);

        if (!ci)
                return -EINVAL;

        return ci->num_clocks;
}

static const struct scmi_event clk_events[] = {
        {
                .id = SCMI_EVENT_CLOCK_RATE_CHANGED,
                .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
                .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
        },
        {
                .id = SCMI_EVENT_CLOCK_RATE_CHANGE_REQUESTED,
                .max_payld_sz = sizeof(struct scmi_clock_rate_notify_payld),
                .max_report_sz = sizeof(struct scmi_clock_rate_notif_report),
        },
};

static const struct scmi_event_ops clk_event_ops = {
        .is_notify_supported = scmi_clk_notify_supported,
        .get_num_sources = scmi_clk_get_num_sources,
        .set_notify_enabled = scmi_clk_set_notify_enabled,
        .fill_custom_report = scmi_clk_fill_custom_report,
};

static const struct scmi_protocol_events clk_protocol_events = {
        .queue_sz = SCMI_PROTO_QUEUE_SZ,
        .ops = &clk_event_ops,
        .evts = clk_events,
        .num_events = ARRAY_SIZE(clk_events),
};

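/*
 * Protocol instance initialization: query the protocol attributes, enumerate
 * and describe all clock domains, then select the version-dependent
 * CONFIG_SET/CONFIG_GET implementations.
 */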
static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
{
        u32 version;
        int clkid, ret;
        struct clock_info *cinfo;

        ret = ph->xops->version_get(ph, &version);
        if (ret)
                return ret;

        dev_dbg(ph->dev, "Clock Version %d.%d\n",
                PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));

        cinfo = devm_kzalloc(ph->dev, sizeof(*cinfo), GFP_KERNEL);
        if (!cinfo)
                return -ENOMEM;

        ret = scmi_clock_protocol_attributes_get(ph, cinfo);
        if (ret)
                return ret;

        cinfo->clk = devm_kcalloc(ph->dev, cinfo->num_clocks,
                                  sizeof(*cinfo->clk), GFP_KERNEL);
        if (!cinfo->clk)
                return -ENOMEM;

        for (clkid = 0; clkid < cinfo->num_clocks; clkid++) {
                struct scmi_clock_info *clk = cinfo->clk + clkid;

                ret = scmi_clock_attributes_get(ph, clkid, cinfo, version);
                if (!ret)
                        scmi_clock_describe_rates_get(ph, clkid, clk);
        }

        if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
                cinfo->clock_config_set = scmi_clock_config_set_v2;
                cinfo->clock_config_get = scmi_clock_config_get_v2;
        } else {
                cinfo->clock_config_set = scmi_clock_config_set;
                cinfo->clock_config_get = scmi_clock_config_get;
        }

        cinfo->version = version;
        return ph->set_priv(ph, cinfo, version);
}

static const struct scmi_protocol scmi_clock = {
        .id = SCMI_PROTOCOL_CLOCK,
        .owner = THIS_MODULE,
        .instance_init = &scmi_clock_protocol_init,
        .ops = &clk_proto_ops,
        .events = &clk_protocol_events,
        .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION,
};

DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(clock, scmi_clock)