remoteproc: qcom: Fix error handling paths in order to avoid memory leaks
drivers/firmware/qcom_scm.c
/*
 * Qualcomm SCM driver
 *
 * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
 * Copyright (C) 2015 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/reset-controller.h>

#include "qcom_scm.h"

#define SCM_HAS_CORE_CLK        BIT(0)
#define SCM_HAS_IFACE_CLK       BIT(1)
#define SCM_HAS_BUS_CLK         BIT(2)

struct qcom_scm {
        struct device *dev;
        struct clk *core_clk;
        struct clk *iface_clk;
        struct clk *bus_clk;
        struct reset_controller_dev reset;
};

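/*
 * Layouts of the buffers handed to the secure world by qcom_scm_assign_mem()
 * below; the fields are explicitly little-endian because they are consumed
 * by firmware rather than by the kernel.
 */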
struct qcom_scm_current_perm_info {
        __le32 vmid;
        __le32 perm;
        __le64 ctx;
        __le32 ctx_size;
        __le32 unused;
};

struct qcom_scm_mem_map_info {
        __le64 mem_addr;
        __le64 mem_size;
};

static struct qcom_scm *__scm;

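/*
 * All three clocks are optional (see the match data in qcom_scm_dt_match);
 * clk_prepare_enable() and clk_disable_unprepare() treat a NULL clock as a
 * no-op, so platforms that provide only some of them still work.
 */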
static int qcom_scm_clk_enable(void)
{
        int ret;

        ret = clk_prepare_enable(__scm->core_clk);
        if (ret)
                goto bail;

        ret = clk_prepare_enable(__scm->iface_clk);
        if (ret)
                goto disable_core;

        ret = clk_prepare_enable(__scm->bus_clk);
        if (ret)
                goto disable_iface;

        return 0;

disable_iface:
        clk_disable_unprepare(__scm->iface_clk);
disable_core:
        clk_disable_unprepare(__scm->core_clk);
bail:
        return ret;
}

static void qcom_scm_clk_disable(void)
{
        clk_disable_unprepare(__scm->core_clk);
        clk_disable_unprepare(__scm->iface_clk);
        clk_disable_unprepare(__scm->bus_clk);
}

/**
 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the cold boot address of the cpus. Any cpu outside the supported
 * range would be removed from the cpu present mask.
 */
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
{
        return __qcom_scm_set_cold_boot_addr(entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);

/**
 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
 * @entry: Entry point function for the cpus
 * @cpus: The cpumask of cpus that will use the entry point
 *
 * Set the Linux entry point for the SCM to transfer control to when coming
 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
 */
int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
{
        return __qcom_scm_set_warm_boot_addr(__scm->dev, entry, cpus);
}
EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);

/**
 * qcom_scm_cpu_power_down() - Power down the cpu
 * @flags: Flags to flush cache
 *
 * This is an end point to power down cpu. If there was a pending interrupt,
 * the control would return from this function, otherwise, the cpu jumps to the
 * warm boot entry point set for this cpu upon reset.
 */
void qcom_scm_cpu_power_down(u32 flags)
{
        __qcom_scm_cpu_power_down(flags);
}
EXPORT_SYMBOL(qcom_scm_cpu_power_down);

/**
 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
 *
 * Return true if HDCP is supported, false if not.
 */
bool qcom_scm_hdcp_available(void)
{
        int ret = qcom_scm_clk_enable();

        if (ret)
                return false;

        ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
                                           QCOM_SCM_CMD_HDCP);

        qcom_scm_clk_disable();

        return ret > 0;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);

/**
 * qcom_scm_hdcp_req() - Send HDCP request.
 * @req: HDCP request array
 * @req_cnt: HDCP request array count
 * @resp: response buffer passed to SCM
 *
 * Write HDCP register(s) through SCM.
 */
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
        int ret = qcom_scm_clk_enable();

        if (ret)
                return ret;

        ret = __qcom_scm_hdcp_req(__scm->dev, req, req_cnt, resp);
        qcom_scm_clk_disable();
        return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);

/**
 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
 *                            available for the given peripheral
 * @peripheral: peripheral id
 *
 * Returns true if PAS is supported for this peripheral, otherwise false.
 */
bool qcom_scm_pas_supported(u32 peripheral)
{
        int ret;

        ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
                                           QCOM_SCM_PAS_IS_SUPPORTED_CMD);
        if (ret <= 0)
                return false;

        return __qcom_scm_pas_supported(__scm->dev, peripheral);
}
EXPORT_SYMBOL(qcom_scm_pas_supported);

/**
 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
 *                             state machine for a given peripheral, using the
 *                             metadata
 * @peripheral: peripheral id
 * @metadata:   pointer to memory containing ELF header, program header table
 *              and optional blob of data used for authenticating the metadata
 *              and the rest of the firmware
 * @size:       size of the metadata
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
{
        dma_addr_t mdata_phys;
        void *mdata_buf;
        int ret;

        /*
         * During the SCM call memory protection will be enabled for the
         * metadata blob, so make sure it's physically contiguous, 4K aligned
         * and non-cacheable to avoid XPU violations.
         */
        mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
                                       GFP_KERNEL);
        if (!mdata_buf) {
                dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
                return -ENOMEM;
        }
        memcpy(mdata_buf, metadata, size);

        ret = qcom_scm_clk_enable();
        if (ret)
                goto free_metadata;

        ret = __qcom_scm_pas_init_image(__scm->dev, peripheral, mdata_phys);

        qcom_scm_clk_disable();

free_metadata:
        dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);

        return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_init_image);

/**
 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
 *                            for firmware loading
 * @peripheral: peripheral id
 * @addr:       start address of memory area to prepare
 * @size:       size of the memory area to prepare
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
{
        int ret;

        ret = qcom_scm_clk_enable();
        if (ret)
                return ret;

        ret = __qcom_scm_pas_mem_setup(__scm->dev, peripheral, addr, size);
        qcom_scm_clk_disable();

        return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_mem_setup);

/**
 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
 *                                 and reset the remote processor
 * @peripheral: peripheral id
 *
 * Return 0 on success.
 */
int qcom_scm_pas_auth_and_reset(u32 peripheral)
{
        int ret;

        ret = qcom_scm_clk_enable();
        if (ret)
                return ret;

        ret = __qcom_scm_pas_auth_and_reset(__scm->dev, peripheral);
        qcom_scm_clk_disable();

        return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);

/**
 * qcom_scm_pas_shutdown() - Shut down the remote processor
 * @peripheral: peripheral id
 *
 * Returns 0 on success.
 */
int qcom_scm_pas_shutdown(u32 peripheral)
{
        int ret;

        ret = qcom_scm_clk_enable();
        if (ret)
                return ret;

        ret = __qcom_scm_pas_shutdown(__scm->dev, peripheral);
        qcom_scm_clk_disable();

        return ret;
}
EXPORT_SYMBOL(qcom_scm_pas_shutdown);

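/*
 * Reset controller callbacks for the single reset line registered in
 * qcom_scm_probe(); assert and deassert map to the PAS MSS reset SCM call
 * with an argument of 1 and 0 respectively.
 */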
static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
                                     unsigned long idx)
{
        if (idx != 0)
                return -EINVAL;

        return __qcom_scm_pas_mss_reset(__scm->dev, 1);
}

static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
                                       unsigned long idx)
{
        if (idx != 0)
                return -EINVAL;

        return __qcom_scm_pas_mss_reset(__scm->dev, 0);
}

static const struct reset_control_ops qcom_scm_pas_reset_ops = {
        .assert = qcom_scm_pas_reset_assert,
        .deassert = qcom_scm_pas_reset_deassert,
};

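/*
 * qcom_scm_restore_sec_cfg() - thin wrapper that forwards the restore
 * security configuration request for @device_id (with the @spare argument)
 * to the secure world.
 */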
int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
{
        return __qcom_scm_restore_sec_cfg(__scm->dev, device_id, spare);
}
EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);

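/*
 * qcom_scm_iommu_secure_ptbl_size() - query the secure world for the size of
 * the secure IOMMU page table area; on success the result is returned via
 * @size.
 */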
int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
{
        return __qcom_scm_iommu_secure_ptbl_size(__scm->dev, spare, size);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);

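/*
 * qcom_scm_iommu_secure_ptbl_init() - hand the memory region at @addr/@size
 * to the secure world for use as secure IOMMU page tables; @spare is passed
 * through unchanged.
 */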
int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
{
        return __qcom_scm_iommu_secure_ptbl_init(__scm->dev, addr, size, spare);
}
EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);

/**
 * qcom_scm_is_available() - Checks if SCM is available
 */
bool qcom_scm_is_available(void)
{
        return !!__scm;
}
EXPORT_SYMBOL(qcom_scm_is_available);

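/*
 * qcom_scm_set_remote_state() - forward a @state/@id pair to the secure
 * world; the return value comes straight from the underlying SCM call.
 */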
int qcom_scm_set_remote_state(u32 state, u32 id)
{
        return __qcom_scm_set_remote_state(__scm->dev, state, id);
}
EXPORT_SYMBOL(qcom_scm_set_remote_state);

/**
 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
 * @mem_addr: start address of the memory region whose ownership needs to be
 *            reassigned
 * @mem_sz:   size of the region
 * @srcvm:    bitmask of the current owners; each set bit identifies one
 *            owner vmid
 * @newvm:    array of new owners and their corresponding permission flags
 * @dest_cnt: number of entries in @newvm
 *
 * Return negative errno on failure, 0 on success, with @srcvm updated to the
 * new set of owners.
 */
int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
                        unsigned int *srcvm,
                        struct qcom_scm_vmperm *newvm, int dest_cnt)
{
        struct qcom_scm_current_perm_info *destvm;
        struct qcom_scm_mem_map_info *mem_to_map;
        phys_addr_t mem_to_map_phys;
        phys_addr_t dest_phys;
        phys_addr_t ptr_phys;
        size_t mem_to_map_sz;
        size_t dest_sz;
        size_t src_sz;
        size_t ptr_sz;
        int next_vm;
        __le32 *src;
        void *ptr;
        int ret;
        int len;
        int i;

        src_sz = hweight_long(*srcvm) * sizeof(*src);
        mem_to_map_sz = sizeof(*mem_to_map);
        dest_sz = dest_cnt * sizeof(*destvm);
        ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
                        ALIGN(dest_sz, SZ_64);

        ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        /* Fill source vmid detail */
        src = ptr;
        len = hweight_long(*srcvm);
        for (i = 0; i < len; i++) {
                src[i] = cpu_to_le32(ffs(*srcvm) - 1);
                *srcvm ^= 1 << (ffs(*srcvm) - 1);
        }

        /* Fill details of mem buff to map */
        mem_to_map = ptr + ALIGN(src_sz, SZ_64);
        mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
        mem_to_map[0].mem_addr = cpu_to_le64(mem_addr);
        mem_to_map[0].mem_size = cpu_to_le64(mem_sz);

        next_vm = 0;
        /* Fill details of the next set of vmids */
        destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
        dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
        for (i = 0; i < dest_cnt; i++) {
                destvm[i].vmid = cpu_to_le32(newvm[i].vmid);
                destvm[i].perm = cpu_to_le32(newvm[i].perm);
                destvm[i].ctx = 0;
                destvm[i].ctx_size = 0;
                next_vm |= BIT(newvm[i].vmid);
        }

        ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
                                    ptr_phys, src_sz, dest_phys, dest_sz);
        dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
        if (ret) {
                dev_err(__scm->dev,
                        "Assign memory protection call failed %d.\n", ret);
                return -EINVAL;
        }

        *srcvm = next_vm;
        return 0;
}
EXPORT_SYMBOL(qcom_scm_assign_mem);

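/*
 * Probe: acquire whichever clocks the per-compatible match data declares,
 * register a one-line reset controller backed by the PAS MSS reset call,
 * vote the core clock to its maximum rate and publish the device through the
 * __scm pointer used by the exported helpers above.
 */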
static int qcom_scm_probe(struct platform_device *pdev)
{
        struct qcom_scm *scm;
        unsigned long clks;
        int ret;

        scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
        if (!scm)
                return -ENOMEM;

        clks = (unsigned long)of_device_get_match_data(&pdev->dev);
        if (clks & SCM_HAS_CORE_CLK) {
                scm->core_clk = devm_clk_get(&pdev->dev, "core");
                if (IS_ERR(scm->core_clk)) {
                        if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
                                dev_err(&pdev->dev,
                                        "failed to acquire core clk\n");
                        return PTR_ERR(scm->core_clk);
                }
        }

        if (clks & SCM_HAS_IFACE_CLK) {
                scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
                if (IS_ERR(scm->iface_clk)) {
                        if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
                                dev_err(&pdev->dev,
                                        "failed to acquire iface clk\n");
                        return PTR_ERR(scm->iface_clk);
                }
        }

        if (clks & SCM_HAS_BUS_CLK) {
                scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
                if (IS_ERR(scm->bus_clk)) {
                        if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
                                dev_err(&pdev->dev,
                                        "failed to acquire bus clk\n");
                        return PTR_ERR(scm->bus_clk);
                }
        }

        scm->reset.ops = &qcom_scm_pas_reset_ops;
        scm->reset.nr_resets = 1;
        scm->reset.of_node = pdev->dev.of_node;
        ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
        if (ret)
                return ret;

        /* vote for max clk rate for highest performance */
        ret = clk_set_rate(scm->core_clk, INT_MAX);
        if (ret)
                return ret;

        __scm = scm;
        __scm->dev = &pdev->dev;

        __qcom_scm_init();

        return 0;
}

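/*
 * The match data encodes which of the optional core/iface/bus clocks each
 * platform provides, using the SCM_HAS_*_CLK flags defined above.
 */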
static const struct of_device_id qcom_scm_dt_match[] = {
        { .compatible = "qcom,scm-apq8064",
          /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
        },
        { .compatible = "qcom,scm-msm8660",
          .data = (void *) SCM_HAS_CORE_CLK,
        },
        { .compatible = "qcom,scm-msm8960",
          .data = (void *) SCM_HAS_CORE_CLK,
        },
        { .compatible = "qcom,scm-msm8996",
          .data = NULL, /* no clocks */
        },
        { .compatible = "qcom,scm",
          .data = (void *)(SCM_HAS_CORE_CLK
                           | SCM_HAS_IFACE_CLK
                           | SCM_HAS_BUS_CLK),
        },
        {}
};

static struct platform_driver qcom_scm_driver = {
        .driver = {
                .name   = "qcom_scm",
                .of_match_table = qcom_scm_dt_match,
        },
        .probe = qcom_scm_probe,
};

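/*
 * Check that a node matching this driver exists, populate the children of
 * the "firmware" node as platform devices and register the driver. Running
 * this as a subsys_initcall means the SCM device is created before ordinary
 * device initcalls run.
 */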
static int __init qcom_scm_init(void)
{
        struct device_node *np, *fw_np;
        int ret;

        fw_np = of_find_node_by_name(NULL, "firmware");

        if (!fw_np)
                return -ENODEV;

        np = of_find_matching_node(fw_np, qcom_scm_dt_match);

        if (!np) {
                of_node_put(fw_np);
                return -ENODEV;
        }

        of_node_put(np);

        ret = of_platform_populate(fw_np, qcom_scm_dt_match, NULL, NULL);

        of_node_put(fw_np);

        if (ret)
                return ret;

        return platform_driver_register(&qcom_scm_driver);
}
subsys_initcall(qcom_scm_init);