2 * Qualcomm self-authenticating modem subsystem remoteproc driver
4 * Copyright (C) 2016 Linaro Ltd.
5 * Copyright (C) 2014 Sony Mobile Communications AB
6 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/clk.h>
19 #include <linux/delay.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/interrupt.h>
22 #include <linux/kernel.h>
23 #include <linux/mfd/syscon.h>
24 #include <linux/module.h>
25 #include <linux/of_address.h>
26 #include <linux/of_device.h>
27 #include <linux/platform_device.h>
28 #include <linux/pm_domain.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/regmap.h>
31 #include <linux/regulator/consumer.h>
32 #include <linux/remoteproc.h>
33 #include <linux/reset.h>
34 #include <linux/soc/qcom/mdt_loader.h>
35 #include <linux/iopoll.h>
37 #include "remoteproc_internal.h"
38 #include "qcom_common.h"
39 #include "qcom_q6v5.h"
41 #include <linux/qcom_scm.h>
/* SMEM item id holding the modem crash-reason string (read on fatal error) */
43 #define MPSS_CRASH_REASON_SMEM 421
45 /* RMB Status Register Values */
46 #define RMB_PBL_SUCCESS 0x1
/* Values reported by the MBA through RMB_MBA_STATUS_REG */
48 #define RMB_MBA_XPU_UNLOCKED 0x1
49 #define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
50 #define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
51 #define RMB_MBA_AUTH_COMPLETE 0x4
53 /* PBL/MBA interface registers */
54 #define RMB_MBA_IMAGE_REG 0x00
55 #define RMB_PBL_STATUS_REG 0x04
56 #define RMB_MBA_COMMAND_REG 0x08
57 #define RMB_MBA_STATUS_REG 0x0C
58 #define RMB_PMI_META_DATA_REG 0x10
59 #define RMB_PMI_CODE_START_REG 0x14
60 #define RMB_PMI_CODE_LENGTH_REG 0x18
61 #define RMB_MBA_MSS_STATUS 0x40
62 #define RMB_MBA_ALT_RESET 0x44
/* Commands written to RMB_MBA_COMMAND_REG */
64 #define RMB_CMD_META_DATA_READY 0x1
65 #define RMB_CMD_LOAD_READY 0x2
67 /* QDSP6SS Register Offsets */
68 #define QDSP6SS_RESET_REG 0x014
69 #define QDSP6SS_GFMUX_CTL_REG 0x020
70 #define QDSP6SS_PWR_CTL_REG 0x030
71 #define QDSP6SS_MEM_PWR_CTL 0x0B0
72 #define QDSP6SS_STRAP_ACC 0x110
74 /* AXI Halt Register Offsets */
75 #define AXI_HALTREQ_REG 0x0
76 #define AXI_HALTACK_REG 0x4
77 #define AXI_IDLE_REG 0x8
79 #define HALT_ACK_TIMEOUT_MS 100
/* QDSP6SS_RESET bits */
82 #define Q6SS_STOP_CORE BIT(0)
83 #define Q6SS_CORE_ARES BIT(1)
84 #define Q6SS_BUS_ARES_ENABLE BIT(2)
86 /* QDSP6SS_GFMUX_CTL */
87 #define Q6SS_CLK_ENABLE BIT(1)
/* QDSP6SS_PWR_CTL bits (memory/retention/clamp controls) */
90 #define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
91 #define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
92 #define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
93 #define Q6SS_L2TAG_SLP_NRET_N BIT(16)
94 #define Q6SS_ETB_SLP_NRET_N BIT(17)
95 #define Q6SS_L2DATA_STBY_N BIT(18)
96 #define Q6SS_SLP_RET_N BIT(19)
97 #define Q6SS_CLAMP_IO BIT(20)
98 #define QDSS_BHS_ON BIT(21)
99 #define QDSS_LDO_BYP BIT(22)
101 /* QDSP6v56 parameters */
102 #define QDSP6v56_LDO_BYP BIT(25)
103 #define QDSP6v56_BHS_ON BIT(24)
104 #define QDSP6v56_CLAMP_WL BIT(21)
105 #define QDSP6v56_CLAMP_QMC_MEM BIT(22)
106 #define HALT_CHECK_MAX_LOOPS 200
107 #define QDSP6SS_XO_CBCR 0x0038
108 #define QDSP6SS_ACC_OVERRIDE_VAL 0x20
110 /* QDSP6v65 parameters */
111 #define QDSP6SS_SLEEP 0x3C
112 #define QDSP6SS_BOOT_CORE_START 0x400
113 #define QDSP6SS_BOOT_CMD 0x404
114 #define SLEEP_CHECK_MAX_LOOPS 200
/* Boot FSM poll timeout, in microseconds (used with readl_poll_timeout) */
115 #define BOOT_FSM_TIMEOUT 10000
/* NOTE(review): struct declarations below are partially elided in this view;
 * field comments describe only what the visible code demonstrates. */
/* Per-supply handle plus requested voltage/load (see q6v5_regulator_init) */
118 struct regulator *reg;
/* Static description of a regulator requirement for a SoC variant */
123 struct qcom_mss_reg_res {
/* Per-SoC match data: firmware names, supplies, clock and power-domain lists */
129 struct rproc_hexagon_res {
130 const char *hexagon_mba_image;
131 struct qcom_mss_reg_res *proxy_supply;
132 struct qcom_mss_reg_res *active_supply;
133 char **proxy_clk_names;
134 char **reset_clk_names;
135 char **active_clk_names;
136 char **active_pd_names;
137 char **proxy_pd_names;
139 bool need_mem_protection;
/* Driver state (struct q6v5) */
147 void __iomem *reg_base;
148 void __iomem *rmb_base;
150 struct regmap *halt_map;
155 struct reset_control *mss_restart;
156 struct reset_control *pdc_reset;
158 struct qcom_q6v5 q6v5;
/* Fixed-size handle arrays; counts below record how many are populated */
160 struct clk *active_clks[8];
161 struct clk *reset_clks[4];
162 struct clk *proxy_clks[4];
163 struct device *active_pds[1];
164 struct device *proxy_pds[3];
165 int active_clk_count;
171 struct reg_info active_regs[1];
172 struct reg_info proxy_regs[3];
173 int active_reg_count;
/* Coredump bookkeeping: segments collected vs. segments expected */
178 bool dump_mba_loaded;
179 unsigned long dump_segment_mask;
180 unsigned long dump_complete_mask;
182 phys_addr_t mba_phys;
186 phys_addr_t mpss_phys;
187 phys_addr_t mpss_reloc;
191 struct qcom_rproc_glink glink_subdev;
192 struct qcom_rproc_subdev smd_subdev;
193 struct qcom_rproc_ssr ssr_subdev;
194 struct qcom_sysmon *sysmon;
195 bool need_mem_protection;
199 const char *hexagon_mdt_image;
210 static int q6v5_regulator_init(struct device *dev, struct reg_info *regs,
211 const struct qcom_mss_reg_res *reg_res)
219 for (i = 0; reg_res[i].supply; i++) {
220 regs[i].reg = devm_regulator_get(dev, reg_res[i].supply);
221 if (IS_ERR(regs[i].reg)) {
222 rc = PTR_ERR(regs[i].reg);
223 if (rc != -EPROBE_DEFER)
224 dev_err(dev, "Failed to get %s\n regulator",
229 regs[i].uV = reg_res[i].uV;
230 regs[i].uA = reg_res[i].uA;
/*
 * Program voltage/load and enable each supply in @regs; on any failure the
 * already-configured entries are unwound in reverse order (tail loop below).
 */
236 static int q6v5_regulator_enable(struct q6v5 *qproc,
237 struct reg_info *regs, int count)
242 for (i = 0; i < count; i++) {
/* Only touch voltage if the descriptor asked for one (uV > 0) */
243 if (regs[i].uV > 0) {
244 ret = regulator_set_voltage(regs[i].reg,
245 regs[i].uV, INT_MAX);
248 "Failed to request voltage for %d.\n",
/* Likewise, load request is optional (uA > 0) */
254 if (regs[i].uA > 0) {
255 ret = regulator_set_load(regs[i].reg,
259 "Failed to set regulator mode\n");
264 ret = regulator_enable(regs[i].reg);
266 dev_err(qproc->dev, "Regulator enable failed\n");
/* Error unwind: walk back over regulators configured so far */
273 for (; i >= 0; i--) {
275 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
278 regulator_set_load(regs[i].reg, 0);
280 regulator_disable(regs[i].reg);
/* Drop voltage/load requests and disable every supply in @regs. */
286 static void q6v5_regulator_disable(struct q6v5 *qproc,
287 struct reg_info *regs, int count)
291 for (i = 0; i < count; i++) {
293 regulator_set_voltage(regs[i].reg, 0, INT_MAX);
296 regulator_set_load(regs[i].reg, 0);
298 regulator_disable(regs[i].reg);
/*
 * Prepare+enable @count clocks; on failure, disable the ones already
 * enabled (reverse order) before returning the error.
 */
302 static int q6v5_clk_enable(struct device *dev,
303 struct clk **clks, int count)
308 for (i = 0; i < count; i++) {
309 rc = clk_prepare_enable(clks[i]);
311 dev_err(dev, "Clock enable failed\n");
/* Unwind path: i points at the clock that failed */
318 for (i--; i >= 0; i--)
319 clk_disable_unprepare(clks[i]);
/* Disable and unprepare all @count clocks. */
324 static void q6v5_clk_disable(struct device *dev,
325 struct clk **clks, int count)
329 for (i = 0; i < count; i++)
330 clk_disable_unprepare(clks[i]);
/*
 * Vote each power domain to its maximum performance state and take a
 * runtime-PM reference; failures unroll the votes already taken.
 */
333 static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
339 for (i = 0; i < pd_count; i++) {
340 dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
341 ret = pm_runtime_get_sync(pds[i]);
343 goto unroll_pd_votes;
/* unroll_pd_votes: drop perf state and PM reference in reverse order */
349 for (i--; i >= 0; i--) {
350 dev_pm_genpd_set_performance_state(pds[i], 0);
351 pm_runtime_put(pds[i]);
/* Release the performance-state vote and PM reference of each domain. */
357 static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
362 for (i = 0; i < pd_count; i++) {
363 dev_pm_genpd_set_performance_state(pds[i], 0);
364 pm_runtime_put(pds[i]);
/*
 * Hand a physical memory range to the modem (remote_owner=true) or reclaim
 * it for HLOS via the SCM assign-mem call. *current_perm tracks the current
 * owner and is updated by qcom_scm_assign_mem(); no-ops when protection is
 * disabled or the range already has the requested owner.
 */
368 static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
369 bool remote_owner, phys_addr_t addr,
372 struct qcom_scm_vmperm next;
374 if (!qproc->need_mem_protection)
376 if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
378 if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
381 next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
/* Modem gets RW; HLOS takes back RWX so the region is fully usable again */
382 next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
384 return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
385 current_perm, &next, 1);
388 static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
390 struct q6v5 *qproc = rproc->priv;
392 memcpy(qproc->mba_region, fw->data, fw->size);
/*
 * Put the modem subsystem into reset. With the alternate-reset scheme
 * the mss restart must be pulsed while the PDC reset is held asserted.
 */
397 static int q6v5_reset_assert(struct q6v5 *qproc)
401 if (qproc->has_alt_reset) {
402 reset_control_assert(qproc->pdc_reset);
403 ret = reset_control_reset(qproc->mss_restart);
404 reset_control_deassert(qproc->pdc_reset);
/* Legacy path: plain assert of the mss restart line */
406 ret = reset_control_assert(qproc->mss_restart);
/*
 * Release the modem from reset. On alt-reset parts, RMB_MBA_ALT_RESET is
 * raised around the mss restart pulse (under PDC reset) so the boot ROM
 * takes the alternate boot path; it is cleared again before releasing PDC.
 */
412 static int q6v5_reset_deassert(struct q6v5 *qproc)
416 if (qproc->has_alt_reset) {
417 reset_control_assert(qproc->pdc_reset);
418 writel(1, qproc->rmb_base + RMB_MBA_ALT_RESET);
419 ret = reset_control_reset(qproc->mss_restart);
420 writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
421 reset_control_deassert(qproc->pdc_reset);
423 ret = reset_control_deassert(qproc->mss_restart);
/*
 * Poll RMB_PBL_STATUS_REG for up to @ms milliseconds; returns the status
 * value once non-zero, or a timeout indication (error path elided here).
 */
429 static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
431 unsigned long timeout;
434 timeout = jiffies + msecs_to_jiffies(ms);
436 val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
440 if (time_after(jiffies, timeout))
/*
 * Poll RMB_MBA_STATUS_REG for up to @ms milliseconds. With @status == 0 any
 * status is accepted; otherwise the loop completes when @status is observed.
 */
449 static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
452 unsigned long timeout;
455 timeout = jiffies + msecs_to_jiffies(ms);
457 val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
463 else if (status && val == status)
466 if (time_after(jiffies, timeout))
/*
 * Bring the Hexagon core out of reset and start it. Three hardware
 * generations are handled: SDM845 (boot FSM), MSM8996 (QDSP6v56 power
 * sequencing) and the older BHS/LDO flow; finishes by waiting for PBL.
 * NOTE(review): several intermediate lines are elided in this view — do
 * not assume the visible statements are contiguous.
 */
475 static int q6v5proc_reset(struct q6v5 *qproc)
481 if (qproc->version == MSS_SDM845) {
/* Enable the sleep clock and wait for its CLKOFF bit (BIT 31) to clear */
482 val = readl(qproc->reg_base + QDSP6SS_SLEEP);
484 writel(val, qproc->reg_base + QDSP6SS_SLEEP);
486 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
487 val, !(val & BIT(31)), 1,
488 SLEEP_CHECK_MAX_LOOPS);
490 dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
494 /* De-assert QDSP6 stop core */
495 writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
496 /* Trigger boot FSM */
497 writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
/* MSS status BIT(0) signals the boot FSM reached its terminal state */
499 ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
500 val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
502 dev_err(qproc->dev, "Boot FSM failed to complete.\n");
503 /* Reset the modem so that boot FSM is in reset state */
504 q6v5_reset_deassert(qproc);
509 } else if (qproc->version == MSS_MSM8996) {
510 /* Override the ACC value if required */
511 writel(QDSP6SS_ACC_OVERRIDE_VAL,
512 qproc->reg_base + QDSP6SS_STRAP_ACC);
514 /* Assert resets, stop core */
515 val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
516 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
517 writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
519 /* BHS require xo cbcr to be enabled */
520 val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
522 writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
524 /* Read CLKOFF bit to go low indicating CLK is enabled */
525 ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
526 val, !(val & BIT(31)), 1,
527 HALT_CHECK_MAX_LOOPS);
530 "xo cbcr enabling timed out (rc:%d)\n", ret);
533 /* Enable power block headswitch and wait for it to stabilize */
534 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
535 val |= QDSP6v56_BHS_ON;
536 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* Read-back forces the write to post before the settle delay */
537 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
540 /* Put LDO in bypass mode */
541 val |= QDSP6v56_LDO_BYP;
542 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
544 /* Deassert QDSP6 compiler memory clamp */
545 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
546 val &= ~QDSP6v56_CLAMP_QMC_MEM;
547 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
549 /* Deassert memory peripheral sleep and L2 memory standby */
550 val |= Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N;
551 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
553 /* Turn on L1, L2, ETB and JU memories 1 at a time */
554 val = readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
555 for (i = 19; i >= 0; i--) {
557 writel(val, qproc->reg_base +
558 QDSP6SS_MEM_PWR_CTL);
560 * Read back value to ensure the write is done then
561 * wait for 1us for both memory peripheral and data
564 val |= readl(qproc->reg_base + QDSP6SS_MEM_PWR_CTL);
567 /* Remove word line clamp */
568 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
569 val &= ~QDSP6v56_CLAMP_WL;
570 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
/* else: older (pre-v56) power-up sequence */
572 /* Assert resets, stop core */
573 val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
574 val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
575 writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
577 /* Enable power block headswitch and wait for it to stabilize */
578 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
579 val |= QDSS_BHS_ON | QDSS_LDO_BYP;
580 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
581 val |= readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
584 * Turn on memories. L2 banks should be done individually
585 * to minimize inrush current.
587 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
588 val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
589 Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
590 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
591 val |= Q6SS_L2DATA_SLP_NRET_N_2;
592 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
593 val |= Q6SS_L2DATA_SLP_NRET_N_1;
594 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
595 val |= Q6SS_L2DATA_SLP_NRET_N_0;
596 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
598 /* Remove IO clamp */
599 val &= ~Q6SS_CLAMP_IO;
600 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
602 /* Bring core out of reset */
603 val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
604 val &= ~Q6SS_CORE_ARES;
605 writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
607 /* Turn on core clock */
608 val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
609 val |= Q6SS_CLK_ENABLE;
610 writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
612 /* Start core execution */
613 val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
614 val &= ~Q6SS_STOP_CORE;
615 writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
618 /* Wait for PBL status */
619 ret = q6v5_rmb_pbl_wait(qproc, 1000);
620 if (ret == -ETIMEDOUT) {
621 dev_err(qproc->dev, "PBL boot timed out\n");
622 } else if (ret != RMB_PBL_SUCCESS) {
623 dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
/*
 * Halt one AXI port via the halt syscon: request halt, wait (bounded by
 * HALT_ACK_TIMEOUT_MS) for the ack, verify idle, then clear the request
 * (the port itself stays halted until the next reset).
 */
632 static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
633 struct regmap *halt_map,
636 unsigned long timeout;
640 /* Check if we're already idle */
641 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
645 /* Assert halt request */
646 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
649 timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
651 ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
/* Exit on read error, ack asserted, or timeout */
652 if (ret || val || time_after(jiffies, timeout))
658 ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
660 dev_err(qproc->dev, "port failed halt\n");
662 /* Clear halt request (port will remain halted until reset) */
663 regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
/*
 * Push the MPSS MDT metadata to the MBA for authentication: copy it into a
 * DMA-contiguous buffer, grant the modem access via SCM, signal
 * META_DATA_READY and wait for AUTH_SUCCESS, then always reclaim the
 * buffer for HLOS and free it.
 */
666 static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
668 unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
677 metadata = qcom_mdt_read_metadata(fw, &size);
678 if (IS_ERR(metadata))
679 return PTR_ERR(metadata);
681 ptr = dma_alloc_attrs(qproc->dev, size, &phys, GFP_KERNEL, dma_attrs);
684 dev_err(qproc->dev, "failed to allocate mdt buffer\n");
688 memcpy(ptr, metadata, size);
690 /* Hypervisor mapping to access metadata by modem */
691 mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
692 ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
695 "assigning Q6 access to metadata failed: %d\n", ret);
/* Tell the MBA where the metadata lives and kick authentication */
700 writel(phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
701 writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
703 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
704 if (ret == -ETIMEDOUT)
705 dev_err(qproc->dev, "MPSS header authentication timed out\n");
707 dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
709 /* Metadata authentication done, remove modem access */
710 xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
713 "mdt buffer not reclaimed system may become unstable\n");
716 dma_free_attrs(qproc->dev, size, ptr, phys, dma_attrs);
/* q6v5_rmb_mba_wait() returns positive status values on success */
719 return ret < 0 ? ret : 0;
/*
 * Filter ELF program headers: only PT_LOAD segments count, and hash
 * segments (QCOM_MDT_TYPE_HASH in p_flags) are skipped.
 */
722 static bool q6v5_phdr_valid(const struct elf32_phdr *phdr)
724 if (phdr->p_type != PT_LOAD)
727 if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
/*
 * Power-up sequence that boots the MBA (modem boot authenticator):
 * power domains -> proxy supplies/clocks -> active supplies -> reset
 * clocks -> deassert reset -> active clocks -> hand the MBA carveout to
 * Q6 -> release the core -> wait for the MBA to report XPU unlocked.
 * Each step has a matching unwind label executed in reverse on failure.
 */
736 static int q6v5_mba_load(struct q6v5 *qproc)
741 qcom_q6v5_prepare(&qproc->q6v5);
743 ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
745 dev_err(qproc->dev, "failed to enable active power domains\n");
749 ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
751 dev_err(qproc->dev, "failed to enable proxy power domains\n");
752 goto disable_active_pds;
755 ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
756 qproc->proxy_reg_count);
758 dev_err(qproc->dev, "failed to enable proxy supplies\n");
759 goto disable_proxy_pds;
762 ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
763 qproc->proxy_clk_count);
765 dev_err(qproc->dev, "failed to enable proxy clocks\n");
766 goto disable_proxy_reg;
769 ret = q6v5_regulator_enable(qproc, qproc->active_regs,
770 qproc->active_reg_count);
772 dev_err(qproc->dev, "failed to enable supplies\n");
773 goto disable_proxy_clk;
776 ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
777 qproc->reset_clk_count);
779 dev_err(qproc->dev, "failed to enable reset clocks\n");
783 ret = q6v5_reset_deassert(qproc);
785 dev_err(qproc->dev, "failed to deassert mss restart\n");
786 goto disable_reset_clks;
789 ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
790 qproc->active_clk_count);
792 dev_err(qproc->dev, "failed to enable clocks\n");
796 /* Assign MBA image access in DDR to q6 */
797 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
798 qproc->mba_phys, qproc->mba_size);
801 "assigning Q6 access to mba memory failed: %d\n", ret);
802 goto disable_active_clks;
/* Point PBL at the MBA image and release the core */
805 writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
807 ret = q6v5proc_reset(qproc);
/* status==0: accept any non-zero MBA status within 5s */
811 ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
812 if (ret == -ETIMEDOUT) {
813 dev_err(qproc->dev, "MBA boot timed out\n");
815 } else if (ret != RMB_MBA_XPU_UNLOCKED &&
816 ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
817 dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
/* Success: remember MBA is resident for coredump handling */
822 qproc->dump_mba_loaded = true;
/* ---- error unwind path below ---- */
826 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
827 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
828 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
831 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
836 "Failed to reclaim mba buffer, system may become unstable\n");
840 q6v5_clk_disable(qproc->dev, qproc->active_clks,
841 qproc->active_clk_count);
843 q6v5_reset_assert(qproc);
845 q6v5_clk_disable(qproc->dev, qproc->reset_clks,
846 qproc->reset_clk_count);
848 q6v5_regulator_disable(qproc, qproc->active_regs,
849 qproc->active_reg_count);
851 q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
852 qproc->proxy_clk_count);
854 q6v5_regulator_disable(qproc, qproc->proxy_regs,
855 qproc->proxy_reg_count);
857 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
859 q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
861 qcom_q6v5_unprepare(&qproc->q6v5);
/*
 * Tear down after MBA use: halt the AXI ports, clamp memories on MSM8996,
 * return the MPSS and MBA carveouts to HLOS, assert reset and drop all
 * active resources. Proxy resources are released only if the handover
 * interrupt had not already fired (qcom_q6v5_unprepare() return).
 */
866 static void q6v5_mba_reclaim(struct q6v5 *qproc)
871 qproc->dump_mba_loaded = false;
873 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
874 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
875 q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
876 if (qproc->version == MSS_MSM8996) {
878 * To avoid high MX current during LPASS/MSS restart.
880 val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
881 val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
882 QDSP6v56_CLAMP_QMC_MEM;
883 writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
886 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
887 false, qproc->mpss_phys,
891 q6v5_reset_assert(qproc);
893 q6v5_clk_disable(qproc->dev, qproc->reset_clks,
894 qproc->reset_clk_count);
895 q6v5_clk_disable(qproc->dev, qproc->active_clks,
896 qproc->active_clk_count);
897 q6v5_regulator_disable(qproc, qproc->active_regs,
898 qproc->active_reg_count);
899 q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
901 /* In case of failure or coredump scenario where reclaiming MBA memory
902 * could not happen reclaim it here.
904 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
/* Non-zero means the MSA handover never happened: release proxies here */
909 ret = qcom_q6v5_unprepare(&qproc->q6v5);
911 q6v5_pds_disable(qproc, qproc->proxy_pds,
912 qproc->proxy_pd_count);
913 q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
914 qproc->proxy_clk_count);
915 q6v5_regulator_disable(qproc, qproc->proxy_regs,
916 qproc->proxy_reg_count);
/*
 * Load and authenticate the MPSS firmware: read the MDT, send its metadata
 * to the MBA, copy each valid PT_LOAD segment into the MPSS carveout
 * (split segments come from "<name>.bNN" files), transfer the region to
 * Q6, then signal LOAD_READY and wait for AUTH_COMPLETE.
 */
920 static int q6v5_mpss_load(struct q6v5 *qproc)
922 const struct elf32_phdr *phdrs;
923 const struct elf32_phdr *phdr;
924 const struct firmware *seg_fw;
925 const struct firmware *fw;
926 struct elf32_hdr *ehdr;
927 phys_addr_t mpss_reloc;
928 phys_addr_t boot_addr;
929 phys_addr_t min_addr = PHYS_ADDR_MAX;
930 phys_addr_t max_addr = 0;
931 bool relocate = false;
/* Need at least "x.mdt" so the ".bNN" suffix substitution below is safe */
940 fw_name_len = strlen(qproc->hexagon_mdt_image);
941 if (fw_name_len <= 4)
944 fw_name = kstrdup(qproc->hexagon_mdt_image, GFP_KERNEL);
948 ret = request_firmware(&fw, fw_name, qproc->dev);
950 dev_err(qproc->dev, "unable to load %s\n", fw_name);
954 /* Initialize the RMB validator */
955 writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
957 ret = q6v5_mpss_init_image(qproc, fw);
959 goto release_firmware;
961 ehdr = (struct elf32_hdr *)fw->data;
962 phdrs = (struct elf32_phdr *)(ehdr + 1);
/* First pass: find min/max load addresses and whether relocation is set */
964 for (i = 0; i < ehdr->e_phnum; i++) {
967 if (!q6v5_phdr_valid(phdr))
970 if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
973 if (phdr->p_paddr < min_addr)
974 min_addr = phdr->p_paddr;
976 if (phdr->p_paddr + phdr->p_memsz > max_addr)
977 max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
980 mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
981 qproc->mpss_reloc = mpss_reloc;
982 /* Load firmware segments */
983 for (i = 0; i < ehdr->e_phnum; i++) {
986 if (!q6v5_phdr_valid(phdr))
989 offset = phdr->p_paddr - mpss_reloc;
990 if (offset < 0 || offset + phdr->p_memsz > qproc->mpss_size) {
991 dev_err(qproc->dev, "segment outside memory range\n");
993 goto release_firmware;
996 ptr = qproc->mpss_region + offset;
998 if (phdr->p_filesz && phdr->p_offset < fw->size) {
999 /* Firmware is large enough to be non-split */
1000 if (phdr->p_offset + phdr->p_filesz > fw->size) {
1002 "failed to load segment %d from truncated file %s\n",
1005 goto release_firmware;
1008 memcpy(ptr, fw->data + phdr->p_offset, phdr->p_filesz);
1009 } else if (phdr->p_filesz) {
1010 /* Replace "xxx.xxx" with "xxx.bxx" */
1011 sprintf(fw_name + fw_name_len - 3, "b%02d", i);
1012 ret = request_firmware(&seg_fw, fw_name, qproc->dev);
1014 dev_err(qproc->dev, "failed to load %s\n", fw_name);
1015 goto release_firmware;
1018 memcpy(ptr, seg_fw->data, seg_fw->size);
1020 release_firmware(seg_fw);
/* Zero the BSS tail of the segment */
1023 if (phdr->p_memsz > phdr->p_filesz) {
1024 memset(ptr + phdr->p_filesz, 0,
1025 phdr->p_memsz - phdr->p_filesz);
1027 size += phdr->p_memsz;
1030 /* Transfer ownership of modem ddr region to q6 */
1031 ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
1032 qproc->mpss_phys, qproc->mpss_size);
1035 "assigning Q6 access to mpss memory failed: %d\n", ret);
1037 goto release_firmware;
1040 boot_addr = relocate ? qproc->mpss_phys : min_addr;
1041 writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
1042 writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
1043 writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
1045 ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
1046 if (ret == -ETIMEDOUT)
1047 dev_err(qproc->dev, "MPSS authentication timed out\n");
1049 dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
1052 release_firmware(fw);
/* Positive values are MBA status codes, not errors */
1056 return ret < 0 ? ret : 0;
/*
 * Coredump segment callback: boot the MBA (to unlock memory) before the
 * first segment is copied, fill unreadable segments with 0xff, and
 * reclaim the MBA once the last expected segment has been collected.
 */
1059 static void qcom_q6v5_dump_segment(struct rproc *rproc,
1060 struct rproc_dump_segment *segment,
1064 struct q6v5 *qproc = rproc->priv;
/* segment->priv carries the segment's bit index (set at registration) */
1065 unsigned long mask = BIT((unsigned long)segment->priv);
1066 void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
1068 /* Unlock mba before copying segments */
1069 if (!qproc->dump_mba_loaded)
1070 ret = q6v5_mba_load(qproc);
1073 memset(dest, 0xff, segment->size);
1075 memcpy(dest, ptr, segment->size);
1077 qproc->dump_segment_mask |= mask;
1079 /* Reclaim mba after copying segments */
1080 if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
1081 if (qproc->dump_mba_loaded)
1082 q6v5_mba_reclaim(qproc);
/*
 * rproc .start hook: boot the MBA, load/authenticate the MPSS, wait for
 * the modem's start acknowledgment, then reclaim the MBA carveout for
 * HLOS. On failure the MPSS region is reclaimed and the MBA torn down.
 */
1086 static int q6v5_start(struct rproc *rproc)
1088 struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1092 ret = q6v5_mba_load(qproc);
1096 dev_info(qproc->dev, "MBA booted, loading mpss\n");
1098 ret = q6v5_mpss_load(qproc);
1102 ret = qcom_q6v5_wait_for_start(&qproc->q6v5, msecs_to_jiffies(5000));
1103 if (ret == -ETIMEDOUT) {
1104 dev_err(qproc->dev, "start timed out\n");
/* MBA no longer needed once MPSS is running; give the buffer back */
1108 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
1113 "Failed to reclaim mba buffer system may become unstable\n");
1115 /* Reset Dump Segment Mask */
1116 qproc->dump_segment_mask = 0;
1117 qproc->running = true;
/* ---- error path: reclaim MPSS region and unwind the MBA boot ---- */
1122 xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
1123 false, qproc->mpss_phys,
1125 WARN_ON(xfermemop_ret);
1126 q6v5_mba_reclaim(qproc);
/*
 * rproc .stop hook: request a graceful shutdown from the modem (logging a
 * timeout but proceeding regardless), then reclaim MBA-era resources.
 */
1131 static int q6v5_stop(struct rproc *rproc)
1133 struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
1136 qproc->running = false;
1138 ret = qcom_q6v5_request_stop(&qproc->q6v5);
1139 if (ret == -ETIMEDOUT)
1140 dev_err(qproc->dev, "timed out on wait\n");
1142 q6v5_mba_reclaim(qproc);
/*
 * rproc .da_to_va hook: translate a device address into the kernel mapping
 * of the MPSS carveout; out-of-range requests yield no mapping.
 */
1147 static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
1149 struct q6v5 *qproc = rproc->priv;
1152 offset = da - qproc->mpss_reloc;
1153 if (offset < 0 || offset + len > qproc->mpss_size)
1156 return qproc->mpss_region + offset;
/*
 * rproc .parse_fw hook: walk the MPSS MDT program headers and register one
 * coredump segment per valid PT_LOAD entry, recording the expected segment
 * set in dump_complete_mask (bit index i doubles as the segment's priv).
 * Note: @mba_fw (the rproc's own firmware) is not parsed here — the MDT is
 * requested separately.
 */
1159 static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
1160 const struct firmware *mba_fw)
1162 const struct firmware *fw;
1163 const struct elf32_phdr *phdrs;
1164 const struct elf32_phdr *phdr;
1165 const struct elf32_hdr *ehdr;
1166 struct q6v5 *qproc = rproc->priv;
1170 ret = request_firmware(&fw, qproc->hexagon_mdt_image, qproc->dev);
1172 dev_err(qproc->dev, "unable to load %s\n",
1173 qproc->hexagon_mdt_image);
1177 ehdr = (struct elf32_hdr *)fw->data;
1178 phdrs = (struct elf32_phdr *)(ehdr + 1);
1179 qproc->dump_complete_mask = 0;
1181 for (i = 0; i < ehdr->e_phnum; i++) {
1184 if (!q6v5_phdr_valid(phdr))
1187 ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
1189 qcom_q6v5_dump_segment,
1194 qproc->dump_complete_mask |= BIT(i);
1197 release_firmware(fw);
/* remoteproc callbacks for the modem (load/stop hooks elided in this view) */
1201 static const struct rproc_ops q6v5_ops = {
1202 .start = q6v5_start,
1204 .da_to_va = q6v5_da_to_va,
1205 .parse_fw = qcom_q6v5_register_dump_segments,
/*
 * MSA handover callback: once the modem takes over its proxy resources,
 * drop the proxy clocks, supplies and power domains held on its behalf.
 */
1209 static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
1211 struct q6v5 *qproc = container_of(q6v5, struct q6v5, q6v5);
1213 q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
1214 qproc->proxy_clk_count);
1215 q6v5_regulator_disable(qproc, qproc->proxy_regs,
1216 qproc->proxy_reg_count);
1217 q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
/*
 * Map the "qdsp6" and "rmb" MMIO regions and resolve the qcom,halt-regs
 * syscon phandle plus its three offsets (q6 / modem / nc halt registers).
 */
1220 static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
1222 struct of_phandle_args args;
1223 struct resource *res;
1226 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
1227 qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
1228 if (IS_ERR(qproc->reg_base))
1229 return PTR_ERR(qproc->reg_base);
1231 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
1232 qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
1233 if (IS_ERR(qproc->rmb_base))
1234 return PTR_ERR(qproc->rmb_base);
1236 ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
1237 "qcom,halt-regs", 3, 0, &args);
1239 dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
1243 qproc->halt_map = syscon_node_to_regmap(args.np);
1244 of_node_put(args.np);
1245 if (IS_ERR(qproc->halt_map))
1246 return PTR_ERR(qproc->halt_map);
1248 qproc->halt_q6 = args.args[0];
1249 qproc->halt_modem = args.args[1];
1250 qproc->halt_nc = args.args[2];
/*
 * Acquire the clocks named in the NULL-terminated @clk_names list;
 * -EPROBE_DEFER is propagated silently, other failures are logged.
 */
1255 static int q6v5_init_clocks(struct device *dev, struct clk **clks,
1263 for (i = 0; clk_names[i]; i++) {
1264 clks[i] = devm_clk_get(dev, clk_names[i]);
1265 if (IS_ERR(clks[i])) {
1266 int rc = PTR_ERR(clks[i]);
1268 if (rc != -EPROBE_DEFER)
1269 dev_err(dev, "Failed to get %s clock\n",
/*
 * Attach to each named power domain; on any failure, detach the domains
 * already attached (reverse order) before returning the error.
 */
1278 static int q6v5_pds_attach(struct device *dev, struct device **devs,
/* Count entries in the NULL-terminated name list */
1288 while (pd_names[num_pds])
1291 for (i = 0; i < num_pds; i++) {
1292 devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
1293 if (IS_ERR(devs[i])) {
1294 ret = PTR_ERR(devs[i]);
/* unroll_attach: detach in reverse order */
1302 for (i--; i >= 0; i--)
1303 dev_pm_domain_detach(devs[i], false);
/* Detach all @pd_count power domains (no power-off on detach). */
1308 static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
1313 for (i = 0; i < pd_count; i++)
1314 dev_pm_domain_detach(pds[i], false);
/*
 * Acquire the mss restart reset line, and additionally the pdc reset on
 * platforms using the alternate-reset boot scheme.
 */
1317 static int q6v5_init_reset(struct q6v5 *qproc)
1319 qproc->mss_restart = devm_reset_control_get_exclusive(qproc->dev,
1321 if (IS_ERR(qproc->mss_restart)) {
1322 dev_err(qproc->dev, "failed to acquire mss restart\n");
1323 return PTR_ERR(qproc->mss_restart);
1326 if (qproc->has_alt_reset) {
1327 qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
1329 if (IS_ERR(qproc->pdc_reset)) {
1330 dev_err(qproc->dev, "failed to acquire pdc reset\n");
1331 return PTR_ERR(qproc->pdc_reset);
/*
 * Resolve the "mba" and "mpss" child nodes' memory-region phandles and
 * ioremap (write-combined) both carveouts, recording phys base and size.
 */
1338 static int q6v5_alloc_memory_region(struct q6v5 *qproc)
1340 struct device_node *child;
1341 struct device_node *node;
1345 child = of_get_child_by_name(qproc->dev->of_node, "mba");
1346 node = of_parse_phandle(child, "memory-region", 0);
1347 ret = of_address_to_resource(node, 0, &r);
1349 dev_err(qproc->dev, "unable to resolve mba region\n");
1354 qproc->mba_phys = r.start;
1355 qproc->mba_size = resource_size(&r);
1356 qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
1357 if (!qproc->mba_region) {
1358 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1359 &r.start, qproc->mba_size);
1363 child = of_get_child_by_name(qproc->dev->of_node, "mpss");
1364 node = of_parse_phandle(child, "memory-region", 0);
1365 ret = of_address_to_resource(node, 0, &r);
1367 dev_err(qproc->dev, "unable to resolve mpss region\n");
/* mpss_reloc defaults to the carveout base; q6v5_mpss_load may update it */
1372 qproc->mpss_phys = qproc->mpss_reloc = r.start;
1373 qproc->mpss_size = resource_size(&r);
1374 qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
1375 if (!qproc->mpss_region) {
1376 dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
1377 &r.start, qproc->mpss_size);
/*
 * Platform probe: resolve match data, allocate the rproc, map resources,
 * acquire clocks/regulators/power-domains/resets, register subdevices
 * (glink, smd, ssr, sysmon) and finally add the rproc. Power domains are
 * the only resources needing explicit unwind; the rest are devm-managed.
 */
1384 static int q6v5_probe(struct platform_device *pdev)
1386 const struct rproc_hexagon_res *desc;
1388 struct rproc *rproc;
1389 const char *mba_image;
1392 desc = of_device_get_match_data(&pdev->dev);
/* SCM may not be ready yet; defer rather than fail */
1396 if (desc->need_mem_protection && !qcom_scm_is_available())
1397 return -EPROBE_DEFER;
/* firmware-name[0] optionally overrides the default MBA image name */
1399 mba_image = desc->hexagon_mba_image;
1400 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1402 if (ret < 0 && ret != -EINVAL)
1405 rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
1406 mba_image, sizeof(*qproc));
1408 dev_err(&pdev->dev, "failed to allocate rproc\n");
1412 rproc->auto_boot = false;
1414 qproc = (struct q6v5 *)rproc->priv;
1415 qproc->dev = &pdev->dev;
1416 qproc->rproc = rproc;
/* firmware-name[1] optionally overrides the default MDT name */
1417 qproc->hexagon_mdt_image = "modem.mdt";
1418 ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1419 1, &qproc->hexagon_mdt_image);
1420 if (ret < 0 && ret != -EINVAL)
1423 platform_set_drvdata(pdev, qproc);
1425 ret = q6v5_init_mem(qproc, pdev);
1429 ret = q6v5_alloc_memory_region(qproc);
1433 ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
1434 desc->proxy_clk_names);
1436 dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
1439 qproc->proxy_clk_count = ret;
1441 ret = q6v5_init_clocks(&pdev->dev, qproc->reset_clks,
1442 desc->reset_clk_names);
1444 dev_err(&pdev->dev, "Failed to get reset clocks.\n");
1447 qproc->reset_clk_count = ret;
1449 ret = q6v5_init_clocks(&pdev->dev, qproc->active_clks,
1450 desc->active_clk_names);
1452 dev_err(&pdev->dev, "Failed to get active clocks.\n");
1455 qproc->active_clk_count = ret;
1457 ret = q6v5_regulator_init(&pdev->dev, qproc->proxy_regs,
1458 desc->proxy_supply);
1460 dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
1463 qproc->proxy_reg_count = ret;
1465 ret = q6v5_regulator_init(&pdev->dev, qproc->active_regs,
1466 desc->active_supply);
1468 dev_err(&pdev->dev, "Failed to get active regulators.\n");
1471 qproc->active_reg_count = ret;
1473 ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
1474 desc->active_pd_names);
1476 dev_err(&pdev->dev, "Failed to attach active power domains\n");
1479 qproc->active_pd_count = ret;
1481 ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
1482 desc->proxy_pd_names);
1484 dev_err(&pdev->dev, "Failed to init power domains\n");
1485 goto detach_active_pds;
1487 qproc->proxy_pd_count = ret;
1489 qproc->has_alt_reset = desc->has_alt_reset;
1490 ret = q6v5_init_reset(qproc);
1492 goto detach_proxy_pds;
1494 qproc->version = desc->version;
1495 qproc->need_mem_protection = desc->need_mem_protection;
1497 ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
1500 goto detach_proxy_pds;
/* Both carveouts start HLOS-owned until handed to the modem */
1502 qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
1503 qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
1504 qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
1505 qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
1506 qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
1507 qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
1508 if (IS_ERR(qproc->sysmon)) {
1509 ret = PTR_ERR(qproc->sysmon);
1510 goto detach_proxy_pds;
1513 ret = rproc_add(rproc);
1515 goto detach_proxy_pds;
/* Error unwind: detach power domains acquired above */
1520 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
1522 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
/*
 * q6v5_remove() - unbind the driver, tearing resources down in reverse
 * order of q6v5_probe().
 *
 * NOTE(review): braces and the final return are not visible in this
 * subsampled excerpt.
 */
1529 static int q6v5_remove(struct platform_device *pdev)
1531 struct q6v5 *qproc = platform_get_drvdata(pdev);
/* Remove the rproc from the framework before dismantling its subdevices. */
1533 rproc_del(qproc->rproc);
1535 qcom_remove_sysmon_subdev(qproc->sysmon);
1536 qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
1537 qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
1538 qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
/* Release the power domains acquired during probe. */
1540 q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
1541 q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
/* Release the rproc; its private area embeds qproc (see rproc_alloc). */
1543 rproc_free(qproc->rproc);
/*
 * Resource description for the SDM845 modem subsystem: MBA firmware image
 * name, clock and power-domain requirements, and security properties.
 *
 * NOTE(review): the string entries inside the name arrays are not visible
 * in this subsampled excerpt.
 */
1548 static const struct rproc_hexagon_res sdm845_mss = {
1549 .hexagon_mba_image = "mba.mbn",
1550 .proxy_clk_names = (char*[]){
1555 .reset_clk_names = (char*[]){
1560 .active_clk_names = (char*[]){
1567 .active_pd_names = (char*[]){
1571 .proxy_pd_names = (char*[]){
/* SDM845 needs SCM-assisted memory protection and the alternate reset. */
1577 .need_mem_protection = true,
1578 .has_alt_reset = true,
1579 .version = MSS_SDM845,
/*
 * Resource description for the MSM8996 modem subsystem.  Unlike SDM845 it
 * uses explicit proxy regulator supplies and no alternate reset.
 *
 * NOTE(review): the entries inside the supply/clock arrays are not visible
 * in this subsampled excerpt.
 */
1582 static const struct rproc_hexagon_res msm8996_mss = {
1583 .hexagon_mba_image = "mba.mbn",
1584 .proxy_supply = (struct qcom_mss_reg_res[]) {
1591 .proxy_clk_names = (char*[]){
1597 .active_clk_names = (char*[]){
1606 .need_mem_protection = true,
1607 .has_alt_reset = false,
1608 .version = MSS_MSM8996,
/*
 * Resource description for the MSM8916 modem subsystem.  No SCM memory
 * protection is required on this SoC.
 *
 * NOTE(review): the entries inside the supply/clock arrays are not visible
 * in this subsampled excerpt.
 */
1611 static const struct rproc_hexagon_res msm8916_mss = {
1612 .hexagon_mba_image = "mba.mbn",
1613 .proxy_supply = (struct qcom_mss_reg_res[]) {
1628 .proxy_clk_names = (char*[]){
1632 .active_clk_names = (char*[]){
1638 .need_mem_protection = false,
1639 .has_alt_reset = false,
1640 .version = MSS_MSM8916,
/*
 * Resource description for the MSM8974 modem subsystem.  Note the split
 * ".b00" MBA image name and the presence of both proxy and active
 * regulator supplies.
 *
 * NOTE(review): the entries inside the supply/clock arrays are not visible
 * in this subsampled excerpt.
 */
1643 static const struct rproc_hexagon_res msm8974_mss = {
1644 .hexagon_mba_image = "mba.b00",
1645 .proxy_supply = (struct qcom_mss_reg_res[]) {
1660 .active_supply = (struct qcom_mss_reg_res[]) {
1668 .proxy_clk_names = (char*[]){
1672 .active_clk_names = (char*[]){
1678 .need_mem_protection = false,
1679 .has_alt_reset = false,
1680 .version = MSS_MSM8974,
/*
 * Device-tree match table: one entry per supported SoC, each carrying a
 * pointer to its resource description.  The generic "qcom,q6v5-pil"
 * compatible maps to the MSM8916 resources.
 */
1683 static const struct of_device_id q6v5_of_match[] = {
1684 { .compatible = "qcom,q6v5-pil", .data = &msm8916_mss},
1685 { .compatible = "qcom,msm8916-mss-pil", .data = &msm8916_mss},
1686 { .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
1687 { .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
1688 { .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
/* Expose the table so module autoloading works from DT compatibles. */
1691 MODULE_DEVICE_TABLE(of, q6v5_of_match);
/* Platform driver glue and module metadata. */
1693 static struct platform_driver q6v5_driver = {
1694 .probe = q6v5_probe,
1695 .remove = q6v5_remove,
1697 .name = "qcom-q6v5-mss",
1698 .of_match_table = q6v5_of_match,
/* Boilerplate: registers/unregisters the driver at module load/unload. */
1701 module_platform_driver(q6v5_driver);
1703 MODULE_DESCRIPTION("Qualcomm Self-authenticating modem remoteproc driver");
1704 MODULE_LICENSE("GPL v2");