bool running;
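+	/* tracks whether the MBA is currently loaded */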
+ bool dump_mba_loaded;
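+	/* one bit set per segment already copied out during a coredump */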
+ unsigned long dump_segment_mask;
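+	/* bitmask covering every registered dump segment */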
+ unsigned long dump_complete_mask;
+
phys_addr_t mba_phys;
void *mba_region;
size_t mba_size;
return true;
}
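+/*
+ * Power up the Hexagon core and boot the MBA (modem boot authenticator)
+ * image; on success the MBA is left running until q6v5_mba_reclaim().
+ */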
+static int q6v5_mba_load(struct q6v5 *qproc)
+{
+ int ret;
+ int xfermemop_ret;
+
+ qcom_q6v5_prepare(&qproc->q6v5);
+
+ ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable proxy supplies\n");
+ goto disable_irqs;
+ }
+
+ ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable proxy clocks\n");
+ goto disable_proxy_reg;
+ }
+
+ ret = q6v5_regulator_enable(qproc, qproc->active_regs,
+ qproc->active_reg_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable supplies\n");
+ goto disable_proxy_clk;
+ }
+
+ ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
+ qproc->reset_clk_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable reset clocks\n");
+ goto disable_vdd;
+ }
+
+ ret = q6v5_reset_deassert(qproc);
+ if (ret) {
+ dev_err(qproc->dev, "failed to deassert mss restart\n");
+ goto disable_reset_clks;
+ }
+
+ ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
+ qproc->active_clk_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable clocks\n");
+ goto assert_reset;
+ }
+
+ /* Assign MBA image access in DDR to q6 */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ qproc->mba_phys, qproc->mba_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mba memory failed: %d\n", ret);
+ goto disable_active_clks;
+ }
+
+ writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+
+ ret = q6v5proc_reset(qproc);
+ if (ret)
+ goto reclaim_mba;
+
+ ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
+ if (ret == -ETIMEDOUT) {
+ dev_err(qproc->dev, "MBA boot timed out\n");
+ goto halt_axi_ports;
+ } else if (ret != RMB_MBA_XPU_UNLOCKED &&
+ ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
+ dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
+ ret = -EINVAL;
+ goto halt_axi_ports;
+ }
+
+ qproc->dump_mba_loaded = true;
+ return 0;
+
+halt_axi_ports:
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+
+reclaim_mba:
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ qproc->mba_phys,
+ qproc->mba_size);
+ if (xfermemop_ret) {
+ dev_err(qproc->dev,
+ "Failed to reclaim mba buffer, system may become unstable\n");
+ }
+
+disable_active_clks:
+ q6v5_clk_disable(qproc->dev, qproc->active_clks,
+ qproc->active_clk_count);
+assert_reset:
+ q6v5_reset_assert(qproc);
+disable_reset_clks:
+ q6v5_clk_disable(qproc->dev, qproc->reset_clks,
+ qproc->reset_clk_count);
+disable_vdd:
+ q6v5_regulator_disable(qproc, qproc->active_regs,
+ qproc->active_reg_count);
+disable_proxy_clk:
+ q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+disable_proxy_reg:
+ q6v5_regulator_disable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+disable_irqs:
+ qcom_q6v5_unprepare(&qproc->q6v5);
+
+ return ret;
+}
+
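+/* Undo q6v5_mba_load(): halt the AXI ports, reclaim memory and power down */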
+static void q6v5_mba_reclaim(struct q6v5 *qproc)
+{
+ int ret;
+ u32 val;
+
+ qproc->dump_mba_loaded = false;
+
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
+ if (qproc->version == MSS_MSM8996) {
+		/*
+		 * Assert the I/O clamps to avoid high MX current during
+		 * LPASS/MSS restart.
+		 */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
+ QDSP6v56_CLAMP_QMC_MEM;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ }
+
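+	/* Hand ownership of the MPSS region back to Linux */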
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ false, qproc->mpss_phys,
+ qproc->mpss_size);
+ WARN_ON(ret);
+
+ q6v5_reset_assert(qproc);
+
+ q6v5_clk_disable(qproc->dev, qproc->reset_clks,
+ qproc->reset_clk_count);
+ q6v5_clk_disable(qproc->dev, qproc->active_clks,
+ qproc->active_clk_count);
+ q6v5_regulator_disable(qproc, qproc->active_regs,
+ qproc->active_reg_count);
+
+	/*
+	 * In case of a failure or coredump scenario where reclaiming the MBA
+	 * memory could not happen earlier, reclaim it here.
+	 */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ qproc->mba_phys,
+ qproc->mba_size);
+ WARN_ON(ret);
+
+ ret = qcom_q6v5_unprepare(&qproc->q6v5);
+ if (ret) {
+ q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
+ qproc->proxy_clk_count);
+ q6v5_regulator_disable(qproc, qproc->proxy_regs,
+ qproc->proxy_reg_count);
+ }
+}
+
static int q6v5_mpss_load(struct q6v5 *qproc)
{
const struct elf32_phdr *phdrs;
}
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
+ qproc->mpss_reloc = mpss_reloc;
/* Load firmware segments */
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
return ret < 0 ? ret : 0;
}
-static int q6v5_start(struct rproc *rproc)
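+/*
+ * Per-segment coredump callback: boot the MBA for the first segment so the
+ * protected memory becomes readable, then reclaim it once every segment in
+ * dump_complete_mask has been copied.
+ */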
+static void qcom_q6v5_dump_segment(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest)
{
- struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
- int xfermemop_ret;
- int ret;
-
- qcom_q6v5_prepare(&qproc->q6v5);
-
- ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
- qproc->proxy_reg_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable proxy supplies\n");
- goto disable_irqs;
- }
+ int ret = 0;
+ struct q6v5 *qproc = rproc->priv;
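+	/* segment->priv carries the program-header index set at registration */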
+ unsigned long mask = BIT((unsigned long)segment->priv);
+ void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
- ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
- qproc->proxy_clk_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable proxy clocks\n");
- goto disable_proxy_reg;
- }
+	/* Boot the MBA to unlock the memory before copying segments */
+ if (!qproc->dump_mba_loaded)
+ ret = q6v5_mba_load(qproc);
- ret = q6v5_regulator_enable(qproc, qproc->active_regs,
- qproc->active_reg_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable supplies\n");
- goto disable_proxy_clk;
- }
+ if (!ptr || ret)
+ memset(dest, 0xff, segment->size);
+ else
+ memcpy(dest, ptr, segment->size);
- ret = q6v5_clk_enable(qproc->dev, qproc->reset_clks,
- qproc->reset_clk_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable reset clocks\n");
- goto disable_vdd;
- }
+ qproc->dump_segment_mask |= mask;
- ret = q6v5_reset_deassert(qproc);
- if (ret) {
- dev_err(qproc->dev, "failed to deassert mss restart\n");
- goto disable_reset_clks;
- }
-
- ret = q6v5_clk_enable(qproc->dev, qproc->active_clks,
- qproc->active_clk_count);
- if (ret) {
- dev_err(qproc->dev, "failed to enable clocks\n");
- goto assert_reset;
- }
-
- /* Assign MBA image access in DDR to q6 */
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
- qproc->mba_phys, qproc->mba_size);
- if (ret) {
- dev_err(qproc->dev,
- "assigning Q6 access to mba memory failed: %d\n", ret);
- goto disable_active_clks;
+	/* Reclaim the MBA once every segment has been copied */
+ if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
+ if (qproc->dump_mba_loaded)
+ q6v5_mba_reclaim(qproc);
}
+}
- writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+static int q6v5_start(struct rproc *rproc)
+{
+ struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+ int xfermemop_ret;
+ int ret;
- ret = q6v5proc_reset(qproc);
+ ret = q6v5_mba_load(qproc);
if (ret)
- goto reclaim_mba;
-
- ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
- if (ret == -ETIMEDOUT) {
- dev_err(qproc->dev, "MBA boot timed out\n");
- goto halt_axi_ports;
- } else if (ret != RMB_MBA_XPU_UNLOCKED &&
- ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
- dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
- ret = -EINVAL;
- goto halt_axi_ports;
- }
+ return ret;
dev_info(qproc->dev, "MBA booted, loading mpss\n");
if (xfermemop_ret)
dev_err(qproc->dev,
"Failed to reclaim mba buffer system may become unstable\n");
+
+ /* Reset Dump Segment Mask */
+	/* Reset the dump segment mask for the next coredump */
qproc->running = true;
return 0;
false, qproc->mpss_phys,
qproc->mpss_size);
WARN_ON(xfermemop_ret);
-
-halt_axi_ports:
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
-
-reclaim_mba:
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
- qproc->mba_phys,
- qproc->mba_size);
- if (xfermemop_ret) {
- dev_err(qproc->dev,
- "Failed to reclaim mba buffer, system may become unstable\n");
- }
-
-disable_active_clks:
- q6v5_clk_disable(qproc->dev, qproc->active_clks,
- qproc->active_clk_count);
-
-assert_reset:
- q6v5_reset_assert(qproc);
-disable_reset_clks:
- q6v5_clk_disable(qproc->dev, qproc->reset_clks,
- qproc->reset_clk_count);
-disable_vdd:
- q6v5_regulator_disable(qproc, qproc->active_regs,
- qproc->active_reg_count);
-disable_proxy_clk:
- q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
- qproc->proxy_clk_count);
-disable_proxy_reg:
- q6v5_regulator_disable(qproc, qproc->proxy_regs,
- qproc->proxy_reg_count);
-
-disable_irqs:
- qcom_q6v5_unprepare(&qproc->q6v5);
+ q6v5_mba_reclaim(qproc);
return ret;
}
{
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
int ret;
- u32 val;
qproc->running = false;
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "timed out on wait\n");
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
- q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
- if (qproc->version == MSS_MSM8996) {
- /*
- * To avoid high MX current during LPASS/MSS restart.
- */
- val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- val |= Q6SS_CLAMP_IO | QDSP6v56_CLAMP_WL |
- QDSP6v56_CLAMP_QMC_MEM;
- writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
- }
-
-
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false,
- qproc->mpss_phys, qproc->mpss_size);
- WARN_ON(ret);
-
- q6v5_reset_assert(qproc);
-
- ret = qcom_q6v5_unprepare(&qproc->q6v5);
- if (ret) {
- q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
- qproc->proxy_clk_count);
- q6v5_regulator_disable(qproc, qproc->proxy_regs,
- qproc->proxy_reg_count);
- }
-
- q6v5_clk_disable(qproc->dev, qproc->reset_clks,
- qproc->reset_clk_count);
- q6v5_clk_disable(qproc->dev, qproc->active_clks,
- qproc->active_clk_count);
- q6v5_regulator_disable(qproc, qproc->active_regs,
- qproc->active_reg_count);
+ q6v5_mba_reclaim(qproc);
return 0;
}
return qproc->mpss_region + offset;
}
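+/*
+ * Register each valid program header of modem.mdt as a custom coredump
+ * segment, recording the full set of segments in dump_complete_mask.
+ */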
+static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
+ const struct firmware *mba_fw)
+{
+ const struct firmware *fw;
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ struct q6v5 *qproc = rproc->priv;
+ unsigned long i;
+ int ret;
+
+ ret = request_firmware(&fw, "modem.mdt", qproc->dev);
+ if (ret < 0) {
+ dev_err(qproc->dev, "unable to load modem.mdt\n");
+ return ret;
+ }
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+ qproc->dump_complete_mask = 0;
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (!q6v5_phdr_valid(phdr))
+ continue;
+
+ ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
+ phdr->p_memsz,
+ qcom_q6v5_dump_segment,
+ (void *)i);
+ if (ret)
+ break;
+
+ qproc->dump_complete_mask |= BIT(i);
+ }
+
+ release_firmware(fw);
+ return ret;
+}
+
static const struct rproc_ops q6v5_ops = {
.start = q6v5_start,
.stop = q6v5_stop,
.da_to_va = q6v5_da_to_va,
+ .parse_fw = qcom_q6v5_register_dump_segments,
.load = q6v5_load,
};
if (!desc)
return -EINVAL;
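+	/* Memory ownership transfers go through SCM; wait for it to probe */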
+ if (desc->need_mem_protection && !qcom_scm_is_available())
+ return -EPROBE_DEFER;
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
desc->hexagon_mba_image, sizeof(*qproc));
if (!rproc) {