staging: tidspbridge: replace CONST with C standard const
drivers/staging/tidspbridge/core/tiomap3430.c
1 /*
2  * tiomap.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * Processor Manager Driver for TI OMAP3430 EVM.
7  *
8  * Copyright (C) 2005-2006 Texas Instruments, Inc.
9  *
10  * This package is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17  */
18
19 #include <linux/types.h>
20 /*  ----------------------------------- Host OS */
21 #include <dspbridge/host_os.h>
22 #include <linux/mm.h>
23 #include <linux/mmzone.h>
24 #include <plat/control.h>
25
26 /*  ----------------------------------- DSP/BIOS Bridge */
27 #include <dspbridge/dbdefs.h>
28
29 /*  ----------------------------------- Trace & Debug */
30 #include <dspbridge/dbc.h>
31
32 /*  ----------------------------------- OS Adaptation Layer */
33 #include <dspbridge/cfg.h>
34 #include <dspbridge/drv.h>
35 #include <dspbridge/sync.h>
36
37 /* ------------------------------------ Hardware Abstraction Layer */
38 #include <hw_defs.h>
39 #include <hw_mmu.h>
40
41 /*  ----------------------------------- Link Driver */
42 #include <dspbridge/dspdefs.h>
43 #include <dspbridge/dspchnl.h>
44 #include <dspbridge/dspdeh.h>
45 #include <dspbridge/dspio.h>
46 #include <dspbridge/dspmsg.h>
47 #include <dspbridge/pwr.h>
48 #include <dspbridge/io_sm.h>
49
50 /*  ----------------------------------- Platform Manager */
51 #include <dspbridge/dev.h>
52 #include <dspbridge/dspapi.h>
53 #include <dspbridge/dmm.h>
54 #include <dspbridge/wdt.h>
55
56 /*  ----------------------------------- Local */
57 #include "_tiomap.h"
58 #include "_tiomap_pwr.h"
59 #include "tiomap_io.h"
60
61 /* Offset in shared mem to write to in order to synchronize start with DSP */
62 #define SHMSYNCOFFSET 4         /* GPP byte offset */
63
64 #define BUFFERSIZE 1024
65
66 #define TIHELEN_ACKTIMEOUT  10000
67
68 #define MMU_SECTION_ADDR_MASK    0xFFF00000
69 #define MMU_SSECTION_ADDR_MASK   0xFF000000
70 #define MMU_LARGE_PAGE_MASK      0xFFFF0000
71 #define MMU_SMALL_PAGE_MASK      0xFFFFF000
72 #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
73 #define PAGES_II_LVL_TABLE   512
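/* Convert a physical address to its corresponding struct page */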
74 #define PHYS_TO_PAGE(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
75
76 /* Forward Declarations: */
77 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
78 static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
79                                   OUT u8 *host_buff,
80                                   u32 dsp_addr, u32 ul_num_bytes,
81                                   u32 mem_type);
82 static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
83                                    u32 dsp_addr);
84 static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
85                                     int *board_state);
86 static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
87 static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
88                                    IN u8 *host_buff,
89                                    u32 dsp_addr, u32 ul_num_bytes,
90                                    u32 mem_type);
91 static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
92                                     u32 brd_state);
93 static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
94                                    u32 dsp_dest_addr, u32 dsp_src_addr,
95                                    u32 ul_num_bytes, u32 mem_type);
96 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
97                                     IN u8 *host_buff, u32 dsp_addr,
98                                     u32 ul_num_bytes, u32 mem_type);
99 static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
100                                   u32 ul_mpu_addr, u32 virt_addr,
101                                   u32 ul_num_bytes, u32 ul_map_attr,
102                                   struct page **mapped_pages);
103 static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
104                                      u32 virt_addr, u32 ul_num_bytes);
105 static int bridge_dev_create(OUT struct bridge_dev_context
106                                         **dev_cntxt,
107                                         struct dev_object *hdev_obj,
108                                         IN struct cfg_hostres *config_param);
109 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
110                                   u32 dw_cmd, IN OUT void *pargs);
111 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
112 static u32 user_va2_pa(struct mm_struct *mm, u32 address);
113 static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
114                              u32 va, u32 size,
115                              struct hw_mmu_map_attrs_t *map_attrs);
116 static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
117                           u32 size, struct hw_mmu_map_attrs_t *attrs);
118 static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
119                                   u32 ul_mpu_addr, u32 virt_addr,
120                                   u32 ul_num_bytes,
121                                   struct hw_mmu_map_attrs_t *hw_attrs);
122
123 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
124
125 /*  ----------------------------------- Globals */
126
127 /* Attributes of L2 page tables for DSP MMU */
128 struct page_info {
129         u32 num_entries;        /* Number of valid PTEs in the L2 PT */
130 };
131
132 /* Attributes used to manage the DSP MMU page tables */
133 struct pg_table_attrs {
134         spinlock_t pg_lock;     /* Critical section object handle */
135
136         u32 l1_base_pa;         /* Physical address of the L1 PT */
137         u32 l1_base_va;         /* Virtual  address of the L1 PT */
138         u32 l1_size;            /* Size of the L1 PT */
139         u32 l1_tbl_alloc_pa;
140         /* Physical address of Allocated mem for L1 table. May not be aligned */
141         u32 l1_tbl_alloc_va;
142         /* Virtual address of Allocated mem for L1 table. May not be aligned */
143         u32 l1_tbl_alloc_sz;
144         /* Size of consistent memory allocated for L1 table.
145          * May not be aligned */
146
147         u32 l2_base_pa;         /* Physical address of the L2 PT */
148         u32 l2_base_va;         /* Virtual  address of the L2 PT */
149         u32 l2_size;            /* Size of the L2 PT */
150         u32 l2_tbl_alloc_pa;
151         /* Physical address of Allocated mem for L2 table. May not be aligned */
152         u32 l2_tbl_alloc_va;
153         /* Virtual address of Allocated mem for L2 table. May not be aligned */
154         u32 l2_tbl_alloc_sz;
155         /* Size of consistent memory allocated for L2 table.
156          * May not be aligned */
157
158         u32 l2_num_pages;       /* Number of allocated L2 PT */
159         /* Array [l2_num_pages] of L2 PT info structs */
160         struct page_info *pg_info;
161 };
162
163 /*
164  *  This Bridge driver's function interface table.
165  */
166 static struct bridge_drv_interface drv_interface_fxns = {
167         /* Bridge API ver. for which this bridge driver is built. */
168         BRD_API_MAJOR_VERSION,
169         BRD_API_MINOR_VERSION,
170         bridge_dev_create,
171         bridge_dev_destroy,
172         bridge_dev_ctrl,
173         bridge_brd_monitor,
174         bridge_brd_start,
175         bridge_brd_stop,
176         bridge_brd_status,
177         bridge_brd_read,
178         bridge_brd_write,
179         bridge_brd_set_state,
180         bridge_brd_mem_copy,
181         bridge_brd_mem_write,
182         bridge_brd_mem_map,
183         bridge_brd_mem_un_map,
184         /* The following CHNL functions are provided by chnl_io.lib: */
185         bridge_chnl_create,
186         bridge_chnl_destroy,
187         bridge_chnl_open,
188         bridge_chnl_close,
189         bridge_chnl_add_io_req,
190         bridge_chnl_get_ioc,
191         bridge_chnl_cancel_io,
192         bridge_chnl_flush_io,
193         bridge_chnl_get_info,
194         bridge_chnl_get_mgr_info,
195         bridge_chnl_idle,
196         bridge_chnl_register_notify,
197         /* The following IO functions are provided by chnl_io.lib: */
198         bridge_io_create,
199         bridge_io_destroy,
200         bridge_io_on_loaded,
201         bridge_io_get_proc_load,
202         /* The following msg_ctrl functions are provided by chnl_io.lib: */
203         bridge_msg_create,
204         bridge_msg_create_queue,
205         bridge_msg_delete,
206         bridge_msg_delete_queue,
207         bridge_msg_get,
208         bridge_msg_put,
209         bridge_msg_register_notify,
210         bridge_msg_set_queue_id,
211 };
212
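/*
 *  ======== flush_all ========
 *      Wake the DSP if it is in (DSP) hibernation, then flush all
 *      DSP MMU TLB entries.
 */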
213 static inline void flush_all(struct bridge_dev_context *dev_context)
214 {
215         if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
216             dev_context->dw_brd_state == BRD_HIBERNATION)
217                 wake_dsp(dev_context, NULL);
218
219         hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
220 }
221
222 static void bad_page_dump(u32 pa, struct page *pg)
223 {
224         pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
225         pr_emerg("Bad page state in process '%s'\n"
226                  "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
227                  "Backtrace:\n",
228                  current->comm, pg, (int)(2 * sizeof(unsigned long)),
229                  (unsigned long)pg->flags, pg->mapping,
230                  page_mapcount(pg), page_count(pg));
231         dump_stack();
232 }
233
234 /*
235  *  ======== bridge_drv_entry ========
236  *  purpose:
237  *      Bridge Driver entry point.
238  */
239 void bridge_drv_entry(OUT struct bridge_drv_interface **drv_intf,
240                    IN const char *driver_file_name)
241 {
242
243         DBC_REQUIRE(driver_file_name != NULL);
244
245         io_sm_init();           /* Initialization of io_sm module */
246
247         if (strcmp(driver_file_name, "UMA") == 0)
248                 *drv_intf = &drv_interface_fxns;
249         else
250                 dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
251
252 }
253
254 /*
255  *  ======== bridge_brd_monitor ========
256  *  purpose:
257  *      This bridge_brd_monitor puts DSP into a Loadable state.
258  *      i.e. the application can load and start the device.
259  *
260  *  Preconditions:
261  *      Device in 'OFF' state.
262  */
263 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
264 {
265         int status = 0;
266         struct bridge_dev_context *dev_context = dev_ctxt;
267         u32 temp;
268         struct dspbridge_platform_data *pdata =
269                                     omap_dspbridge_dev->dev.platform_data;
270
271         temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
272                                         OMAP_POWERSTATEST_MASK;
273         if (!(temp & 0x02)) {
274                 /* IVA2 is not in ON state */
275                 /* Read and set PM_PWSTCTRL_IVA2  to ON */
276                 (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
277                         PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
278                 /* Set the SW supervised state transition */
279                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
280                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
281
282                 /* Wait until the state has moved to ON */
283                 while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
284                                                 OMAP_INTRANSITION_MASK)
285                         ;
286                 /* Disable Automatic transition */
287                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
288                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
289         }
290         (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
291                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
292         dsp_clk_enable(DSP_CLK_IVA2);
293
294         if (DSP_SUCCEEDED(status)) {
295                 /* set the device state to IDLE */
296                 dev_context->dw_brd_state = BRD_IDLE;
297         }
298         return status;
299 }
300
301 /*
302  *  ======== bridge_brd_read ========
303  *  purpose:
304  *      Reads buffers from DSP memory.
305  */
306 static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
307                                   OUT u8 *host_buff, u32 dsp_addr,
308                                   u32 ul_num_bytes, u32 mem_type)
309 {
310         int status = 0;
311         struct bridge_dev_context *dev_context = dev_ctxt;
312         u32 offset;
313         u32 dsp_base_addr = dev_ctxt->dw_dsp_base_addr;
314
315         if (dsp_addr < dev_context->dw_dsp_start_add) {
316                 status = -EPERM;
317                 return status;
318         }
319         /* change here to account for the 3 bands of the DSP internal memory */
320         if ((dsp_addr - dev_context->dw_dsp_start_add) <
321             dev_context->dw_internal_size) {
322                 offset = dsp_addr - dev_context->dw_dsp_start_add;
323         } else {
324                 status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
325                                            ul_num_bytes, mem_type);
326                 return status;
327         }
328         /* copy the data from DSP memory */
329         memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
330         return status;
331 }
332
333 /*
334  *  ======== bridge_brd_set_state ========
335  *  purpose:
336  *      This routine updates the Board status.
337  */
338 static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
339                                     u32 brd_state)
340 {
341         int status = 0;
342         struct bridge_dev_context *dev_context = dev_ctxt;
343
344         dev_context->dw_brd_state = brd_state;
345         return status;
346 }
347
348 /*
349  *  ======== bridge_brd_start ========
350  *  purpose:
351  *      Initializes DSP MMU and Starts DSP.
352  *
353  *  Preconditions:
354  *  a) DSP domain is 'ACTIVE'.
355  *  b) DSP_RST1 is asserted.
356  *  c) DSP_RST2 is released.
357  */
358 static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
359                                    u32 dsp_addr)
360 {
361         int status = 0;
362         struct bridge_dev_context *dev_context = dev_ctxt;
363         u32 dw_sync_addr = 0;
364         u32 ul_shm_base;        /* Gpp Phys SM base addr(byte) */
365         u32 ul_shm_base_virt;   /* Dsp Virt SM base addr */
366         u32 ul_tlb_base_virt;   /* Base of MMU TLB entry */
367         /* Offset of shm_base_virt from tlb_base_virt */
368         u32 ul_shm_offset_virt;
369         s32 entry_ndx;
370         s32 itmp_entry_ndx = 0; /* DSP-MMU TLB entry base address */
371         struct cfg_hostres *resources = NULL;
372         u32 temp;
373         u32 ul_dsp_clk_rate;
374         u32 ul_dsp_clk_addr;
375         u32 ul_bios_gp_timer;
376         u32 clk_cmd;
377         struct io_mgr *hio_mgr;
378         u32 ul_load_monitor_timer;
379         struct dspbridge_platform_data *pdata =
380                                 omap_dspbridge_dev->dev.platform_data;
381
382         /* The device context contains all the mmu setup info from when the
383          * last dsp base image was loaded. The first entry is always
384          * SHMMEM base. */
385         /* Get SHM_BEG - convert to byte address */
386         (void)dev_get_symbol(dev_context->hdev_obj, SHMBASENAME,
387                              &ul_shm_base_virt);
388         ul_shm_base_virt *= DSPWORDSIZE;
389         DBC_ASSERT(ul_shm_base_virt != 0);
390         /* DSP Virtual address */
391         ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
392         DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
393         ul_shm_offset_virt =
394             ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
395         /* Kernel logical address */
396         ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
397
398         DBC_ASSERT(ul_shm_base != 0);
399         /* 2nd word is used as sync field */
400         dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
401         /* Write a signature into the shm base + offset; this will
402          * get cleared when the DSP program starts. */
403         if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
404                 pr_err("%s: Illegal SM base\n", __func__);
405                 status = -EPERM;
406         } else
407                 *((volatile u32 *)dw_sync_addr) = 0xffffffff;
408
409         if (DSP_SUCCEEDED(status)) {
410                 resources = dev_context->resources;
411                 if (!resources)
412                         status = -EPERM;
413
414                 /* Assert RST1, i.e. reset only the DSP megacell */
415                 if (DSP_SUCCEEDED(status)) {
416                         (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
417                                         OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD,
418                                         OMAP2_RM_RSTCTRL);
419                         /* Mask address with 1K for compatibility */
420                         __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
421                                         OMAP343X_CTRL_REGADDR(
422                                         OMAP343X_CONTROL_IVA2_BOOTADDR));
423                         /*
424                          * Set bootmode to self loop if dsp_debug flag is true
425                          */
426                         __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
427                                         OMAP343X_CTRL_REGADDR(
428                                         OMAP343X_CONTROL_IVA2_BOOTMOD));
429                 }
430         }
431         if (DSP_SUCCEEDED(status)) {
432                 /* Reset and Unreset the RST2, so that BOOTADDR is copied to
433                  * IVA2 SYSC register */
434                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
435                         OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
436                 udelay(100);
437                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
438                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
439                 udelay(100);
440
441                 /* Disable the DSP MMU */
442                 hw_mmu_disable(resources->dw_dmmu_base);
443                 /* Disable TWL */
444                 hw_mmu_twl_disable(resources->dw_dmmu_base);
445
446                 /* Only make TLB entry if both addresses are non-zero */
447                 for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
448                      entry_ndx++) {
449                         struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
450                         struct hw_mmu_map_attrs_t map_attrs = {
451                                 .endianism = e->endianism,
452                                 .element_size = e->elem_size,
453                                 .mixed_size = e->mixed_mode,
454                         };
455
456                         if (!e->ul_gpp_pa || !e->ul_dsp_va)
457                                 continue;
458
459                         dev_dbg(bridge,
460                                         "MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
461                                         itmp_entry_ndx,
462                                         e->ul_gpp_pa,
463                                         e->ul_dsp_va,
464                                         e->ul_size);
465
466                         hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
467                                         e->ul_gpp_pa,
468                                         e->ul_dsp_va,
469                                         e->ul_size,
470                                         itmp_entry_ndx,
471                                         &map_attrs, 1, 1);
472
473                         itmp_entry_ndx++;
474                 }
475         }
476
477         /* Lock the above TLB entries and get the BIOS and load monitor timer
478          * information */
479         if (DSP_SUCCEEDED(status)) {
480                 hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
481                 hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
482                 hw_mmu_ttb_set(resources->dw_dmmu_base,
483                                dev_context->pt_attrs->l1_base_pa);
484                 hw_mmu_twl_enable(resources->dw_dmmu_base);
485                 /* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
486
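                /* MMU_SYSCONFIG (offset 0x10): clear bit 4, then set bits 0
                 * and 4 to select auto-idle and smart-idle */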
487                 temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
488                 temp = (temp & 0xFFFFFFEF) | 0x11;
489                 __raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
490
491                 /* Let the DSP MMU run */
492                 hw_mmu_enable(resources->dw_dmmu_base);
493
494                 /* Enable the BIOS clock */
495                 (void)dev_get_symbol(dev_context->hdev_obj,
496                                      BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
497                 (void)dev_get_symbol(dev_context->hdev_obj,
498                                      BRIDGEINIT_LOADMON_GPTIMER,
499                                      &ul_load_monitor_timer);
500         }
501
502         if (DSP_SUCCEEDED(status)) {
503                 if (ul_load_monitor_timer != 0xFFFF) {
504                         clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
505                             ul_load_monitor_timer;
506                         dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
507                 } else {
508                         dev_dbg(bridge, "Not able to get the symbol for Load "
509                                 "Monitor Timer\n");
510                 }
511         }
512
513         if (DSP_SUCCEEDED(status)) {
514                 if (ul_bios_gp_timer != 0xFFFF) {
515                         clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
516                             ul_bios_gp_timer;
517                         dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
518                 } else {
519                         dev_dbg(bridge,
520                                 "Not able to get the symbol for BIOS Timer\n");
521                 }
522         }
523
524         if (DSP_SUCCEEDED(status)) {
525                 /* Set the DSP clock rate */
526                 (void)dev_get_symbol(dev_context->hdev_obj,
527                                      "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
528                 /*Set Autoidle Mode for IVA2 PLL */
529                 (*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
530                                 OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);
531
532                 if ((unsigned int *)ul_dsp_clk_addr != NULL) {
533                         /* Get the clock rate */
534                         ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
535                         dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x \n",
536                                 __func__, ul_dsp_clk_rate);
537                         (void)bridge_brd_write(dev_context,
538                                                (u8 *) &ul_dsp_clk_rate,
539                                                ul_dsp_clk_addr, sizeof(u32), 0);
540                 }
541                 /*
542                  * Enable Mailbox events and also drain any pending
543                  * stale messages.
544                  */
545                 dev_context->mbox = omap_mbox_get("dsp");
546                 if (IS_ERR(dev_context->mbox)) {
547                         dev_context->mbox = NULL;
548                         pr_err("%s: Failed to get dsp mailbox handle\n",
549                                                                 __func__);
550                         status = -EPERM;
551                 }
552
553         }
554         if (DSP_SUCCEEDED(status)) {
555                 dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg;
556
557 /*PM_IVA2GRPSEL_PER = 0xC0;*/
558                 temp = readl(resources->dw_per_pm_base + 0xA8);
559                 temp = (temp & 0xFFFFFF30) | 0xC0;
560                 writel(temp, resources->dw_per_pm_base + 0xA8);
561
562 /*PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
563                 temp = readl(resources->dw_per_pm_base + 0xA4);
564                 temp = (temp & 0xFFFFFF3F);
565                 writel(temp, resources->dw_per_pm_base + 0xA4);
566 /*CM_SLEEPDEP_PER |= 0x04; */
567                 temp = readl(resources->dw_per_base + 0x44);
568                 temp = (temp & 0xFFFFFFFB) | 0x04;
569                 writel(temp, resources->dw_per_base + 0x44);
570
571 /*CM_CLKSTCTRL_IVA2 = 0x00000003 -To Allow automatic transitions */
572                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
573                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
574
575                 /* Let DSP go */
576                 dev_dbg(bridge, "%s Unreset\n", __func__);
577                 /* Enable DSP MMU Interrupts */
578                 hw_mmu_event_enable(resources->dw_dmmu_base,
579                                     HW_MMU_ALL_INTERRUPTS);
580                 /* release the RST1, DSP starts executing now .. */
581                 (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
582                                         OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
583
584                 dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
585                 dev_dbg(bridge, "DSP c_int00 Address =  0x%x\n", dsp_addr);
586                 if (dsp_debug)
587                         while (*((volatile u16 *)dw_sync_addr))
588                                 ;
589
590                 /* Wait for DSP to clear word in shared memory */
591                 /* Read the Location */
592                 if (!wait_for_start(dev_context, dw_sync_addr))
593                         status = -ETIMEDOUT;
594
595                 /* Start wdt */
596                 dsp_wdt_sm_set((void *)ul_shm_base);
597                 dsp_wdt_enable(true);
598
599                 status = dev_get_io_mgr(dev_context->hdev_obj, &hio_mgr);
600                 if (hio_mgr) {
601                         io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
602                         /* Write the synchronization bit to indicate the
603                          * completion of OPP table update to DSP
604                          */
605                         *((volatile u32 *)dw_sync_addr) = 0XCAFECAFE;
606
607                         /* update board state */
608                         dev_context->dw_brd_state = BRD_RUNNING;
609                         /* (void)chnlsm_enable_interrupt(dev_context); */
610                 } else {
611                         dev_context->dw_brd_state = BRD_UNKNOWN;
612                 }
613         }
614         return status;
615 }
616
617 /*
618  *  ======== bridge_brd_stop ========
619  *  purpose:
620  *      Puts DSP in self loop.
621  *
622  *  Preconditions :
623  *  a) None
624  */
625 static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
626 {
627         int status = 0;
628         struct bridge_dev_context *dev_context = dev_ctxt;
629         struct pg_table_attrs *pt_attrs;
630         u32 dsp_pwr_state;
631         int clk_status;
632         struct dspbridge_platform_data *pdata =
633                                 omap_dspbridge_dev->dev.platform_data;
634
635         if (dev_context->dw_brd_state == BRD_STOPPED)
636                 return status;
637
638         /* as per TRM, it is advised to first drive the IVA2 to 'Standby' mode,
639          * before turning off the clocks. This is to ensure that there are no
640          * pending L3 or other transactions from IVA2 */
641         dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
642                                         OMAP_POWERSTATEST_MASK;
643         if (dsp_pwr_state != PWRDM_POWER_OFF) {
644                 sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
645                 mdelay(10);
646
647                 clk_status = dsp_clk_disable(DSP_CLK_IVA2);
648
649                 /* IVA2 is not in OFF state */
650                 /* Set PM_PWSTCTRL_IVA2  to OFF */
651                 (*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
652                         PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
653                 /* Set the SW supervised state transition for Sleep */
654                 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
655                                         OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
656         } else {
657                 clk_status = dsp_clk_disable(DSP_CLK_IVA2);
658         }
659         udelay(10);
660         /* Release the Ext Base virtual Address as the next DSP Program
661          * may have a different load address */
662         if (dev_context->dw_dsp_ext_base_addr)
663                 dev_context->dw_dsp_ext_base_addr = 0;
664
665         dev_context->dw_brd_state = BRD_STOPPED;        /* update board state */
666
667         dsp_wdt_enable(false);
668
669         /* This is a good place to clear the MMU page tables as well */
670         if (dev_context->pt_attrs) {
671                 pt_attrs = dev_context->pt_attrs;
672                 memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
673                 memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
674                 memset((u8 *) pt_attrs->pg_info, 0x00,
675                        (pt_attrs->l2_num_pages * sizeof(struct page_info)));
676         }
677         /* Disable the mailbox interrupts */
678         if (dev_context->mbox) {
679                 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
680                 omap_mbox_put(dev_context->mbox);
681                 dev_context->mbox = NULL;
682         }
683         /* Reset IVA2 clocks*/
684         (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
685                         OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
686
687         return status;
688 }
689
690 /*
691  *  ======== bridge_brd_delete ========
692  *  purpose:
693  *      Puts DSP in Low power mode
694  *
695  *  Preconditions :
696  *  a) None
697  */
698 static int bridge_brd_delete(struct bridge_dev_context *dev_ctxt)
699 {
700         int status = 0;
701         struct bridge_dev_context *dev_context = dev_ctxt;
702         struct pg_table_attrs *pt_attrs;
703         int clk_status;
704         struct dspbridge_platform_data *pdata =
705                                 omap_dspbridge_dev->dev.platform_data;
706
707         if (dev_context->dw_brd_state == BRD_STOPPED)
708                 return status;
709
710         /* as per TRM, it is advised to first drive
711          * the IVA2 to 'Standby' mode, before turning off the clocks. This is
712          * to ensure that there are no pending L3 or other transactions from
713          * IVA2 */
714         status = sleep_dsp(dev_context, PWR_EMERGENCYDEEPSLEEP, NULL);
715         clk_status = dsp_clk_disable(DSP_CLK_IVA2);
716
717         /* Release the Ext Base virtual Address as the next DSP Program
718          * may have a different load address */
719         if (dev_context->dw_dsp_ext_base_addr)
720                 dev_context->dw_dsp_ext_base_addr = 0;
721
722         dev_context->dw_brd_state = BRD_STOPPED;        /* update board state */
723
724         /* This is a good place to clear the MMU page tables as well */
725         if (dev_context->pt_attrs) {
726                 pt_attrs = dev_context->pt_attrs;
727                 memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
728                 memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
729                 memset((u8 *) pt_attrs->pg_info, 0x00,
730                        (pt_attrs->l2_num_pages * sizeof(struct page_info)));
731         }
732         /* Disable the mailbox interrupts */
733         if (dev_context->mbox) {
734                 omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
735                 omap_mbox_put(dev_context->mbox);
736                 dev_context->mbox = NULL;
737         }
738         /* Reset IVA2 clocks*/
739         (*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
740                         OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
741
742         return status;
743 }
744
745 /*
746  *  ======== bridge_brd_status ========
747  *      Returns the board status.
748  */
749 static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
750                                     int *board_state)
751 {
752         struct bridge_dev_context *dev_context = dev_ctxt;
753         *board_state = dev_context->dw_brd_state;
754         return 0;
755 }
756
757 /*
758  *  ======== bridge_brd_write ========
759  *      Copies the buffers to DSP internal or external memory.
760  */
761 static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
762                                    IN u8 *host_buff, u32 dsp_addr,
763                                    u32 ul_num_bytes, u32 mem_type)
764 {
765         int status = 0;
766         struct bridge_dev_context *dev_context = dev_ctxt;
767
768         if (dsp_addr < dev_context->dw_dsp_start_add) {
769                 status = -EPERM;
770                 return status;
771         }
772         if ((dsp_addr - dev_context->dw_dsp_start_add) <
773             dev_context->dw_internal_size) {
774                 status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
775                                         ul_num_bytes, mem_type);
776         } else {
777                 status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
778                                             ul_num_bytes, mem_type, false);
779         }
780
781         return status;
782 }
783
784 /*
785  *  ======== bridge_dev_create ========
786  *      Creates a driver object. Puts DSP in self loop.
787  */
788 static int bridge_dev_create(OUT struct bridge_dev_context
789                                         **dev_cntxt,
790                                         struct dev_object *hdev_obj,
791                                         IN struct cfg_hostres *config_param)
792 {
793         int status = 0;
794         struct bridge_dev_context *dev_context = NULL;
795         s32 entry_ndx;
796         struct cfg_hostres *resources = config_param;
797         struct pg_table_attrs *pt_attrs;
798         u32 pg_tbl_pa;
799         u32 pg_tbl_va;
800         u32 align_size;
801         struct drv_data *drv_datap = dev_get_drvdata(bridge);
802
803         /* Allocate and initialize a data structure to contain the bridge driver
804          *  state, which becomes the context for later calls into this driver */
805         dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
806         if (!dev_context) {
807                 status = -ENOMEM;
808                 goto func_end;
809         }
810
811         dev_context->dw_dsp_start_add = (u32) OMAP_GEM_BASE;
812         dev_context->dw_self_loop = (u32) NULL;
813         dev_context->dsp_per_clks = 0;
814         dev_context->dw_internal_size = OMAP_DSP_SIZE;
815         /*  Clear dev context MMU table entries.
816          *  These get set on bridge_io_on_loaded() call after program loaded. */
817         for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
818                 dev_context->atlb_entry[entry_ndx].ul_gpp_pa =
819                     dev_context->atlb_entry[entry_ndx].ul_dsp_va = 0;
820         }
821         dev_context->dw_dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
822                                                                  (config_param->
823                                                                   dw_mem_base
824                                                                   [3]),
825                                                                  config_param->
826                                                                  dw_mem_length
827                                                                  [3]);
828         if (!dev_context->dw_dsp_base_addr)
829                 status = -EPERM;
830
831         pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
832         if (pt_attrs != NULL) {
833                 /* Assuming that we use only the DSP's memory map
834                  * up to 0x40000000, we need only 1024
835                  * L1 entries, i.e. L1 size = 4K */
836                 pt_attrs->l1_size = 0x1000;
837                 align_size = pt_attrs->l1_size;
838                 /* Align sizes are expected to be power of 2 */
839                 /* we like to get aligned on L1 table size */
840                 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
841                                                      align_size, &pg_tbl_pa);
842
843                 /* Check if the PA is aligned for us */
844                 if ((pg_tbl_pa) & (align_size - 1)) {
845                         /* PA not aligned to page table size,
846                          * try a larger allocation and align */
847                         mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
848                                           pt_attrs->l1_size);
849                         /* we like to get aligned on L1 table size */
850                         pg_tbl_va =
851                             (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
852                                                      align_size, &pg_tbl_pa);
853                         /* We should be able to get aligned table now */
854                         pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
855                         pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
856                         pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
857                         /* Align the PA to the next 'align'  boundary */
858                         pt_attrs->l1_base_pa =
859                             ((pg_tbl_pa) +
860                              (align_size - 1)) & (~(align_size - 1));
861                         pt_attrs->l1_base_va =
862                             pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
863                 } else {
864                         /* We got aligned PA, cool */
865                         pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
866                         pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
867                         pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
868                         pt_attrs->l1_base_pa = pg_tbl_pa;
869                         pt_attrs->l1_base_va = pg_tbl_va;
870                 }
871                 if (pt_attrs->l1_base_va)
872                         memset((u8 *) pt_attrs->l1_base_va, 0x00,
873                                pt_attrs->l1_size);
874
875                 /* number of L2 page tables = DMM pool used + SHMMEM +EXTMEM +
876                  * L4 pages */
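                /* Each coarse L2 table maps one 1 MB DSP section, hence
                 * DMMPOOLSIZE >> 20 gives the count for the DMM pool */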
877                 pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
878                 pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
879                     pt_attrs->l2_num_pages;
880                 align_size = 4; /* Make it u32 aligned */
881                 /* Allocate physically contiguous memory for all L2 tables */
882                 pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
883                                                      align_size, &pg_tbl_pa);
884                 pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
885                 pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
886                 pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
887                 pt_attrs->l2_base_pa = pg_tbl_pa;
888                 pt_attrs->l2_base_va = pg_tbl_va;
889
890                 if (pt_attrs->l2_base_va)
891                         memset((u8 *) pt_attrs->l2_base_va, 0x00,
892                                pt_attrs->l2_size);
893
894                 pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
895                                         sizeof(struct page_info), GFP_KERNEL);
896                 dev_dbg(bridge,
897                         "L1 pa %x, va %x, size %x\n L2 pa %x, va "
898                         "%x, size %x\n", pt_attrs->l1_base_pa,
899                         pt_attrs->l1_base_va, pt_attrs->l1_size,
900                         pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
901                         pt_attrs->l2_size);
902                 dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
903                         pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
904         }
905         if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
906             (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
907                 dev_context->pt_attrs = pt_attrs;
908         else
909                 status = -ENOMEM;
910
911         if (DSP_SUCCEEDED(status)) {
912                 spin_lock_init(&pt_attrs->pg_lock);
913                 dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
914
915                 /* Set the Clock Divisor for the DSP module */
916                 udelay(5);
917                 /* MMU address is obtained from the host
918                  * resources struct */
919                 dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
920         }
921         if (DSP_SUCCEEDED(status)) {
922                 dev_context->hdev_obj = hdev_obj;
923                 /* Store current board state. */
924                 dev_context->dw_brd_state = BRD_STOPPED;
925                 dev_context->resources = resources;
926                 /* Return ptr to our device state to the DSP API for storage */
927                 *dev_cntxt = dev_context;
928         } else {
929                 if (pt_attrs != NULL) {
930                         kfree(pt_attrs->pg_info);
931
932                         if (pt_attrs->l2_tbl_alloc_va) {
933                                 mem_free_phys_mem((void *)
934                                                   pt_attrs->l2_tbl_alloc_va,
935                                                   pt_attrs->l2_tbl_alloc_pa,
936                                                   pt_attrs->l2_tbl_alloc_sz);
937                         }
938                         if (pt_attrs->l1_tbl_alloc_va) {
939                                 mem_free_phys_mem((void *)
940                                                   pt_attrs->l1_tbl_alloc_va,
941                                                   pt_attrs->l1_tbl_alloc_pa,
942                                                   pt_attrs->l1_tbl_alloc_sz);
943                         }
944                 }
945                 kfree(pt_attrs);
946                 kfree(dev_context);
947         }
948 func_end:
949         return status;
950 }
951
952 /*
953  *  ======== bridge_dev_ctrl ========
954  *      Receives device specific commands.
955  */
956 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
957                                   u32 dw_cmd, IN OUT void *pargs)
958 {
959         int status = 0;
960         struct bridge_ioctl_extproc *pa_ext_proc =
961                                         (struct bridge_ioctl_extproc *)pargs;
962         s32 ndx;
963
964         switch (dw_cmd) {
965         case BRDIOCTL_CHNLREAD:
966                 break;
967         case BRDIOCTL_CHNLWRITE:
968                 break;
969         case BRDIOCTL_SETMMUCONFIG:
970                 /* store away dsp-mmu setup values for later use */
971                 for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
972                         dev_context->atlb_entry[ndx] = *pa_ext_proc;
973                 break;
974         case BRDIOCTL_DEEPSLEEP:
975         case BRDIOCTL_EMERGENCYSLEEP:
976                 /* Currently only DSP Idle is supported. Need to update for
977                  * later releases */
978                 status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
979                 break;
980         case BRDIOCTL_WAKEUP:
981                 status = wake_dsp(dev_context, pargs);
982                 break;
983         case BRDIOCTL_CLK_CTRL:
984                 status = 0;
985                 /* Looking For Baseport Fix for Clocks */
986                 status = dsp_peripheral_clk_ctrl(dev_context, pargs);
987                 break;
988         case BRDIOCTL_PWR_HIBERNATE:
989                 status = handle_hibernation_from_dsp(dev_context);
990                 break;
991         case BRDIOCTL_PRESCALE_NOTIFY:
992                 status = pre_scale_dsp(dev_context, pargs);
993                 break;
994         case BRDIOCTL_POSTSCALE_NOTIFY:
995                 status = post_scale_dsp(dev_context, pargs);
996                 break;
997         case BRDIOCTL_CONSTRAINT_REQUEST:
998                 status = handle_constraints_set(dev_context, pargs);
999                 break;
1000         default:
1001                 status = -EPERM;
1002                 break;
1003         }
1004         return status;
1005 }
1006
1007 /*
1008  *  ======== bridge_dev_destroy ========
1009  *      Destroys the driver object.
1010  */
1011 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
1012 {
1013         struct pg_table_attrs *pt_attrs;
1014         int status = 0;
1015         struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
1016             dev_ctxt;
1017         struct cfg_hostres *host_res;
1018         u32 shm_size;
1019         struct drv_data *drv_datap = dev_get_drvdata(bridge);
1020
1021         /* It should never happen */
1022         if (!dev_ctxt)
1023                 return -EFAULT;
1024
1025         /* first put the device to stop state */
1026         bridge_brd_delete(dev_context);
1027         if (dev_context->pt_attrs) {
1028                 pt_attrs = dev_context->pt_attrs;
1029                 kfree(pt_attrs->pg_info);
1030
1031                 if (pt_attrs->l2_tbl_alloc_va) {
1032                         mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
1033                                           pt_attrs->l2_tbl_alloc_pa,
1034                                           pt_attrs->l2_tbl_alloc_sz);
1035                 }
1036                 if (pt_attrs->l1_tbl_alloc_va) {
1037                         mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
1038                                           pt_attrs->l1_tbl_alloc_pa,
1039                                           pt_attrs->l1_tbl_alloc_sz);
1040                 }
1041                 kfree(pt_attrs);
1042
1043         }
1044
1045         if (dev_context->resources) {
1046                 host_res = dev_context->resources;
1047                 shm_size = drv_datap->shm_size;
1048                 if (shm_size >= 0x10000) {
1049                         if ((host_res->dw_mem_base[1]) &&
1050                             (host_res->dw_mem_phys[1])) {
1051                                 mem_free_phys_mem((void *)
1052                                                   host_res->dw_mem_base
1053                                                   [1],
1054                                                   host_res->dw_mem_phys
1055                                                   [1], shm_size);
1056                         }
1057                 } else {
1058                         dev_dbg(bridge, "%s: Error getting shm size "
1059                                 "from registry: %x. Not calling "
1060                                 "mem_free_phys_mem\n", __func__,
1061                                 status);
1062                 }
1063                 host_res->dw_mem_base[1] = 0;
1064                 host_res->dw_mem_phys[1] = 0;
1065
1066                 if (host_res->dw_mem_base[0])
1067                         iounmap((void *)host_res->dw_mem_base[0]);
1068                 if (host_res->dw_mem_base[2])
1069                         iounmap((void *)host_res->dw_mem_base[2]);
1070                 if (host_res->dw_mem_base[3])
1071                         iounmap((void *)host_res->dw_mem_base[3]);
1072                 if (host_res->dw_mem_base[4])
1073                         iounmap((void *)host_res->dw_mem_base[4]);
1074                 if (host_res->dw_dmmu_base)
1075                         iounmap(host_res->dw_dmmu_base);
1076                 if (host_res->dw_per_base)
1077                         iounmap(host_res->dw_per_base);
1078                 if (host_res->dw_per_pm_base)
1079                         iounmap((void *)host_res->dw_per_pm_base);
1080                 if (host_res->dw_core_pm_base)
1081                         iounmap((void *)host_res->dw_core_pm_base);
1082                 if (host_res->dw_sys_ctrl_base)
1083                         iounmap(host_res->dw_sys_ctrl_base);
1084
1085                 host_res->dw_mem_base[0] = (u32) NULL;
1086                 host_res->dw_mem_base[2] = (u32) NULL;
1087                 host_res->dw_mem_base[3] = (u32) NULL;
1088                 host_res->dw_mem_base[4] = (u32) NULL;
1089                 host_res->dw_dmmu_base = NULL;
1090                 host_res->dw_sys_ctrl_base = NULL;
1091
1092                 kfree(host_res);
1093         }
1094
1095         /* Free the driver's device context: */
1096         kfree(drv_datap->base_img);
1097         kfree(drv_datap);
1098         dev_set_drvdata(bridge, NULL);
1099         kfree((void *)dev_ctxt);
1100         return status;
1101 }
1102
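/*
 *  ======== bridge_brd_mem_copy ========
 *      Copies data between DSP addresses through a local bounce buffer:
 *      reads from external DSP memory, then writes to internal or external
 *      DSP memory in BUFFERSIZE chunks.
 */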
1103 static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
1104                                    u32 dsp_dest_addr, u32 dsp_src_addr,
1105                                    u32 ul_num_bytes, u32 mem_type)
1106 {
1107         int status = 0;
1108         u32 src_addr = dsp_src_addr;
1109         u32 dest_addr = dsp_dest_addr;
1110         u32 copy_bytes = 0;
1111         u32 total_bytes = ul_num_bytes;
1112         u8 host_buf[BUFFERSIZE];
1113         struct bridge_dev_context *dev_context = dev_ctxt;
1114         while ((total_bytes > 0) && DSP_SUCCEEDED(status)) {
1115                 copy_bytes =
1116                     total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
1117                 /* Read from External memory */
1118                 status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
1119                                            copy_bytes, mem_type);
1120                 if (DSP_SUCCEEDED(status)) {
1121                         if (dest_addr < (dev_context->dw_dsp_start_add +
1122                                          dev_context->dw_internal_size)) {
1123                                 /* Write to Internal memory */
1124                                 status = write_dsp_data(dev_ctxt, host_buf,
1125                                                         dest_addr, copy_bytes,
1126                                                         mem_type);
1127                         } else {
1128                                 /* Write to External memory */
1129                                 status =
1130                                     write_ext_dsp_data(dev_ctxt, host_buf,
1131                                                        dest_addr, copy_bytes,
1132                                                        mem_type, false);
1133                         }
1134                 }
1135                 total_bytes -= copy_bytes;
1136                 src_addr += copy_bytes;
1137                 dest_addr += copy_bytes;
1138         }
1139         return status;
1140 }
1141
1142 /* Unlike bridge_brd_write, mem write does not halt the DSP before writing */
1143 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
1144                                     IN u8 *host_buff, u32 dsp_addr,
1145                                     u32 ul_num_bytes, u32 mem_type)
1146 {
1147         int status = 0;
1148         struct bridge_dev_context *dev_context = dev_ctxt;
1149         u32 ul_remain_bytes = 0;
1150         u32 ul_bytes = 0;
1151         ul_remain_bytes = ul_num_bytes;
1152         while (ul_remain_bytes > 0 && DSP_SUCCEEDED(status)) {
1153                 ul_bytes =
1154                     ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
1155                 if (dsp_addr < (dev_context->dw_dsp_start_add +
1156                                  dev_context->dw_internal_size)) {
1157                         status =
1158                             write_dsp_data(dev_ctxt, host_buff, dsp_addr,
1159                                            ul_bytes, mem_type);
1160                 } else {
1161                         status = write_ext_dsp_data(dev_ctxt, host_buff,
1162                                                     dsp_addr, ul_bytes,
1163                                                     mem_type, true);
1164                 }
1165                 ul_remain_bytes -= ul_bytes;
1166                 dsp_addr += ul_bytes;
1167                 host_buff = host_buff + ul_bytes;
1168         }
1169         return status;
1170 }
1171
1172 /*
1173  *  ======== bridge_brd_mem_map ========
1174  *      This function maps an MPU buffer into the DSP address space. It performs
1175  *  linear to physical address translation if required. It translates each
1176  *  page since linear addresses can be physically non-contiguous.
1177  *  All address & size arguments are assumed to be page aligned (in proc.c).
1178  *
1179  *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
1180  */
1181 static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
1182                                   u32 ul_mpu_addr, u32 virt_addr,
1183                                   u32 ul_num_bytes, u32 ul_map_attr,
1184                                   struct page **mapped_pages)
1185 {
1186         u32 attrs;
1187         int status = 0;
1188         struct bridge_dev_context *dev_context = dev_ctxt;
1189         struct hw_mmu_map_attrs_t hw_attrs;
1190         struct vm_area_struct *vma;
1191         struct mm_struct *mm = current->mm;
1192         u32 write = 0;
1193         u32 num_usr_pgs = 0;
1194         struct page *mapped_page, *pg;
1195         s32 pg_num;
1196         u32 va = virt_addr;
1197         struct task_struct *curr_task = current;
1198         u32 pg_i = 0;
1199         u32 mpu_addr, pa;
1200
1201         dev_dbg(bridge,
1202                 "%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
1203                 __func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
1204                 ul_map_attr);
1205         if (ul_num_bytes == 0)
1206                 return -EINVAL;
1207
1208         if (ul_map_attr & DSP_MAP_DIR_MASK) {
1209                 attrs = ul_map_attr;
1210         } else {
1211                 /* Assign default attributes */
1212                 attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
1213         }
1214         /* Take mapping properties */
1215         if (attrs & DSP_MAPBIGENDIAN)
1216                 hw_attrs.endianism = HW_BIG_ENDIAN;
1217         else
1218                 hw_attrs.endianism = HW_LITTLE_ENDIAN;
1219
1220         hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
1221             ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
1222         /* Ignore element_size if mixed_size is enabled */
1223         if (hw_attrs.mixed_size == 0) {
1224                 if (attrs & DSP_MAPELEMSIZE8) {
1225                         /* Size is 8 bit */
1226                         hw_attrs.element_size = HW_ELEM_SIZE8BIT;
1227                 } else if (attrs & DSP_MAPELEMSIZE16) {
1228                         /* Size is 16 bit */
1229                         hw_attrs.element_size = HW_ELEM_SIZE16BIT;
1230                 } else if (attrs & DSP_MAPELEMSIZE32) {
1231                         /* Size is 32 bit */
1232                         hw_attrs.element_size = HW_ELEM_SIZE32BIT;
1233                 } else if (attrs & DSP_MAPELEMSIZE64) {
1234                         /* Size is 64 bit */
1235                         hw_attrs.element_size = HW_ELEM_SIZE64BIT;
1236                 } else {
1237                         /*
1238                          * Mixedsize isn't enabled, so size can't be
1239                          * zero here
1240                          */
1241                         return -EINVAL;
1242                 }
1243         }
1244         if (attrs & DSP_MAPDONOTLOCK)
1245                 hw_attrs.donotlockmpupage = 1;
1246         else
1247                 hw_attrs.donotlockmpupage = 0;
1248
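        /*
         * vmalloc'ed buffers are virtually contiguous in kernel space but may
         * be physically scattered, so they are translated page by page in
         * mem_map_vmalloc() below rather than through the user-VA path.
         */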
1249         if (attrs & DSP_MAPVMALLOCADDR) {
1250                 return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
1251                                        ul_num_bytes, &hw_attrs);
1252         }
1253         /*
1254          * Do OS-specific user-va to pa translation.
1255          * Combine physically contiguous regions to reduce TLBs.
1256          * Pass the translated pa to pte_update.
1257          */
1258         if ((attrs & DSP_MAPPHYSICALADDR)) {
1259                 status = pte_update(dev_context, ul_mpu_addr, virt_addr,
1260                                     ul_num_bytes, &hw_attrs);
1261                 goto func_cont;
1262         }
1263
1264         /*
1265          * Important Note: ul_mpu_addr is mapped from user application process
1266          * to current process - it must lie completely within the current
1267          * virtual memory address space in order to be of use to us here!
1268          */
1269         down_read(&mm->mmap_sem);
1270         vma = find_vma(mm, ul_mpu_addr);
1271         if (vma)
1272                 dev_dbg(bridge,
1273                         "VMA for UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
1274                         "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1275                         ul_num_bytes, vma->vm_start, vma->vm_end,
1276                         vma->vm_flags);
1277
1278         /*
1279          * It is observed that under some circumstances, the user buffer is
1280          * spread across several VMAs. So loop through and check if the entire
1281          * user buffer is covered
1282          */
1283         while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
1284                 /* jump to the next VMA region */
1285                 vma = find_vma(mm, vma->vm_end + 1);
1286                 dev_dbg(bridge,
1287                         "VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
1288                         "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
1289                         ul_num_bytes, vma->vm_start, vma->vm_end,
1290                         vma->vm_flags);
1291         }
1292         if (!vma) {
1293                 pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
1294                        __func__, ul_mpu_addr, ul_num_bytes);
1295                 status = -EINVAL;
1296                 up_read(&mm->mmap_sem);
1297                 goto func_cont;
1298         }
1299
1300         if (vma->vm_flags & VM_IO) {
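                /*
                 * get_user_pages() typically rejects VM_IO mappings, so for
                 * such regions each page is translated by walking the page
                 * tables directly via user_va2_pa().
                 */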
1301                 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1302                 mpu_addr = ul_mpu_addr;
1303
1304                 /* Get the physical addresses for user buffer */
1305                 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1306                         pa = user_va2_pa(mm, mpu_addr);
1307                         if (!pa) {
1308                                 status = -EPERM;
1309                                 pr_err("DSPBRIDGE: VM_IO mapping physical"
1310                                        " address is invalid\n");
1311                                 break;
1312                         }
1313                         if (pfn_valid(__phys_to_pfn(pa))) {
1314                                 pg = PHYS_TO_PAGE(pa);
1315                                 get_page(pg);
1316                                 if (page_count(pg) < 1) {
1317                                         pr_err("Bad page in VM_IO buffer\n");
1318                                         bad_page_dump(pa, pg);
1319                                 }
1320                         }
1321                         status = pte_set(dev_context->pt_attrs, pa,
1322                                          va, HW_PAGE_SIZE4KB, &hw_attrs);
1323                         if (DSP_FAILED(status))
1324                                 break;
1325
1326                         va += HW_PAGE_SIZE4KB;
1327                         mpu_addr += HW_PAGE_SIZE4KB;
1328                         pa += HW_PAGE_SIZE4KB;
1329                 }
1330         } else {
1331                 num_usr_pgs = ul_num_bytes / PG_SIZE4K;
1332                 if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
1333                         write = 1;
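                /*
                 * Pin each 4 KB user page with get_user_pages() and program a
                 * matching DSP PTE; the references are dropped again in
                 * bridge_brd_mem_un_map() via page_cache_release().
                 */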
1334
1335                 for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
1336                         pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
1337                                                 write, 1, &mapped_page, NULL);
1338                         if (pg_num > 0) {
1339                                 if (page_count(mapped_page) < 1) {
1340                                         pr_err("Bad page count after doing "
1341                                                "get_user_pages on "
1342                                                "user buffer\n");
1343                                         bad_page_dump(page_to_phys(mapped_page),
1344                                                       mapped_page);
1345                                 }
1346                                 status = pte_set(dev_context->pt_attrs,
1347                                                  page_to_phys(mapped_page), va,
1348                                                  HW_PAGE_SIZE4KB, &hw_attrs);
1349                                 if (DSP_FAILED(status))
1350                                         break;
1351
1352                                 if (mapped_pages)
1353                                         mapped_pages[pg_i] = mapped_page;
1354
1355                                 va += HW_PAGE_SIZE4KB;
1356                                 ul_mpu_addr += HW_PAGE_SIZE4KB;
1357                         } else {
1358                                 pr_err("DSPBRIDGE: get_user_pages FAILED, "
1359                                        "MPU addr = 0x%x, "
1360                                        "vma->vm_flags = 0x%lx, "
1361                                        "get_user_pages err "
1362                                        "value = %d, buffer "
1363                                        "size = 0x%x\n", ul_mpu_addr,
1364                                        vma->vm_flags, pg_num, ul_num_bytes);
1365                                 status = -EPERM;
1366                                 break;
1367                         }
1368                 }
1369         }
1370         up_read(&mm->mmap_sem);
1371 func_cont:
1372         if (DSP_SUCCEEDED(status)) {
1373                 status = 0;
1374         } else {
1375                 /*
1376                  * Roll back the pages already mapped in case the mapping
1377                  * failed partway through
1378                  */
1379                 if (pg_i) {
1380                         bridge_brd_mem_un_map(dev_context, virt_addr,
1381                                            (pg_i * PG_SIZE4K));
1382                 }
1383                 status = -EPERM;
1384         }
1385         /*
1386          * In any case, flush the TLB
1387          * This is called from here instead of from pte_update to avoid unnecessary
1388          * repetition while mapping non-contiguous physical regions of a virtual
1389          * region
1390          */
1391         flush_all(dev_context);
1392         dev_dbg(bridge, "%s status %x\n", __func__, status);
1393         return status;
1394 }
1395
1396 /*
1397  *  ======== bridge_brd_mem_un_map ========
1398  *      Invalidate the PTEs for the DSP VA block to be unmapped.
1399  *
1400  *      PTEs of a mapped memory block are contiguous in any page table,
1401  *      so instead of looking up the PTE address for every 4K block,
1402  *      we clear consecutive PTEs until all the bytes are unmapped.
1403  */
1404 static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
1405                                      u32 virt_addr, u32 ul_num_bytes)
1406 {
1407         u32 l1_base_va;
1408         u32 l2_base_va;
1409         u32 l2_base_pa;
1410         u32 l2_page_num;
1411         u32 pte_val;
1412         u32 pte_size;
1413         u32 pte_count;
1414         u32 pte_addr_l1;
1415         u32 pte_addr_l2 = 0;
1416         u32 rem_bytes;
1417         u32 rem_bytes_l2;
1418         u32 va_curr;
1419         struct page *pg = NULL;
1420         int status = 0;
1421         struct bridge_dev_context *dev_context = dev_ctxt;
1422         struct pg_table_attrs *pt = dev_context->pt_attrs;
1423         u32 temp;
1424         u32 paddr;
1425         u32 numof4k_pages = 0;
1426
1427         va_curr = virt_addr;
1428         rem_bytes = ul_num_bytes;
1429         rem_bytes_l2 = 0;
1430         l1_base_va = pt->l1_base_va;
1431         pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1432         dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
1433                 "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
1434                 ul_num_bytes, l1_base_va, pte_addr_l1);
1435
1436         while (rem_bytes && (DSP_SUCCEEDED(status))) {
1437                 u32 va_curr_orig = va_curr;
1438                 /* Find whether the L1 PTE points to a valid L2 PT */
1439                 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
1440                 pte_val = *(u32 *) pte_addr_l1;
1441                 pte_size = hw_mmu_pte_size_l1(pte_val);
1442
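                /*
                 * An L1 entry either points to an L2 (coarse) page table or
                 * maps a 1 MB section / 16 MB supersection directly; the
                 * section case is handled at skip_coarse_page below.
                 */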
1443                 if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
1444                         goto skip_coarse_page;
1445
1446                 /*
1447                  * Get the L2 PA from the L1 PTE, and find
1448                  * corresponding L2 VA
1449                  */
1450                 l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1451                 l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1452                 l2_page_num =
1453                     (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1454                 /*
1455                  * Find the L2 PTE address from which we will start
1456                  * clearing, the number of PTEs to be cleared on this
1457                  * page, and the size of VA space that needs to be
1458                  * cleared on this L2 page
1459                  */
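                /*
                 * pte_count starts as the byte offset of pte_addr_l2 within
                 * the coarse table (1 KB, i.e. 256 entries, on ARM), is then
                 * converted to the number of entries from va_curr to the end
                 * of that table, and is finally clamped to what rem_bytes
                 * actually requires.
                 */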
1460                 pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
1461                 pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
1462                 pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
1463                 if (rem_bytes < (pte_count * PG_SIZE4K))
1464                         pte_count = rem_bytes / PG_SIZE4K;
1465                 rem_bytes_l2 = pte_count * PG_SIZE4K;
1466
1467                 /*
1468                  * Unmap the VA space on this L2 PT. A quicker way
1469                  * would be to clear pte_count entries starting from
1470                  * pte_addr_l2. However, below code checks that we don't
1471                  * clear invalid entries or less than 64KB for a 64KB
1472                  * entry. Similar checking is done for L1 PTEs too
1473                  * below
1474                  */
1475                 while (rem_bytes_l2 && (DSP_SUCCEEDED(status))) {
1476                         pte_val = *(u32 *) pte_addr_l2;
1477                         pte_size = hw_mmu_pte_size_l2(pte_val);
1478                         /* va_curr aligned to pte_size? */
1479                         if (pte_size == 0 || rem_bytes_l2 < pte_size ||
1480                             va_curr & (pte_size - 1)) {
1481                                 status = -EPERM;
1482                                 break;
1483                         }
1484
1485                         /* Collect Physical addresses from VA */
1486                         paddr = (pte_val & ~(pte_size - 1));
1487                         if (pte_size == HW_PAGE_SIZE64KB)
1488                                 numof4k_pages = 16;
1489                         else
1490                                 numof4k_pages = 1;
1491                         temp = 0;
1492                         while (temp++ < numof4k_pages) {
1493                                 if (!pfn_valid(__phys_to_pfn(paddr))) {
1494                                         paddr += HW_PAGE_SIZE4KB;
1495                                         continue;
1496                                 }
1497                                 pg = PHYS_TO_PAGE(paddr);
1498                                 if (page_count(pg) < 1) {
1499                                         pr_info("DSPBRIDGE: UNMAP function: "
1500                                                 "COUNT 0 FOR PA 0x%x, size = "
1501                                                 "0x%x\n", paddr, ul_num_bytes);
1502                                         bad_page_dump(paddr, pg);
1503                                 } else {
1504                                         set_page_dirty(pg);
1505                                         page_cache_release(pg);
1506                                 }
1507                                 paddr += HW_PAGE_SIZE4KB;
1508                         }
1509                         if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
1510                                 status = -EPERM;
1511                                 goto EXIT_LOOP;
1512                         }
1513
1514                         status = 0;
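                        /*
                         * Advance one L2 entry per 4 KB page just cleared:
                         * pte_size >> 12 is 1 for a 4 KB PTE and 16 for a
                         * 64 KB large page, which occupies 16 identical
                         * consecutive entries.
                         */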
1515                         rem_bytes_l2 -= pte_size;
1516                         va_curr += pte_size;
1517                         pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
1518                 }
1519                 spin_lock(&pt->pg_lock);
1520                 if (rem_bytes_l2 == 0) {
1521                         pt->pg_info[l2_page_num].num_entries -= pte_count;
1522                         if (pt->pg_info[l2_page_num].num_entries == 0) {
1523                                 /*
1524                                  * Clear the L1 PTE pointing to the L2 PT
1525                                  */
1526                                 if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
1527                                                      HW_MMU_COARSE_PAGE_SIZE))
1528                                         status = 0;
1529                                 else {
1530                                         status = -EPERM;
1531                                         spin_unlock(&pt->pg_lock);
1532                                         goto EXIT_LOOP;
1533                                 }
1534                         }
1535                         rem_bytes -= pte_count * PG_SIZE4K;
1536                 } else
1537                         status = -EPERM;
1538
1539                 spin_unlock(&pt->pg_lock);
1540                 continue;
1541 skip_coarse_page:
1542                 /* va_curr aligned to pte_size? */
1543                 /* pte_size = 1 MB or 16 MB */
1544                 if (pte_size == 0 || rem_bytes < pte_size ||
1545                     va_curr & (pte_size - 1)) {
1546                         status = -EPERM;
1547                         break;
1548                 }
1549
1550                 if (pte_size == HW_PAGE_SIZE1MB)
1551                         numof4k_pages = 256;
1552                 else
1553                         numof4k_pages = 4096;
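                /*
                 * A 1 MB section spans 256 4 KB pages; a 16 MB supersection
                 * spans 4096.
                 */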
1554                 temp = 0;
1555                 /* Collect Physical addresses from VA */
1556                 paddr = (pte_val & ~(pte_size - 1));
1557                 while (temp++ < numof4k_pages) {
1558                         if (pfn_valid(__phys_to_pfn(paddr))) {
1559                                 pg = PHYS_TO_PAGE(paddr);
1560                                 if (page_count(pg) < 1) {
1561                                         pr_info("DSPBRIDGE: UNMAP function: "
1562                                                 "COUNT 0 FOR PA 0x%x, size = "
1563                                                 "0x%x\n", paddr, ul_num_bytes);
1564                                         bad_page_dump(paddr, pg);
1565                                 } else {
1566                                         set_page_dirty(pg);
1567                                         page_cache_release(pg);
1568                                 }
1569                         }
1570                         paddr += HW_PAGE_SIZE4KB;
1571                 }
1572                 if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
1573                         status = 0;
1574                         rem_bytes -= pte_size;
1575                         va_curr += pte_size;
1576                 } else {
1577                         status = -EPERM;
1578                         goto EXIT_LOOP;
1579                 }
1580         }
1581         /*
1582          * It is better to flush the TLB here, so that any stale old entries
1583          * get flushed
1584          */
1585 EXIT_LOOP:
1586         flush_all(dev_context);
1587         dev_dbg(bridge,
1588                 "%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
1589                 " rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
1590                 pte_addr_l2, rem_bytes, rem_bytes_l2, status);
1591         return status;
1592 }
1593
1594 /*
1595  *  ======== user_va2_pa ========
1596  *  Purpose:
1597  *      This function walks through the page tables to convert a userland
1598  *      virtual address to physical address
1599  */
1600 static u32 user_va2_pa(struct mm_struct *mm, u32 address)
1601 {
1602         pgd_t *pgd;
1603         pmd_t *pmd;
1604         pte_t *ptep, pte;
1605
1606         pgd = pgd_offset(mm, address);
1607         if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
1608                 pmd = pmd_offset(pgd, address);
1609                 if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
1610                         ptep = pte_offset_map(pmd, address);
1611                         if (ptep) {
1612                                 pte = *ptep;
1613                                 if (pte_present(pte))
1614                                         return pte_val(pte) & PAGE_MASK;
1615                         }
1616                 }
1617         }
1618
1619         return 0;
1620 }
1621
1622 /*
1623  *  ======== pte_update ========
1624  *      This function calculates the optimum page-aligned addresses and sizes
1625  *      Caller must pass page-aligned values
1626  */
1627 static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
1628                              u32 va, u32 size,
1629                              struct hw_mmu_map_attrs_t *map_attrs)
1630 {
1631         u32 i;
1632         u32 all_bits;
1633         u32 pa_curr = pa;
1634         u32 va_curr = va;
1635         u32 num_bytes = size;
1636         struct bridge_dev_context *dev_context = dev_ctxt;
1637         int status = 0;
1638         u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
1639                 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
1640         };
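        /*
         * Candidate MMU page sizes, largest first: the loop below always
         * picks the biggest size for which both PA and VA are aligned and
         * enough bytes remain, minimising the number of PTEs used.
         */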
1641
1642         while (num_bytes && DSP_SUCCEEDED(status)) {
1643                 /* To find the max. page size with which both PA & VA are
1644                  * aligned */
1645                 all_bits = pa_curr | va_curr;
1646
1647                 for (i = 0; i < 4; i++) {
1648                         if ((num_bytes >= page_size[i]) && ((all_bits &
1649                                                              (page_size[i] -
1650                                                               1)) == 0)) {
1651                                 status =
1652                                     pte_set(dev_context->pt_attrs, pa_curr,
1653                                             va_curr, page_size[i], map_attrs);
1654                                 pa_curr += page_size[i];
1655                                 va_curr += page_size[i];
1656                                 num_bytes -= page_size[i];
1657                                 /* Don't try smaller sizes. Hopefully we have
1658                                  * reached an address aligned to a bigger page
1659                                  * size */
1660                                 break;
1661                         }
1662                 }
1663         }
1664
1665         return status;
1666 }
1667
1668 /*
1669  *  ======== pte_set ========
1670  *      This function calculates PTE address (MPU virtual) to be updated
1671  *      It also manages the L2 page tables
1672  */
1673 static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
1674                           u32 size, struct hw_mmu_map_attrs_t *attrs)
1675 {
1676         u32 i;
1677         u32 pte_val;
1678         u32 pte_addr_l1;
1679         u32 pte_size;
1680         /* Base address of the PT that will be updated */
1681         u32 pg_tbl_va;
1682         u32 l1_base_va;
1683         /* Compiler warns that the next three variables might be used
1684          * uninitialized in this function. Doesn't seem so. Working around,
1685          * anyways. */
1686         u32 l2_base_va = 0;
1687         u32 l2_base_pa = 0;
1688         u32 l2_page_num = 0;
1689         int status = 0;
1690
1691         l1_base_va = pt->l1_base_va;
1692         pg_tbl_va = l1_base_va;
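        /*
         * 1 MB sections and 16 MB supersections are written straight into the
         * L1 table; 4 KB and 64 KB pages need an L2 (coarse) table, which is
         * located or allocated below.
         */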
1693         if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
1694                 /* Find whether the L1 PTE points to a valid L2 PT */
1695                 pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
1696                 if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
1697                         pte_val = *(u32 *) pte_addr_l1;
1698                         pte_size = hw_mmu_pte_size_l1(pte_val);
1699                 } else {
1700                         return -EPERM;
1701                 }
1702                 spin_lock(&pt->pg_lock);
1703                 if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
1704                         /* Get the L2 PA from the L1 PTE, and find
1705                          * corresponding L2 VA */
1706                         l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
1707                         l2_base_va =
1708                             l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
1709                         l2_page_num =
1710                             (l2_base_pa -
1711                              pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
1712                 } else if (pte_size == 0) {
1713                         /* L1 PTE is invalid. Allocate a L2 PT and
1714                          * point the L1 PTE to it */
1715                         /* Find a free L2 PT. */
1716                         for (i = 0; (i < pt->l2_num_pages) &&
1717                              (pt->pg_info[i].num_entries != 0); i++)
1718                                 ;
1719                         if (i < pt->l2_num_pages) {
1720                                 l2_page_num = i;
1721                                 l2_base_pa = pt->l2_base_pa + (l2_page_num *
1722                                                 HW_MMU_COARSE_PAGE_SIZE);
1723                                 l2_base_va = pt->l2_base_va + (l2_page_num *
1724                                                 HW_MMU_COARSE_PAGE_SIZE);
1725                                 /* Endianness attributes are ignored for
1726                                  * HW_MMU_COARSE_PAGE_SIZE */
1727                                 status =
1728                                     hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
1729                                                    HW_MMU_COARSE_PAGE_SIZE,
1730                                                    attrs);
1731                         } else {
1732                                 status = -ENOMEM;
1733                         }
1734                 } else {
1735                         /* Found valid L1 PTE of another size.
1736                          * Should not overwrite it. */
1737                         status = -EPERM;
1738                 }
1739                 if (DSP_SUCCEEDED(status)) {
1740                         pg_tbl_va = l2_base_va;
1741                         if (size == HW_PAGE_SIZE64KB)
1742                                 pt->pg_info[l2_page_num].num_entries += 16;
1743                         else
1744                                 pt->pg_info[l2_page_num].num_entries++;
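                        /*
                         * A 64 KB large page occupies 16 consecutive L2
                         * entries, hence the larger increment.
                         */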
1745                         dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
1746                                 "%x, num_entries %x\n", l2_base_va,
1747                                 l2_base_pa, l2_page_num,
1748                                 pt->pg_info[l2_page_num].num_entries);
1749                 }
1750                 spin_unlock(&pt->pg_lock);
1751         }
1752         if (DSP_SUCCEEDED(status)) {
1753                 dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
1754                         pg_tbl_va, pa, va, size);
1755                 dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
1756                         "mixed_size %x\n", attrs->endianism,
1757                         attrs->element_size, attrs->mixed_size);
1758                 status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
1759         }
1760
1761         return status;
1762 }
1763
1764 /* Memory map kernel VA -- memory allocated with vmalloc */
1765 static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
1766                                   u32 ul_mpu_addr, u32 virt_addr,
1767                                   u32 ul_num_bytes,
1768                                   struct hw_mmu_map_attrs_t *hw_attrs)
1769 {
1770         int status = 0;
1771         struct page *page[1];
1772         u32 i;
1773         u32 pa_curr;
1774         u32 pa_next;
1775         u32 va_curr;
1776         u32 size_curr;
1777         u32 num_pages;
1778         u32 pa;
1779         u32 num_of4k_pages;
1780         u32 temp = 0;
1781
1782         /*
1783          * Do Kernel va to pa translation.
1784          * Combine physically contiguous regions to reduce TLBs.
1785          * Pass the translated pa to pte_update.
1786          */
1787         num_pages = ul_num_bytes / PAGE_SIZE;   /* PAGE_SIZE = OS page size */
1788         i = 0;
1789         va_curr = ul_mpu_addr;
1790         page[0] = vmalloc_to_page((void *)va_curr);
1791         pa_next = page_to_phys(page[0]);
1792         while (DSP_SUCCEEDED(status) && (i < num_pages)) {
1793                 /*
1794                  * Reuse pa_next from the previous iteration to avoid
1795                  * an extra va2pa call
1796                  */
1797                 pa_curr = pa_next;
1798                 size_curr = PAGE_SIZE;
1799                 /*
1800                  * If the next page is physically contiguous,
1801                  * map it with the current one by increasing
1802                  * the size of the region to be mapped
1803                  */
1804                 while (++i < num_pages) {
1805                         page[0] =
1806                             vmalloc_to_page((void *)(va_curr + size_curr));
1807                         pa_next = page_to_phys(page[0]);
1808
1809                         if (pa_next == (pa_curr + size_curr))
1810                                 size_curr += PAGE_SIZE;
1811                         else
1812                                 break;
1813
1814                 }
1815                 if (pa_next == 0) {
1816                         status = -ENOMEM;
1817                         break;
1818                 }
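                /*
                 * Take a reference on every 4 KB page in this physically
                 * contiguous run; bridge_brd_mem_un_map() drops it again with
                 * page_cache_release().
                 */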
1819                 pa = pa_curr;
1820                 num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
1821                 for (temp = 0; temp < num_of4k_pages; temp++) {
1822                         get_page(PHYS_TO_PAGE(pa));
1823                         pa += HW_PAGE_SIZE4KB;
1824                 }
1825                 status = pte_update(dev_context, pa_curr, virt_addr +
1826                                     (va_curr - ul_mpu_addr), size_curr,
1827                                     hw_attrs);
1828                 va_curr += size_curr;
1829         }
1830         if (DSP_SUCCEEDED(status))
1831                 status = 0;
1832         else
1833                 status = -EPERM;
1834
1835         /*
1836          * In any case, flush the TLB
1837          * This is called from here instead of from pte_update to avoid unnecessary
1838          * repetition while mapping non-contiguous physical regions of a virtual
1839          * region
1840          */
1841         flush_all(dev_context);
1842         dev_dbg(bridge, "%s status %x\n", __func__, status);
1843         return status;
1844 }
1845
1846 /*
1847  *  ======== wait_for_start ========
1848  *      Wait for the signal from the DSP that it has started, or time out.
1849  */
1850 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
1851 {
1852         u16 timeout = TIHELEN_ACKTIMEOUT;
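        /*
         * Poll the shared-memory sync word every 10 us; TIHELEN_ACKTIMEOUT
         * (10000) iterations gives a budget of roughly 100 ms.
         */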
1853
1854         /*  Wait for response from board */
1855         while (*((volatile u16 *)dw_sync_addr) && --timeout)
1856                 udelay(10);
1857
1858         /*  If timed out: return false */
1859         if (!timeout) {
1860                 pr_err("%s: Timed out waiting DSP to Start\n", __func__);
1861                 return false;
1862         }
1863         return true;
1864 }