drivers/staging/tidspbridge/core/io_sm.c
1 /*
2  * io_sm.c
3  *
4  * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5  *
6  * IO dispatcher for a shared memory channel driver.
7  *
8  * Copyright (C) 2005-2006 Texas Instruments, Inc.
9  *
10  * This package is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License version 2 as
12  * published by the Free Software Foundation.
13  *
14  * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15  * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16  * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17  */
18
19 /*
20  * Channel Invariant:
21  * There is an important invariant condition which must be maintained per
22  * channel outside of bridge_chnl_get_ioc() and IO_Dispatch(), violation of
23  * which may cause timeouts and/or failure of the sync_wait_on_event
24  * function.
25  */
26 #include <linux/types.h>
27
28 /* Host OS */
29 #include <dspbridge/host_os.h>
30 #include <linux/workqueue.h>
31
32 /*  ----------------------------------- DSP/BIOS Bridge */
33 #include <dspbridge/dbdefs.h>
34
35 /* Trace & Debug */
36 #include <dspbridge/dbc.h>
37
38 /* Services Layer */
39 #include <dspbridge/ntfy.h>
40 #include <dspbridge/sync.h>
41
42 /* Hardware Abstraction Layer */
43 #include <hw_defs.h>
44 #include <hw_mmu.h>
45
46 /* Bridge Driver */
47 #include <dspbridge/dspdeh.h>
48 #include <dspbridge/dspio.h>
49 #include <dspbridge/dspioctl.h>
50 #include <dspbridge/wdt.h>
51 #include <_tiomap.h>
52 #include <tiomap_io.h>
53 #include <_tiomap_pwr.h>
54
55 /* Platform Manager */
56 #include <dspbridge/cod.h>
57 #include <dspbridge/node.h>
58 #include <dspbridge/dev.h>
59
60 /* Others */
61 #include <dspbridge/rms_sh.h>
62 #include <dspbridge/mgr.h>
63 #include <dspbridge/drv.h>
64 #include "_cmm.h"
65 #include "module_list.h"
66
67 /* This */
68 #include <dspbridge/io_sm.h>
69 #include "_msg_sm.h"
70
71 /* Defines, Data Structures, Typedefs */
72 #define OUTPUTNOTREADY  0xffff
73 #define NOTENABLED      0xffff  /* Channel(s) not enabled */
74
75 #define EXTEND      "_EXT_END"
76
77 #define SWAP_WORD(x)     (x)
78 #define UL_PAGE_ALIGN_SIZE 0x10000      /* Page Align Size */
79
80 #define MAX_PM_REQS 32
81
82 #define MMU_FAULT_HEAD1 0xa5a5a5a5
83 #define MMU_FAULT_HEAD2 0x96969696
84 #define POLL_MAX 1000
85 #define MAX_MMU_DBGBUFF 10240
86
87 /* IO Manager: only one created per board */
88 struct io_mgr {
89         /* These four fields must be the first fields in an io_mgr struct */
90         /* Bridge device context */
91         struct bridge_dev_context *hbridge_context;
92         /* Function interface to Bridge driver */
93         struct bridge_drv_interface *intf_fxns;
94         struct dev_object *hdev_obj;    /* Device this board represents */
95
96         /* These fields initialized in bridge_io_create() */
97         struct chnl_mgr *hchnl_mgr;
98         struct shm *shared_mem; /* Shared Memory control */
99         u8 *input;              /* Address of input channel */
100         u8 *output;             /* Address of output channel */
101         struct msg_mgr *hmsg_mgr;       /* Message manager */
102         /* Msg control for messages from the DSP */
103         struct msg_ctrl *msg_input_ctrl;
104         /* Msg control for messages to the DSP */
105         struct msg_ctrl *msg_output_ctrl;
106         u8 *msg_input;          /* Address of input messages */
107         u8 *msg_output;         /* Address of output messages */
108         u32 usm_buf_size;       /* Size of a shared memory I/O channel */
109         bool shared_irq;        /* Is this IRQ shared? */
110         u32 word_size;          /* Size in bytes of DSP word */
111         u16 intr_val;           /* Interrupt value */
112         /* Private extended proc info; MMU setup */
113         struct mgr_processorextinfo ext_proc_info;
114         struct cmm_object *hcmm_mgr;    /* Shared Mem Mngr */
115         struct work_struct io_workq;    /* workqueue */
116 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
117         u32 ul_trace_buffer_begin;      /* Trace message start address */
118         u32 ul_trace_buffer_end;        /* Trace message end address */
119         u32 ul_trace_buffer_current;    /* Trace message current address */
120         u32 ul_gpp_read_pointer;        /* GPP Read pointer to Trace buffer */
121         u8 *pmsg;
122         u32 ul_gpp_va;
123         u32 ul_dsp_va;
124 #endif
125         /* IO Dpc */
126         u32 dpc_req;            /* Number of requested DPCs. */
127         u32 dpc_sched;          /* Number of executed DPCs. */
128         struct tasklet_struct dpc_tasklet;
129         spinlock_t dpc_lock;
130
131 };
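/*
 * A note on the DPC bookkeeping above (derived from io_mbox_msg(),
 * iosm_schedule() and io_dpc() below): producers increment dpc_req under
 * dpc_lock and schedule dpc_tasklet; io_dpc() then services channels and
 * messages until dpc_sched catches up with dpc_req.
 */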
132
133 /* Function Prototypes */
134 static void io_dispatch_pm(struct io_mgr *pio_mgr);
135 static void notify_chnl_complete(struct chnl_object *pchnl,
136                                  struct chnl_irp *chnl_packet_obj);
137 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
138                         u8 io_mode);
139 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
140                         u8 io_mode);
141 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
142 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr);
143 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
144                              struct chnl_object *pchnl, u32 mask);
145
146 /* Bus Addr (cached kernel) */
147 static int register_shm_segs(struct io_mgr *hio_mgr,
148                                     struct cod_manager *cod_man,
149                                     u32 dw_gpp_base_pa);
150
151 static inline void set_chnl_free(struct shm *sm, u32 chnl)
152 {
153         sm->host_free_mask &= ~(1 << chnl);
154 }
155
156 static inline void set_chnl_busy(struct shm *sm, u32 chnl)
157 {
158         sm->host_free_mask |= 1 << chnl;
159 }
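/*
 * Usage sketch, inferred from the call sites in io_request_chnl(),
 * io_cancel_chnl() and input_chnl(): a set bit in shm.host_free_mask tells
 * the DSP that the host has a buffer queued on that channel, e.g.
 *
 *	set_chnl_busy(sm, pchnl->chnl_id);  - host buffer queued for input
 *	...
 *	set_chnl_free(sm, pchnl->chnl_id);  - no more host buffers on channel
 */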
160
161
162 /*
163  *  ======== bridge_io_create ========
164  *      Create an IO manager object.
165  */
166 int bridge_io_create(struct io_mgr **io_man,
167                             struct dev_object *hdev_obj,
168                             const struct io_attrs *mgr_attrts)
169 {
170         int status = 0;
171         struct io_mgr *pio_mgr = NULL;
172         struct shm *shared_mem = NULL;
173         struct bridge_dev_context *hbridge_context = NULL;
174         struct cfg_devnode *dev_node_obj;
175         struct chnl_mgr *hchnl_mgr;
176         u8 dev_type;
177
178         /* Check requirements */
179         if (!io_man || !mgr_attrts || mgr_attrts->word_size == 0) {
180                 status = -EFAULT;
181                 goto func_end;
182         }
183         dev_get_chnl_mgr(hdev_obj, &hchnl_mgr);
184         if (!hchnl_mgr || hchnl_mgr->hio_mgr) {
185                 status = -EFAULT;
186                 goto func_end;
187         }
188         /*
189          * Message manager will be created when a file is loaded, since
190          * size of message buffer in shared memory is configurable in
191          * the base image.
192          */
193         dev_get_bridge_context(hdev_obj, &hbridge_context);
194         if (!hbridge_context) {
195                 status = -EFAULT;
196                 goto func_end;
197         }
198         dev_get_dev_type(hdev_obj, &dev_type);
199         /*
200          * The DSP shared memory addresses will be set up properly when a
201          * program is loaded. They are unknown until a COFF file is
202          * loaded. The value -1 was chosen because it is less likely to be
203          * a valid address than 0.
204          */
205         shared_mem = (struct shm *)-1;
206
207         /* Allocate IO manager object */
208         pio_mgr = kzalloc(sizeof(struct io_mgr), GFP_KERNEL);
209         if (pio_mgr == NULL) {
210                 status = -ENOMEM;
211                 goto func_end;
212         }
213
214         /* Initialize chnl_mgr object */
215 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
216         pio_mgr->pmsg = NULL;
217 #endif
218         pio_mgr->hchnl_mgr = hchnl_mgr;
219         pio_mgr->word_size = mgr_attrts->word_size;
220         pio_mgr->shared_mem = shared_mem;
221
222         if (dev_type == DSP_UNIT) {
223                 /* Create an IO DPC */
224                 tasklet_init(&pio_mgr->dpc_tasklet, io_dpc, (u32) pio_mgr);
225
226                 /* Initialize DPC counters */
227                 pio_mgr->dpc_req = 0;
228                 pio_mgr->dpc_sched = 0;
229
230                 spin_lock_init(&pio_mgr->dpc_lock);
231
232                 status = dev_get_dev_node(hdev_obj, &dev_node_obj);
233         }
234
235         if (!status) {
236                 pio_mgr->hbridge_context = hbridge_context;
237                 pio_mgr->shared_irq = mgr_attrts->irq_shared;
238                 if (dsp_wdt_init())
239                         status = -EPERM;
240         } else {
241                 status = -EIO;
242         }
243 func_end:
244         if (status) {
245                 /* Cleanup */
246                 bridge_io_destroy(pio_mgr);
247                 if (io_man)
248                         *io_man = NULL;
249         } else {
250                 /* Return IO manager object to caller... */
251                 hchnl_mgr->hio_mgr = pio_mgr;
252                 *io_man = pio_mgr;
253         }
254         return status;
255 }
256
257 /*
258  *  ======== bridge_io_destroy ========
259  *  Purpose:
260  *      Disable interrupts, destroy the IO manager.
261  */
262 int bridge_io_destroy(struct io_mgr *hio_mgr)
263 {
264         int status = 0;
265         if (hio_mgr) {
266                 /* Free IO DPC object */
267                 tasklet_kill(&hio_mgr->dpc_tasklet);
268
269 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
270                 kfree(hio_mgr->pmsg);
271 #endif
272                 dsp_wdt_exit();
273                 /* Free this IO manager object */
274                 kfree(hio_mgr);
275         } else {
276                 status = -EFAULT;
277         }
278
279         return status;
280 }
281
282 /*
283  *  ======== bridge_io_on_loaded ========
284  *  Purpose:
285  *      Called when a new program is loaded to get shared memory buffer
286  *      parameters from the COFF file. The shared buffer base and limit
287  *      values are in DSP address units.
288  */
289 int bridge_io_on_loaded(struct io_mgr *hio_mgr)
290 {
291         struct cod_manager *cod_man;
292         struct chnl_mgr *hchnl_mgr;
293         struct msg_mgr *hmsg_mgr;
294         u32 ul_shm_base;
295         u32 ul_shm_base_offset;
296         u32 ul_shm_limit;
297         u32 ul_shm_length = -1;
298         u32 ul_mem_length = -1;
299         u32 ul_msg_base;
300         u32 ul_msg_limit;
301         u32 ul_msg_length = -1;
302         u32 ul_ext_end;
303         u32 ul_gpp_pa = 0;
304         u32 ul_gpp_va = 0;
305         u32 ul_dsp_va = 0;
306         u32 ul_seg_size = 0;
307         u32 ul_pad_size = 0;
308         u32 i;
309         int status = 0;
310         u8 num_procs = 0;
311         s32 ndx = 0;
312         /* DSP MMU setup table */
313         struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
314         struct cfg_hostres *host_res;
315         struct bridge_dev_context *pbridge_context;
316         u32 map_attrs;
317         u32 shm0_end;
318         u32 ul_dyn_ext_base;
319         u32 ul_seg1_size = 0;
320         u32 pa_curr = 0;
321         u32 va_curr = 0;
322         u32 gpp_va_curr = 0;
323         u32 num_bytes = 0;
324         u32 all_bits = 0;
325         u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
326                 HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
327         };
328
329         status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
330         if (!pbridge_context) {
331                 status = -EFAULT;
332                 goto func_end;
333         }
334
335         host_res = pbridge_context->resources;
336         if (!host_res) {
337                 status = -EFAULT;
338                 goto func_end;
339         }
340         status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
341         if (!cod_man) {
342                 status = -EFAULT;
343                 goto func_end;
344         }
345         hchnl_mgr = hio_mgr->hchnl_mgr;
346         /* The message manager is destroyed when the board is stopped. */
347         dev_get_msg_mgr(hio_mgr->hdev_obj, &hio_mgr->hmsg_mgr);
348         hmsg_mgr = hio_mgr->hmsg_mgr;
349         if (!hchnl_mgr || !hmsg_mgr) {
350                 status = -EFAULT;
351                 goto func_end;
352         }
353         if (hio_mgr->shared_mem)
354                 hio_mgr->shared_mem = NULL;
355
356         /* Get start and length of channel part of shared memory */
357         status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_BASE_SYM,
358                                    &ul_shm_base);
359         if (status) {
360                 status = -EFAULT;
361                 goto func_end;
362         }
363         status = cod_get_sym_value(cod_man, CHNL_SHARED_BUFFER_LIMIT_SYM,
364                                    &ul_shm_limit);
365         if (status) {
366                 status = -EFAULT;
367                 goto func_end;
368         }
369         if (ul_shm_limit <= ul_shm_base) {
370                 status = -EINVAL;
371                 goto func_end;
372         }
373         /* Get total length in bytes */
374         ul_shm_length = (ul_shm_limit - ul_shm_base + 1) * hio_mgr->word_size;
375         /* Calculate size of a PROCCOPY shared memory region */
376         dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n",
377                 __func__, (ul_shm_length - sizeof(struct shm)));
378
379         /* Get start and length of message part of shared memory */
380         status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM,
381                                            &ul_msg_base);
382         if (!status) {
383                 status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM,
384                                            &ul_msg_limit);
385                 if (!status) {
386                         if (ul_msg_limit <= ul_msg_base) {
387                                 status = -EINVAL;
388                         } else {
389                                 /*
390                                  * Length (bytes) of messaging part of shared
391                                  * memory.
392                                  */
393                                 ul_msg_length =
394                                     (ul_msg_limit - ul_msg_base +
395                                      1) * hio_mgr->word_size;
396                                 /*
397                                  * Total length (bytes) of shared memory:
398                                  * chnl + msg.
399                                  */
400                                 ul_mem_length = ul_shm_length + ul_msg_length;
401                         }
402                 } else {
403                         status = -EFAULT;
404                 }
405         } else {
406                 status = -EFAULT;
407         }
408         if (!status) {
409 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
410                 status =
411                     cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end);
412 #else
413                 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
414                                            &shm0_end);
415 #endif
416                 if (status)
417                         status = -EFAULT;
418         }
419         if (!status) {
420                 status =
421                     cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base);
422                 if (status)
423                         status = -EFAULT;
424         }
425         if (!status) {
426                 status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end);
427                 if (status)
428                         status = -EFAULT;
429         }
430         if (!status) {
431                 /* Get memory reserved in host resources */
432                 (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *)
433                                               &hio_mgr->ext_proc_info,
434                                               sizeof(struct
435                                                      mgr_processorextinfo),
436                                               &num_procs);
437
438                 /* The first MMU TLB entry(TLB_0) in DCD is ShmBase. */
439                 ndx = 0;
440                 ul_gpp_pa = host_res->dw_mem_phys[1];
441                 ul_gpp_va = host_res->dw_mem_base[1];
442                 /* This is the virtual uncached ioremapped address!!! */
443                 /* Why can't we directly take the DSPVA from the symbols? */
444                 ul_dsp_va = hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt;
445                 ul_seg_size = (shm0_end - ul_dsp_va) * hio_mgr->word_size;
446                 ul_seg1_size =
447                     (ul_ext_end - ul_dyn_ext_base) * hio_mgr->word_size;
448                 /* 4K align */
449                 ul_seg1_size = (ul_seg1_size + 0xFFF) & (~0xFFFUL);
450                 /* 64K align */
451                 ul_seg_size = (ul_seg_size + 0xFFFF) & (~0xFFFFUL);
452                 ul_pad_size = UL_PAGE_ALIGN_SIZE - ((ul_gpp_pa + ul_seg1_size) %
453                                                     UL_PAGE_ALIGN_SIZE);
454                 if (ul_pad_size == UL_PAGE_ALIGN_SIZE)
455                         ul_pad_size = 0x0;
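                /*
                 * Illustrative example with assumed values: if ul_gpp_pa is
                 * 0x86800000 and ul_seg1_size rounds up to 0x23000, then
                 * ul_pad_size = 0x10000 - ((0x86800000 + 0x23000) % 0x10000)
                 *             = 0x10000 - 0x3000 = 0xd000,
                 * which pushes the cacheable SHM segment to the next 64 KB
                 * boundary at 0x86830000.
                 */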
456
457                 dev_dbg(bridge, "%s: ul_gpp_pa %x, ul_gpp_va %x, ul_dsp_va %x, "
458                         "shm0_end %x, ul_dyn_ext_base %x, ul_ext_end %x, "
459                         "ul_seg_size %x ul_seg1_size %x \n", __func__,
460                         ul_gpp_pa, ul_gpp_va, ul_dsp_va, shm0_end,
461                         ul_dyn_ext_base, ul_ext_end, ul_seg_size, ul_seg1_size);
462
463                 if ((ul_seg_size + ul_seg1_size + ul_pad_size) >
464                     host_res->dw_mem_length[1]) {
465                         pr_err("%s: shm Error, reserved 0x%x required 0x%x\n",
466                                __func__, host_res->dw_mem_length[1],
467                                ul_seg_size + ul_seg1_size + ul_pad_size);
468                         status = -ENOMEM;
469                 }
470         }
471         if (status)
472                 goto func_end;
473
474         pa_curr = ul_gpp_pa;
475         va_curr = ul_dyn_ext_base * hio_mgr->word_size;
476         gpp_va_curr = ul_gpp_va;
477         num_bytes = ul_seg1_size;
478
479         /*
480          * Try to fit into TLB entries. If not possible, push them to page
481          * tables. It is quite possible that if sections are not on a
482          * bigger page boundary, we may end up making several small pages.
483          * So, push them onto page tables, if that is the case.
484          */
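        /*
         * Worked example (illustrative values only): with pa_curr = 0x87000000,
         * va_curr = 0x20000000 and num_bytes = 0x110000, all_bits is
         * 0xa7000000, so the loop below first picks the 1 MB page size (both
         * addresses are 1 MB aligned), then maps the remaining 0x10000 bytes
         * with a single 64 KB entry.
         */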
485         map_attrs = 0x00000000;
486         map_attrs = DSP_MAPLITTLEENDIAN;
487         map_attrs |= DSP_MAPPHYSICALADDR;
488         map_attrs |= DSP_MAPELEMSIZE32;
489         map_attrs |= DSP_MAPDONOTLOCK;
490
491         while (num_bytes) {
492                 /*
493                  * To find the max. page size with which both PA & VA are
494                  * aligned.
495                  */
496                 all_bits = pa_curr | va_curr;
497                 dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
498                         "num_bytes %x\n", all_bits, pa_curr, va_curr,
499                         num_bytes);
500                 for (i = 0; i < 4; i++) {
501                         if ((num_bytes >= page_size[i]) && ((all_bits &
502                                                              (page_size[i] -
503                                                               1)) == 0)) {
504                                 status =
505                                     hio_mgr->intf_fxns->
506                                     pfn_brd_mem_map(hio_mgr->hbridge_context,
507                                                     pa_curr, va_curr,
508                                                     page_size[i], map_attrs,
509                                                     NULL);
510                                 if (status)
511                                         goto func_end;
512                                 pa_curr += page_size[i];
513                                 va_curr += page_size[i];
514                                 gpp_va_curr += page_size[i];
515                                 num_bytes -= page_size[i];
516                                 /*
517                                  * Don't try smaller sizes. Hopefully we have
518                                  * reached an address aligned to a bigger page
519                                  * size.
520                                  */
521                                 break;
522                         }
523                 }
524         }
525         pa_curr += ul_pad_size;
526         va_curr += ul_pad_size;
527         gpp_va_curr += ul_pad_size;
528
529         /* Configure the TLB entries for the next cacheable segment */
530         num_bytes = ul_seg_size;
531         va_curr = ul_dsp_va * hio_mgr->word_size;
532         while (num_bytes) {
533                 /*
534                  * To find the max. page size with which both PA & VA are
535                  * aligned.
536                  */
537                 all_bits = pa_curr | va_curr;
538                 dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
539                         "va_curr %x, num_bytes %x\n", all_bits, pa_curr,
540                         va_curr, num_bytes);
541                 for (i = 0; i < 4; i++) {
542                         if (!(num_bytes >= page_size[i]) ||
543                             !((all_bits & (page_size[i] - 1)) == 0))
544                                 continue;
545                         if (ndx < MAX_LOCK_TLB_ENTRIES) {
546                                 /*
547                                  * This is the physical address written to
548                                  * DSP MMU.
549                                  */
550                                 ae_proc[ndx].ul_gpp_pa = pa_curr;
551                                 /*
552                                  * This is the virtual uncached ioremapped
553                                  * address!!!
554                                  */
555                                 ae_proc[ndx].ul_gpp_va = gpp_va_curr;
556                                 ae_proc[ndx].ul_dsp_va =
557                                     va_curr / hio_mgr->word_size;
558                                 ae_proc[ndx].ul_size = page_size[i];
559                                 ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
560                                 ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
561                                 ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
562                                 dev_dbg(bridge, "shm MMU TLB entry PA %x"
563                                         " VA %x DSP_VA %x Size %x\n",
564                                         ae_proc[ndx].ul_gpp_pa,
565                                         ae_proc[ndx].ul_gpp_va,
566                                         ae_proc[ndx].ul_dsp_va *
567                                         hio_mgr->word_size, page_size[i]);
568                                 ndx++;
569                         } else {
570                                 status =
571                                     hio_mgr->intf_fxns->
572                                     pfn_brd_mem_map(hio_mgr->hbridge_context,
573                                                     pa_curr, va_curr,
574                                                     page_size[i], map_attrs,
575                                                     NULL);
576                                 dev_dbg(bridge,
577                                         "shm MMU PTE entry PA %x"
578                                         " VA %x DSP_VA %x Size %x\n",
579                                         ae_proc[ndx].ul_gpp_pa,
580                                         ae_proc[ndx].ul_gpp_va,
581                                         ae_proc[ndx].ul_dsp_va *
582                                         hio_mgr->word_size, page_size[i]);
583                                 if (status)
584                                         goto func_end;
585                         }
586                         pa_curr += page_size[i];
587                         va_curr += page_size[i];
588                         gpp_va_curr += page_size[i];
589                         num_bytes -= page_size[i];
590                         /*
591                          * Don't try smaller sizes. Hopefully we have reached
592                          * an address aligned to a bigger page size.
593                          */
594                         break;
595                 }
596         }
597
598         /*
599          * Copy remaining entries from CDB. All entries are 1 MB and
600          * should not conflict with shm entries on MPU or DSP side.
601          */
602         for (i = 3; i < 7 && ndx < BRDIOCTL_NUMOFMMUTLB; i++) {
603                 if (hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys == 0)
604                         continue;
605
606                 if ((hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys >
607                      ul_gpp_pa - 0x100000
608                      && hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys <=
609                      ul_gpp_pa + ul_seg_size)
610                     || (hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt >
611                         ul_dsp_va - 0x100000 / hio_mgr->word_size
612                         && hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt <=
613                         ul_dsp_va + ul_seg_size / hio_mgr->word_size)) {
614                         dev_dbg(bridge,
615                                 "CDB MMU entry %d conflicts with "
616                                 "shm.\n\tCDB: GppPa %x, DspVa %x.\n\tSHM: "
617                                 "GppPa %x, DspVa %x, Bytes %x.\n", i,
618                                 hio_mgr->ext_proc_info.ty_tlb[i].ul_gpp_phys,
619                                 hio_mgr->ext_proc_info.ty_tlb[i].ul_dsp_virt,
620                                 ul_gpp_pa, ul_dsp_va, ul_seg_size);
621                         status = -EPERM;
622                 } else {
623                         if (ndx < MAX_LOCK_TLB_ENTRIES) {
624                                 ae_proc[ndx].ul_dsp_va =
625                                     hio_mgr->ext_proc_info.ty_tlb[i].
626                                     ul_dsp_virt;
627                                 ae_proc[ndx].ul_gpp_pa =
628                                     hio_mgr->ext_proc_info.ty_tlb[i].
629                                     ul_gpp_phys;
630                                 ae_proc[ndx].ul_gpp_va = 0;
631                                 /* 1 MB */
632                                 ae_proc[ndx].ul_size = 0x100000;
633                                 dev_dbg(bridge, "shm MMU entry PA %x "
634                                         "DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
635                                         ae_proc[ndx].ul_dsp_va);
636                                 ndx++;
637                         } else {
638                                 status = hio_mgr->intf_fxns->pfn_brd_mem_map
639                                     (hio_mgr->hbridge_context,
640                                      hio_mgr->ext_proc_info.ty_tlb[i].
641                                      ul_gpp_phys,
642                                      hio_mgr->ext_proc_info.ty_tlb[i].
643                                      ul_dsp_virt, 0x100000, map_attrs,
644                                      NULL);
645                         }
646                 }
647                 if (status)
648                         goto func_end;
649         }
650
651         map_attrs = 0x00000000;
652         map_attrs = DSP_MAPLITTLEENDIAN;
653         map_attrs |= DSP_MAPPHYSICALADDR;
654         map_attrs |= DSP_MAPELEMSIZE32;
655         map_attrs |= DSP_MAPDONOTLOCK;
656
657         /* Map the L4 peripherals */
658         i = 0;
659         while (l4_peripheral_table[i].phys_addr) {
660                 status = hio_mgr->intf_fxns->pfn_brd_mem_map
661                     (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
662                      l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
663                      map_attrs, NULL);
664                 if (status)
665                         goto func_end;
666                 i++;
667         }
668
669         for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
670                 ae_proc[i].ul_dsp_va = 0;
671                 ae_proc[i].ul_gpp_pa = 0;
672                 ae_proc[i].ul_gpp_va = 0;
673                 ae_proc[i].ul_size = 0;
674         }
675         /*
676          * Set the shm physical address entry (grayed out in CDB file)
677          * to the virtual uncached ioremapped address of shm reserved
678          * on MPU.
679          */
680         hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys =
681             (ul_gpp_va + ul_seg1_size + ul_pad_size);
682
683         /*
684          * Need shm Phys addr. IO supports only one DSP for now:
685          * num_procs = 1.
686          */
687         if (!hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys || num_procs != 1) {
688                 status = -EFAULT;
689                 goto func_end;
690         } else {
691                 if (ae_proc[0].ul_dsp_va > ul_shm_base) {
692                         status = -EPERM;
693                         goto func_end;
694                 }
695                 /* ul_shm_base may not be at ul_dsp_va address */
696                 ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
697                     hio_mgr->word_size;
698                 /*
699                  * bridge_dev_ctrl() will set dev context dsp-mmu info. In
700                  * bridge_brd_start() the MMU will be re-programmed with MMU
701                  * DSPVa-GPPPa pair info while DSP is in a known
702                  * (reset) state.
703                  */
704
705                 status =
706                     hio_mgr->intf_fxns->pfn_dev_cntrl(hio_mgr->hbridge_context,
707                                                       BRDIOCTL_SETMMUCONFIG,
708                                                       ae_proc);
709                 if (status)
710                         goto func_end;
711                 ul_shm_base = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
712                 ul_shm_base += ul_shm_base_offset;
713                 ul_shm_base = (u32) MEM_LINEAR_ADDRESS((void *)ul_shm_base,
714                                                        ul_mem_length);
715                 if (ul_shm_base == 0) {
716                         status = -EFAULT;
717                         goto func_end;
718                 }
719                 /* Register SM */
720                 status =
721                     register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
722         }
723
724         hio_mgr->shared_mem = (struct shm *)ul_shm_base;
725         hio_mgr->input = (u8 *) hio_mgr->shared_mem + sizeof(struct shm);
726         hio_mgr->output = hio_mgr->input + (ul_shm_length -
727                                             sizeof(struct shm)) / 2;
728         hio_mgr->usm_buf_size = hio_mgr->output - hio_mgr->input;
729
730         /*  Set up Shared memory addresses for messaging. */
731         hio_mgr->msg_input_ctrl = (struct msg_ctrl *)((u8 *) hio_mgr->shared_mem
732                                                       + ul_shm_length);
733         hio_mgr->msg_input =
734             (u8 *) hio_mgr->msg_input_ctrl + sizeof(struct msg_ctrl);
735         hio_mgr->msg_output_ctrl =
736             (struct msg_ctrl *)((u8 *) hio_mgr->msg_input_ctrl +
737                                 ul_msg_length / 2);
738         hio_mgr->msg_output =
739             (u8 *) hio_mgr->msg_output_ctrl + sizeof(struct msg_ctrl);
740         hmsg_mgr->max_msgs =
741             ((u8 *) hio_mgr->msg_output_ctrl - hio_mgr->msg_input)
742             / sizeof(struct msg_dspmsg);
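        /*
         * Resulting GPP-side layout, as implied by the pointer arithmetic
         * above (sizes not to scale):
         *
         *   shared_mem
         *   +-- struct shm
         *   +-- input buffer   (usm_buf_size bytes)
         *   +-- output buffer  (usm_buf_size bytes)
         *   shared_mem + ul_shm_length
         *   +-- msg_input_ctrl,  then msg_input  (DSP -> host messages)
         *   +-- msg_output_ctrl, then msg_output (host -> DSP messages)
         */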
743         dev_dbg(bridge, "IO MGR shm details: shared_mem %p, input %p, "
744                 "output %p, msg_input_ctrl %p, msg_input %p, "
745                 "msg_output_ctrl %p, msg_output %p\n",
746                 (u8 *) hio_mgr->shared_mem, hio_mgr->input,
747                 hio_mgr->output, (u8 *) hio_mgr->msg_input_ctrl,
748                 hio_mgr->msg_input, (u8 *) hio_mgr->msg_output_ctrl,
749                 hio_mgr->msg_output);
750         dev_dbg(bridge, "(proc) Max msgs in shared memory: 0x%x\n",
751                 hmsg_mgr->max_msgs);
752         memset((void *)hio_mgr->shared_mem, 0, sizeof(struct shm));
753
754 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
755         /* Get the start address of trace buffer */
756         status = cod_get_sym_value(cod_man, SYS_PUTCBEG,
757                                    &hio_mgr->ul_trace_buffer_begin);
758         if (status) {
759                 status = -EFAULT;
760                 goto func_end;
761         }
762
763         hio_mgr->ul_gpp_read_pointer = hio_mgr->ul_trace_buffer_begin =
764             (ul_gpp_va + ul_seg1_size + ul_pad_size) +
765             (hio_mgr->ul_trace_buffer_begin - ul_dsp_va);
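        /*
         * The SYS_PUTCBEG symbol is a DSP-side address inside the SHM
         * segment; the rebase above keeps only its offset from ul_dsp_va and
         * adds the GPP virtual base of that segment
         * (ul_gpp_va + ul_seg1_size + ul_pad_size). This appears to assume a
         * byte-addressable DSP, i.e. a word_size of 1, since the offset is
         * not scaled here.
         */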
766         /* Get the end address of trace buffer */
767         status = cod_get_sym_value(cod_man, SYS_PUTCEND,
768                                    &hio_mgr->ul_trace_buffer_end);
769         if (status) {
770                 status = -EFAULT;
771                 goto func_end;
772         }
773         hio_mgr->ul_trace_buffer_end =
774             (ul_gpp_va + ul_seg1_size + ul_pad_size) +
775             (hio_mgr->ul_trace_buffer_end - ul_dsp_va);
776         /* Get the current address of DSP write pointer */
777         status = cod_get_sym_value(cod_man, BRIDGE_SYS_PUTC_CURRENT,
778                                    &hio_mgr->ul_trace_buffer_current);
779         if (status) {
780                 status = -EFAULT;
781                 goto func_end;
782         }
783         hio_mgr->ul_trace_buffer_current =
784             (ul_gpp_va + ul_seg1_size + ul_pad_size) +
785             (hio_mgr->ul_trace_buffer_current - ul_dsp_va);
786         /* Calculate the size of trace buffer */
787         kfree(hio_mgr->pmsg);
788         hio_mgr->pmsg = kmalloc(((hio_mgr->ul_trace_buffer_end -
789                                 hio_mgr->ul_trace_buffer_begin) *
790                                 hio_mgr->word_size) + 2, GFP_KERNEL);
791         if (!hio_mgr->pmsg)
792                 status = -ENOMEM;
793
794         hio_mgr->ul_dsp_va = ul_dsp_va;
795         hio_mgr->ul_gpp_va = (ul_gpp_va + ul_seg1_size + ul_pad_size);
796
797 #endif
798 func_end:
799         return status;
800 }
801
802 /*
803  *  ======== io_buf_size ========
804  *      Size of shared memory I/O channel.
805  */
806 u32 io_buf_size(struct io_mgr *hio_mgr)
807 {
808         if (hio_mgr)
809                 return hio_mgr->usm_buf_size;
810         else
811                 return 0;
812 }
813
814 /*
815  *  ======== io_cancel_chnl ========
816  *      Cancel IO on a given PCPY channel.
817  */
818 void io_cancel_chnl(struct io_mgr *hio_mgr, u32 chnl)
819 {
820         struct io_mgr *pio_mgr = (struct io_mgr *)hio_mgr;
821         struct shm *sm;
822
823         if (!hio_mgr)
824                 goto func_end;
825         sm = hio_mgr->shared_mem;
826
827         /* Inform DSP that we have no more buffers on this channel */
828         set_chnl_free(sm, chnl);
829
830         sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
831 func_end:
832         return;
833 }
834
835
836 /*
837  *  ======== io_dispatch_pm ========
838  *      Performs I/O dispatch on PM related messages from DSP
839  */
840 static void io_dispatch_pm(struct io_mgr *pio_mgr)
841 {
842         int status;
843         u32 parg[2];
844
845         /* Perform Power message processing here */
846         parg[0] = pio_mgr->intr_val;
847
848         /* Send the command to the Bridge clk/pwr manager to handle */
849         if (parg[0] == MBX_PM_HIBERNATE_EN) {
850                 dev_dbg(bridge, "PM: Hibernate command\n");
851                 status = pio_mgr->intf_fxns->
852                                 pfn_dev_cntrl(pio_mgr->hbridge_context,
853                                               BRDIOCTL_PWR_HIBERNATE, parg);
854                 if (status)
855                         pr_err("%s: hibernate cmd failed 0x%x\n",
856                                        __func__, status);
857         } else if (parg[0] == MBX_PM_OPP_REQ) {
858                 parg[1] = pio_mgr->shared_mem->opp_request.rqst_opp_pt;
859                 dev_dbg(bridge, "PM: Requested OPP = 0x%x\n", parg[1]);
860                 status = pio_mgr->intf_fxns->
861                                 pfn_dev_cntrl(pio_mgr->hbridge_context,
862                                         BRDIOCTL_CONSTRAINT_REQUEST, parg);
863                 if (status)
864                         dev_dbg(bridge, "PM: Failed to set constraint "
865                                 "= 0x%x\n", parg[1]);
866         } else {
867                 dev_dbg(bridge, "PM: clk control value of msg = 0x%x\n",
868                         parg[0]);
869                 status = pio_mgr->intf_fxns->
870                                 pfn_dev_cntrl(pio_mgr->hbridge_context,
871                                               BRDIOCTL_CLK_CTRL, parg);
872                 if (status)
873                         dev_dbg(bridge, "PM: Failed to ctrl the DSP clk"
874                                 "= 0x%x\n", *parg);
875         }
876 }
877
878 /*
879  *  ======== io_dpc ========
880  *      Deferred procedure call for shared memory channel driver ISR.  Carries
881  *      out the dispatch of I/O as a non-preemptible event. It can only be
882  *      pre-empted by an ISR.
883  */
884 void io_dpc(unsigned long ref_data)
885 {
886         struct io_mgr *pio_mgr = (struct io_mgr *)ref_data;
887         struct chnl_mgr *chnl_mgr_obj;
888         struct msg_mgr *msg_mgr_obj;
889         struct deh_mgr *hdeh_mgr;
890         u32 requested;
891         u32 serviced;
892
893         if (!pio_mgr)
894                 goto func_end;
895         chnl_mgr_obj = pio_mgr->hchnl_mgr;
896         dev_get_msg_mgr(pio_mgr->hdev_obj, &msg_mgr_obj);
897         dev_get_deh_mgr(pio_mgr->hdev_obj, &hdeh_mgr);
898         if (!chnl_mgr_obj)
899                 goto func_end;
900
901         requested = pio_mgr->dpc_req;
902         serviced = pio_mgr->dpc_sched;
903
904         if (serviced == requested)
905                 goto func_end;
906
907         /* Process pending DPC's */
908         do {
909                 /* Check value of interrupt reg to ensure it's a valid error */
910                 if ((pio_mgr->intr_val > DEH_BASE) &&
911                     (pio_mgr->intr_val < DEH_LIMIT)) {
912                         /* Notify DSP/BIOS exception */
913                         if (hdeh_mgr) {
914 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
915                                 print_dsp_debug_trace(pio_mgr);
916 #endif
917                                 bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
918                                                   pio_mgr->intr_val);
919                         }
920                 }
921                 /* Proc-copy channel dispatch */
922                 input_chnl(pio_mgr, NULL, IO_SERVICE);
923                 output_chnl(pio_mgr, NULL, IO_SERVICE);
924
925 #ifdef CHNL_MESSAGES
926                 if (msg_mgr_obj) {
927                         /* Perform I/O dispatch on message queues */
928                         input_msg(pio_mgr, msg_mgr_obj);
929                         output_msg(pio_mgr, msg_mgr_obj);
930                 }
931
932 #endif
933 #ifdef CONFIG_TIDSPBRIDGE_DEBUG
934                 if (pio_mgr->intr_val & MBX_DBG_SYSPRINTF) {
935                         /* Notify DSP Trace message */
936                         print_dsp_debug_trace(pio_mgr);
937                 }
938 #endif
939                 serviced++;
940         } while (serviced != requested);
941         pio_mgr->dpc_sched = requested;
942 func_end:
943         return;
944 }
945
946 /*
947  *  ======== io_mbox_msg ========
948  *      Main interrupt handler for the shared memory IO manager.
949  *      Calls the Bridge's CHNL_ISR to determine if this interrupt is ours, then
950  *      schedules a DPC to dispatch I/O.
951  */
952 void io_mbox_msg(u32 msg)
953 {
954         struct io_mgr *pio_mgr;
955         struct dev_object *dev_obj;
956         unsigned long flags;
957
958         dev_obj = dev_get_first();
959         dev_get_io_mgr(dev_obj, &pio_mgr);
960
961         if (!pio_mgr)
962                 return;
963
964         pio_mgr->intr_val = (u16)msg;
965         if (pio_mgr->intr_val & MBX_PM_CLASS)
966                 io_dispatch_pm(pio_mgr);
967
968         if (pio_mgr->intr_val == MBX_DEH_RESET) {
969                 pio_mgr->intr_val = 0;
970         } else {
971                 spin_lock_irqsave(&pio_mgr->dpc_lock, flags);
972                 pio_mgr->dpc_req++;
973                 spin_unlock_irqrestore(&pio_mgr->dpc_lock, flags);
974                 tasklet_schedule(&pio_mgr->dpc_tasklet);
975         }
976         return;
977 }
978
979 /*
980  *  ======== io_request_chnl ========
981  *  Purpose:
982  *      Request channel I/O from the DSP. Sets flags in shared memory, then
983  *      interrupts the DSP.
984  */
985 void io_request_chnl(struct io_mgr *io_manager, struct chnl_object *pchnl,
986                         u8 io_mode, u16 *mbx_val)
987 {
988         struct chnl_mgr *chnl_mgr_obj;
989         struct shm *sm;
990
991         if (!pchnl || !mbx_val)
992                 goto func_end;
993         chnl_mgr_obj = io_manager->hchnl_mgr;
994         sm = io_manager->shared_mem;
995         if (io_mode == IO_INPUT) {
996                 /*
997                  * Assertion fires if CHNL_AddIOReq() was called on a stream
998                  * which was cancelled, or attached to a dead board.
999                  */
1000                 DBC_ASSERT((pchnl->dw_state == CHNL_STATEREADY) ||
1001                            (pchnl->dw_state == CHNL_STATEEOS));
1002                 /* Indicate to the DSP we have a buffer available for input */
1003                 set_chnl_busy(sm, pchnl->chnl_id);
1004                 *mbx_val = MBX_PCPY_CLASS;
1005         } else if (io_mode == IO_OUTPUT) {
1006                 /*
1007                  * This assertion fails if CHNL_AddIOReq() was called on a
1008                  * stream which was cancelled, or attached to a dead board.
1009                  */
1010                 DBC_ASSERT((pchnl->dw_state & ~CHNL_STATEEOS) ==
1011                            CHNL_STATEREADY);
1012                 /*
1013                  * Record the fact that we have a buffer available for
1014                  * output.
1015                  */
1016                 chnl_mgr_obj->dw_output_mask |= (1 << pchnl->chnl_id);
1017         } else {
1018                 DBC_ASSERT(io_mode);    /* Shouldn't get here. */
1019         }
1020 func_end:
1021         return;
1022 }
1023
1024 /*
1025  *  ======== iosm_schedule ========
1026  *      Schedule DPC for IO.
1027  */
1028 void iosm_schedule(struct io_mgr *io_manager)
1029 {
1030         unsigned long flags;
1031
1032         if (!io_manager)
1033                 return;
1034
1035         /* Increment count of DPC's pending. */
1036         spin_lock_irqsave(&io_manager->dpc_lock, flags);
1037         io_manager->dpc_req++;
1038         spin_unlock_irqrestore(&io_manager->dpc_lock, flags);
1039
1040         /* Schedule DPC */
1041         tasklet_schedule(&io_manager->dpc_tasklet);
1042 }
1043
1044 /*
1045  *  ======== find_ready_output ========
1046  *      Search for a host output channel which is ready to send.  If this is
1047  *      called as a result of servicing the DPC, then implement a round
1048  *      robin search; otherwise, this was called by a client thread (via
1049  *      IO_Dispatch()), so just start searching from the current channel id.
1050  */
1051 static u32 find_ready_output(struct chnl_mgr *chnl_mgr_obj,
1052                              struct chnl_object *pchnl, u32 mask)
1053 {
1054         u32 ret = OUTPUTNOTREADY;
1055         u32 id, start_id;
1056         u32 shift;
1057
1058         id = (pchnl !=
1059               NULL ? pchnl->chnl_id : (chnl_mgr_obj->dw_last_output + 1));
1060         id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1061         if (id >= CHNL_MAXCHANNELS)
1062                 goto func_end;
1063         if (mask) {
1064                 shift = (1 << id);
1065                 start_id = id;
1066                 do {
1067                         if (mask & shift) {
1068                                 ret = id;
1069                                 if (pchnl == NULL)
1070                                         chnl_mgr_obj->dw_last_output = id;
1071                                 break;
1072                         }
1073                         id = id + 1;
1074                         id = ((id == CHNL_MAXCHANNELS) ? 0 : id);
1075                         shift = (1 << id);
1076                 } while (id != start_id);
1077         }
1078 func_end:
1079         return ret;
1080 }
1081
1082 /*
1083  *  ======== input_chnl ========
1084  *      Dispatch a buffer on an input channel.
1085  */
1086 static void input_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1087                         u8 io_mode)
1088 {
1089         struct chnl_mgr *chnl_mgr_obj;
1090         struct shm *sm;
1091         u32 chnl_id;
1092         u32 bytes;
1093         struct chnl_irp *chnl_packet_obj = NULL;
1094         u32 dw_arg;
1095         bool clear_chnl = false;
1096         bool notify_client = false;
1097
1098         sm = pio_mgr->shared_mem;
1099         chnl_mgr_obj = pio_mgr->hchnl_mgr;
1100
1101         /* Attempt to perform input */
1102         if (!sm->input_full)
1103                 goto func_end;
1104
1105         bytes = sm->input_size * chnl_mgr_obj->word_size;
1106         chnl_id = sm->input_id;
1107         dw_arg = sm->arg;
1108         if (chnl_id >= CHNL_MAXCHANNELS) {
1109                 /* Shouldn't be here: would indicate corrupted shm. */
1110                 DBC_ASSERT(chnl_id);
1111                 goto func_end;
1112         }
1113         pchnl = chnl_mgr_obj->ap_channel[chnl_id];
1114         if ((pchnl != NULL) && CHNL_IS_INPUT(pchnl->chnl_mode)) {
1115                 if ((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY) {
1116                         if (!pchnl->pio_requests)
1117                                 goto func_end;
1118                         /* Get the I/O request, and attempt a transfer */
1119                         chnl_packet_obj = (struct chnl_irp *)
1120                             lst_get_head(pchnl->pio_requests);
1121                         if (chnl_packet_obj) {
1122                                 pchnl->cio_reqs--;
1123                                 if (pchnl->cio_reqs < 0)
1124                                         goto func_end;
1125                                 /*
1126                                  * Ensure we don't overflow the client's
1127                                  * buffer.
1128                                  */
1129                                 bytes = min(bytes, chnl_packet_obj->byte_size);
1130                                 memcpy(chnl_packet_obj->host_sys_buf,
1131                                                 pio_mgr->input, bytes);
1132                                 pchnl->bytes_moved += bytes;
1133                                 chnl_packet_obj->byte_size = bytes;
1134                                 chnl_packet_obj->dw_arg = dw_arg;
1135                                 chnl_packet_obj->status = CHNL_IOCSTATCOMPLETE;
1136
1137                                 if (bytes == 0) {
1138                                         /*
1139                                          * This assertion fails if the DSP
1140                                          * sends EOS more than once on this
1141                                          * channel.
1142                                          */
1143                                         if (pchnl->dw_state & CHNL_STATEEOS)
1144                                                 goto func_end;
1145                                         /*
1146                                          * Zero bytes indicates EOS. Update
1147                                          * IOC status for this chirp, and also
1148                                          * the channel state.
1149                                          */
1150                                         chnl_packet_obj->status |=
1151                                             CHNL_IOCSTATEOS;
1152                                         pchnl->dw_state |= CHNL_STATEEOS;
1153                                         /*
1154                                          * Notify that end of stream has
1155                                          * occurred.
1156                                          */
1157                                         ntfy_notify(pchnl->ntfy_obj,
1158                                                     DSP_STREAMDONE);
1159                                 }
1160                                 /* Tell DSP if no more I/O buffers available */
1161                                 if (!pchnl->pio_requests)
1162                                         goto func_end;
1163                                 if (LST_IS_EMPTY(pchnl->pio_requests)) {
1164                                         set_chnl_free(sm, pchnl->chnl_id);
1165                                 }
1166                                 clear_chnl = true;
1167                                 notify_client = true;
1168                         } else {
1169                                 /*
1170                                  * Input full for this channel, but we have no
1171                                  * buffers available.  The channel must be
1172                                  * "idling". Clear out the physical input
1173                                  * channel.
1174                                  */
1175                                 clear_chnl = true;
1176                         }
1177                 } else {
1178                         /* Input channel cancelled: clear input channel */
1179                         clear_chnl = true;
1180                 }
1181         } else {
1182                 /* DPC fired after host closed channel: clear input channel */
1183                 clear_chnl = true;
1184         }
1185         if (clear_chnl) {
1186                 /* Indicate to the DSP we have read the input */
1187                 sm->input_full = 0;
1188                 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
1189         }
1190         if (notify_client) {
1191                 /* Notify client with IO completion record */
1192                 notify_chnl_complete(pchnl, chnl_packet_obj);
1193         }
1194 func_end:
1195         return;
1196 }
1197
1198 /*
1199  *  ======== input_msg ========
1200  *      Copies messages from shared memory to the message queues.
1201  */
1202 static void input_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1203 {
1204         u32 num_msgs;
1205         u32 i;
1206         u8 *msg_input;
1207         struct msg_queue *msg_queue_obj;
1208         struct msg_frame *pmsg;
1209         struct msg_dspmsg msg;
1210         struct msg_ctrl *msg_ctr_obj;
1211         u32 input_empty;
1212         u32 addr;
1213
1214         msg_ctr_obj = pio_mgr->msg_input_ctrl;
1215         /* Get the number of input messages to be read */
1216         input_empty = msg_ctr_obj->buf_empty;
1217         num_msgs = msg_ctr_obj->size;
1218         if (input_empty)
1219                 goto func_end;
1220
1221         msg_input = pio_mgr->msg_input;
1222         for (i = 0; i < num_msgs; i++) {
1223                 /* Read the next message */
1224                 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_cmd);
1225                 msg.msg.dw_cmd =
1226                     read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1227                 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg1);
1228                 msg.msg.dw_arg1 =
1229                     read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1230                 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msg.dw_arg2);
1231                 msg.msg.dw_arg2 =
1232                     read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1233                 addr = (u32) &(((struct msg_dspmsg *)msg_input)->msgq_id);
1234                 msg.msgq_id =
1235                     read_ext32_bit_dsp_data(pio_mgr->hbridge_context, addr);
1236                 msg_input += sizeof(struct msg_dspmsg);
1237                 if (!hmsg_mgr->queue_list)
1238                         goto func_end;
1239
1240                 /* Determine which queue to put the message in */
1241                 msg_queue_obj =
1242                     (struct msg_queue *)lst_first(hmsg_mgr->queue_list);
1243                 dev_dbg(bridge, "input msg: dw_cmd=0x%x dw_arg1=0x%x "
1244                         "dw_arg2=0x%x msgq_id=0x%x \n", msg.msg.dw_cmd,
1245                         msg.msg.dw_arg1, msg.msg.dw_arg2, msg.msgq_id);
1246                 /*
1247                  * Interrupt may occur before shared memory and message
1248                  * input locations have been set up. If all nodes were
1249                  * cleaned up, hmsg_mgr->max_msgs should be 0.
1250                  */
1251                 while (msg_queue_obj != NULL) {
1252                         if (msg.msgq_id == msg_queue_obj->msgq_id) {
1253                                 /* Found it */
1254                                 if (msg.msg.dw_cmd == RMS_EXITACK) {
1255                                         /*
1256                                          * Call the node exit notification.
1257                                          * The exit message does not get
1258                                          * queued.
1259                                          */
1260                                         (*hmsg_mgr->on_exit) ((void *)
1261                                                            msg_queue_obj->arg,
1262                                                            msg.msg.dw_arg1);
1263                                 } else {
1264                                         /*
1265                                          * Not an exit acknowledgement, queue
1266                                          * the message.
1267                                          */
1268                                         if (!msg_queue_obj->msg_free_list)
1269                                                 goto func_end;
1270                                         pmsg = (struct msg_frame *)lst_get_head
1271                                             (msg_queue_obj->msg_free_list);
1272                                         if (msg_queue_obj->msg_used_list
1273                                             && pmsg) {
1274                                                 pmsg->msg_data = msg;
1275                                                 lst_put_tail
1276                                                  (msg_queue_obj->msg_used_list,
1277                                                      (struct list_head *)pmsg);
1278                                                 ntfy_notify
1279                                                     (msg_queue_obj->ntfy_obj,
1280                                                      DSP_NODEMESSAGEREADY);
1281                                                 sync_set_event
1282                                                     (msg_queue_obj->sync_event);
1283                                         } else {
1284                                                 /*
1285                                                  * No free frame to copy the
1286                                                  * message into.
1287                                                  */
1288                                                 pr_err("%s: no free msg frames,"
1289                                                        " discarding msg\n",
1290                                                        __func__);
1291                                         }
1292                                 }
1293                                 break;
1294                         }
1295
1296                         if (!hmsg_mgr->queue_list || !msg_queue_obj)
1297                                 goto func_end;
1298                         msg_queue_obj =
1299                             (struct msg_queue *)lst_next(hmsg_mgr->queue_list,
1300                                                          (struct list_head *)
1301                                                          msg_queue_obj);
1302                 }
1303         }
1304         /* Set the post SWI flag */
1305         if (num_msgs > 0) {
1306                 /* Tell the DSP we've read the messages */
1307                 msg_ctr_obj->buf_empty = true;
1308                 msg_ctr_obj->post_swi = true;
1309                 sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
1310         }
1311 func_end:
1312         return;
1313 }
1314
1315 /*
1316  *  ======== notify_chnl_complete ========
1317  *  Purpose:
1318  *      Signal the channel event, notifying the client that I/O has completed.
1319  */
1320 static void notify_chnl_complete(struct chnl_object *pchnl,
1321                                  struct chnl_irp *chnl_packet_obj)
1322 {
1323         bool signal_event;
1324
1325         if (!pchnl || !pchnl->sync_event ||
1326             !pchnl->pio_completions || !chnl_packet_obj)
1327                 goto func_end;
1328
1329         /*
1330          * Note: we signal the channel event only if the queue of IO
1331          * completions is empty.  If it is not empty, the event is sure to be
1332          * signalled by the only IO completion list consumer:
1333          * bridge_chnl_get_ioc().
1334          */
1335         signal_event = LST_IS_EMPTY(pchnl->pio_completions);
1336         /* Enqueue the IO completion info for the client */
1337         lst_put_tail(pchnl->pio_completions,
1338                      (struct list_head *)chnl_packet_obj);
1339         pchnl->cio_cs++;
1340
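        /*
         * A completion count above the number of packets attached to the
         * channel indicates inconsistent bookkeeping; skip signalling.
         */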
1341         if (pchnl->cio_cs > pchnl->chnl_packets)
1342                 goto func_end;
1343         /* Signal the channel event (if not already set) that IO is complete */
1344         if (signal_event)
1345                 sync_set_event(pchnl->sync_event);
1346
1347         /* Notify that IO is complete */
1348         ntfy_notify(pchnl->ntfy_obj, DSP_STREAMIOCOMPLETION);
1349 func_end:
1350         return;
1351 }
1352
1353 /*
1354  *  ======== output_chnl ========
1355  *  Purpose:
1356  *      Dispatch a buffer on an output channel.
1357  */
1358 static void output_chnl(struct io_mgr *pio_mgr, struct chnl_object *pchnl,
1359                         u8 io_mode)
1360 {
1361         struct chnl_mgr *chnl_mgr_obj;
1362         struct shm *sm;
1363         u32 chnl_id;
1364         struct chnl_irp *chnl_packet_obj;
1365         u32 dw_dsp_f_mask;
1366
1367         chnl_mgr_obj = pio_mgr->hchnl_mgr;
1368         sm = pio_mgr->shared_mem;
1369         /* Attempt to perform output */
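        /* Nothing to do until the DSP consumes the previous output buffer */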
1370         if (sm->output_full)
1371                 goto func_end;
1372
1373         if (pchnl && !((pchnl->dw_state & ~CHNL_STATEEOS) == CHNL_STATEREADY))
1374                 goto func_end;
1375
1376         /* Look to see if both a PC and DSP output channel are ready */
1377         dw_dsp_f_mask = sm->dsp_free_mask;
1378         chnl_id =
1379             find_ready_output(chnl_mgr_obj, pchnl,
1380                               (chnl_mgr_obj->dw_output_mask & dw_dsp_f_mask));
1381         if (chnl_id == OUTPUTNOTREADY)
1382                 goto func_end;
1383
1384         pchnl = chnl_mgr_obj->ap_channel[chnl_id];
1385         if (!pchnl || !pchnl->pio_requests) {
1386                 /* Shouldn't get here */
1387                 goto func_end;
1388         }
1389         /* Get the I/O request, and attempt a transfer */
1390         chnl_packet_obj = (struct chnl_irp *)lst_get_head(pchnl->pio_requests);
1391         if (!chnl_packet_obj)
1392                 goto func_end;
1393
1394         pchnl->cio_reqs--;
1395         if (pchnl->cio_reqs < 0 || !pchnl->pio_requests)
1396                 goto func_end;
1397
1398         /* Record fact that no more I/O buffers available */
1399         if (LST_IS_EMPTY(pchnl->pio_requests))
1400                 chnl_mgr_obj->dw_output_mask &= ~(1 << chnl_id);
1401
1402         /* Transfer buffer to DSP side */
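        /* Never copy more than the shared output buffer can hold */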
1403         chnl_packet_obj->byte_size = min(pio_mgr->usm_buf_size,
1404                                         chnl_packet_obj->byte_size);
1405         memcpy(pio_mgr->output, chnl_packet_obj->host_sys_buf,
1406                                         chnl_packet_obj->byte_size);
1407         pchnl->bytes_moved += chnl_packet_obj->byte_size;
1408         /* Write all 32 bits of arg */
1409         sm->arg = chnl_packet_obj->dw_arg;
1410 #if _CHNL_WORDSIZE == 2
1411         /* SM access word size may differ (e.g. 16/32-bit words) */
1412         sm->output_id = (u16) chnl_id;
1413         sm->output_size = (u16) (chnl_packet_obj->byte_size +
1414                                 chnl_mgr_obj->word_size - 1) /
1415                                 (u16) chnl_mgr_obj->word_size;
1416 #else
1417         sm->output_id = chnl_id;
1418         sm->output_size = (chnl_packet_obj->byte_size +
1419                         chnl_mgr_obj->word_size - 1) / chnl_mgr_obj->word_size;
1420 #endif
1421         sm->output_full =  1;
1422         /* Indicate to the DSP we have written the output */
1423         sm_interrupt_dsp(pio_mgr->hbridge_context, MBX_PCPY_CLASS);
1424         /* Notify client with IO completion record (keep EOS) */
1425         chnl_packet_obj->status &= CHNL_IOCSTATEOS;
1426         notify_chnl_complete(pchnl, chnl_packet_obj);
1427         /* Notify if stream is done. */
1428         if (chnl_packet_obj->status & CHNL_IOCSTATEOS)
1429                 ntfy_notify(pchnl->ntfy_obj, DSP_STREAMDONE);
1430
1431 func_end:
1432         return;
1433 }
1434
1435 /*
1436  *  ======== output_msg ========
1437  *      Copies messages from the message queues to the shared memory.
1438  */
1439 static void output_msg(struct io_mgr *pio_mgr, struct msg_mgr *hmsg_mgr)
1440 {
1441         u32 num_msgs = 0;
1442         u32 i;
1443         u8 *msg_output;
1444         struct msg_frame *pmsg;
1445         struct msg_ctrl *msg_ctr_obj;
1446         u32 output_empty;
1447         u32 val;
1448         u32 addr;
1449
1450         msg_ctr_obj = pio_mgr->msg_output_ctrl;
1451
1452         /* Check if output has been cleared */
1453         output_empty = msg_ctr_obj->buf_empty;
1454         if (output_empty) {
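                /*
                 * Send at most max_msgs messages per pass; anything beyond
                 * that stays queued until the DSP empties the buffer again.
                 */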
1455                 num_msgs = (hmsg_mgr->msgs_pending > hmsg_mgr->max_msgs) ?
1456                     hmsg_mgr->max_msgs : hmsg_mgr->msgs_pending;
1457                 msg_output = pio_mgr->msg_output;
1458                 /* Copy num_msgs messages into shared memory */
1459                 for (i = 0; i < num_msgs; i++) {
1460                         if (!hmsg_mgr->msg_used_list) {
1461                                 pmsg = NULL;
1462                                 goto func_end;
1463                         } else {
1464                                 pmsg = (struct msg_frame *)
1465                                     lst_get_head(hmsg_mgr->msg_used_list);
1466                         }
1467                         if (pmsg != NULL) {
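                                /*
                                 * Copy each 32-bit message field into shared
                                 * memory through the external DSP memory
                                 * interface.
                                 */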
1468                                 val = (pmsg->msg_data).msgq_id;
1469                                 addr = (u32) &(((struct msg_dspmsg *)
1470                                                  msg_output)->msgq_id);
1471                                 write_ext32_bit_dsp_data(
1472                                         pio_mgr->hbridge_context, addr, val);
1473                                 val = (pmsg->msg_data).msg.dw_cmd;
1474                                 addr = (u32) &((((struct msg_dspmsg *)
1475                                                   msg_output)->msg).dw_cmd);
1476                                 write_ext32_bit_dsp_data(
1477                                         pio_mgr->hbridge_context, addr, val);
1478                                 val = (pmsg->msg_data).msg.dw_arg1;
1479                                 addr = (u32) &((((struct msg_dspmsg *)
1480                                                   msg_output)->msg).dw_arg1);
1481                                 write_ext32_bit_dsp_data(
1482                                         pio_mgr->hbridge_context, addr, val);
1483                                 val = (pmsg->msg_data).msg.dw_arg2;
1484                                 addr = (u32) &((((struct msg_dspmsg *)
1485                                                   msg_output)->msg).dw_arg2);
1486                                 write_ext32_bit_dsp_data(
1487                                         pio_mgr->hbridge_context, addr, val);
1488                                 msg_output += sizeof(struct msg_dspmsg);
1489                                 if (!hmsg_mgr->msg_free_list)
1490                                         goto func_end;
1491                                 lst_put_tail(hmsg_mgr->msg_free_list,
1492                                              (struct list_head *)pmsg);
1493                                 sync_set_event(hmsg_mgr->sync_event);
1494                         }
1495                 }
1496
1497                 if (num_msgs > 0) {
1498                         hmsg_mgr->msgs_pending -= num_msgs;
1499 #if _CHNL_WORDSIZE == 2
1500                         /*
1501                          * SM access word size may differ
1502                          * (e.g. 16/32-bit words)
1503                          */
1504                         msg_ctr_obj->size = (u16) num_msgs;
1505 #else
1506                         msg_ctr_obj->size = num_msgs;
1507 #endif
1508                         msg_ctr_obj->buf_empty = false;
1509                         /* Set the post SWI flag */
1510                         msg_ctr_obj->post_swi = true;
1511                         /* Tell the DSP we have written the output. */
1512                         sm_interrupt_dsp(pio_mgr->hbridge_context,
1513                                                 MBX_PCPY_CLASS);
1514                 }
1515         }
1516 func_end:
1517         return;
1518 }
1519
1520 /*
1521  *  ======== register_shm_segs ========
1522  *  Purpose:
1523  *      Registers GPP SM segment with CMM.
1524  */
1525 static int register_shm_segs(struct io_mgr *hio_mgr,
1526                                     struct cod_manager *cod_man,
1527                                     u32 dw_gpp_base_pa)
1528 {
1529         int status = 0;
1530         u32 ul_shm0_base = 0;
1531         u32 shm0_end = 0;
1532         u32 ul_shm0_rsrvd_start = 0;
1533         u32 ul_rsrvd_size = 0;
1534         u32 ul_gpp_phys;
1535         u32 ul_dsp_virt;
1536         u32 ul_shm_seg_id0 = 0;
1537         u32 dw_offset, dw_gpp_base_va, ul_dsp_size;
1538
1539         /*
1540          * Read address and size info for first SM region.
1541          * Get start of 1st SM Heap region.
1542          */
1543         status =
1544             cod_get_sym_value(cod_man, SHM0_SHARED_BASE_SYM, &ul_shm0_base);
1545         if (ul_shm0_base == 0) {
1546                 status = -EPERM;
1547                 goto func_end;
1548         }
1549         /* Get end of 1st SM Heap region */
1550         if (!status) {
1551                 /* Get start and length of message part of shared memory */
1552                 status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM,
1553                                            &shm0_end);
1554                 if (shm0_end == 0) {
1555                         status = -EPERM;
1556                         goto func_end;
1557                 }
1558         }
1559         /* Start of Gpp reserved region */
1560         if (!status) {
1561                 /* Get start and length of message part of shared memory */
1562                 status =
1563                     cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM,
1564                                       &ul_shm0_rsrvd_start);
1565                 if (ul_shm0_rsrvd_start == 0) {
1566                         status = -EPERM;
1567                         goto func_end;
1568                 }
1569         }
1570         /* Register with CMM */
1571         if (!status) {
1572                 status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr);
1573                 if (!status) {
1574                         status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr,
1575                                                            CMM_ALLSEGMENTS);
1576                 }
1577         }
1578         /* Register new SM region(s) */
1579         if (!status && (shm0_end - ul_shm0_base) > 0) {
1580                 /* Calc size (bytes) of SM the GPP can alloc from */
1581                 ul_rsrvd_size =
1582                     (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size;
1583                 if (ul_rsrvd_size <= 0) {
1584                         status = -EPERM;
1585                         goto func_end;
1586                 }
1587                 /* Calc size of SM DSP can alloc from */
1588                 ul_dsp_size =
1589                     (ul_shm0_rsrvd_start - ul_shm0_base) * hio_mgr->word_size;
1590                 if (ul_dsp_size <= 0) {
1591                         status = -EPERM;
1592                         goto func_end;
1593                 }
1594                 /* First TLB entry reserved for Bridge SM use. */
1595                 ul_gpp_phys = hio_mgr->ext_proc_info.ty_tlb[0].ul_gpp_phys;
1596                 /* Get size in bytes */
1597                 ul_dsp_virt =
1598                     hio_mgr->ext_proc_info.ty_tlb[0].ul_dsp_virt *
1599                     hio_mgr->word_size;
1600                 /*
1601                  * Calc byte offset used to convert GPP phys <-> DSP byte
1602                  * address.
1603                  */
1604                 if (dw_gpp_base_pa > ul_dsp_virt)
1605                         dw_offset = dw_gpp_base_pa - ul_dsp_virt;
1606                 else
1607                         dw_offset = ul_dsp_virt - dw_gpp_base_pa;
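                /*
                 * dw_offset is the magnitude of the difference; the direction
                 * is conveyed to CMM below via CMM_ADDTODSPPA/CMM_SUBFROMDSPPA.
                 */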
1608
1609                 if (ul_shm0_rsrvd_start * hio_mgr->word_size < ul_dsp_virt) {
1610                         status = -EPERM;
1611                         goto func_end;
1612                 }
1613                 /*
1614                  * Calc GPP virtual base of SM region.
1615                  * This is an uncached kernel virtual address.
1616                  */
1617                 dw_gpp_base_va =
1618                     ul_gpp_phys + ul_shm0_rsrvd_start * hio_mgr->word_size -
1619                     ul_dsp_virt;
1620                 /*
1621                  * Calc Gpp phys base of SM region.
1622                  * This is the physical address.
1623                  */
1624                 dw_gpp_base_pa =
1625                     dw_gpp_base_pa + ul_shm0_rsrvd_start * hio_mgr->word_size -
1626                     ul_dsp_virt;
1627                 /* Register SM Segment 0. */
1628                 status =
1629                     cmm_register_gppsm_seg(hio_mgr->hcmm_mgr, dw_gpp_base_pa,
1630                                            ul_rsrvd_size, dw_offset,
1631                                            (dw_gpp_base_pa >
1632                                             ul_dsp_virt) ? CMM_ADDTODSPPA :
1633                                            CMM_SUBFROMDSPPA,
1634                                            (u32) (ul_shm0_base *
1635                                                   hio_mgr->word_size),
1636                                            ul_dsp_size, &ul_shm_seg_id0,
1637                                            dw_gpp_base_va);
1638                 /* First SM region is seg_id = 1 */
1639                 if (ul_shm_seg_id0 != 1)
1640                         status = -EPERM;
1641         }
1642 func_end:
1643         return status;
1644 }
1645
1646 /* ZCPY IO routines. */
1647 /*
1648  *  ======== io_sh_msetting ========
1649  *      Sets the requested shm setting.
1650  */
1651 int io_sh_msetting(struct io_mgr *hio_mgr, u8 desc, void *pargs)
1652 {
1653 #ifdef CONFIG_TIDSPBRIDGE_DVFS
1654         u32 i;
1655         struct dspbridge_platform_data *pdata =
1656             omap_dspbridge_dev->dev.platform_data;
1657
1658         switch (desc) {
1659         case SHM_CURROPP:
1660                 /* Update the shared memory with requested OPP information */
1661                 if (pargs != NULL)
1662                         hio_mgr->shared_mem->opp_table_struct.curr_opp_pt =
1663                             *(u32 *) pargs;
1664                 else
1665                         return -EPERM;
1666                 break;
1667         case SHM_OPPINFO:
1668                 /*
1669                  * Update the shared memory with the voltage, frequency,
1670                  * min and max frequency values for an OPP.
1671                  */
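                /*
                 * Each vdd1_dsp_freq row holds, in order: voltage, frequency,
                 * minimum frequency and maximum frequency for that OPP.
                 */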
1672                 for (i = 0; i <= dsp_max_opps; i++) {
1673                         hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1674                             voltage = vdd1_dsp_freq[i][0];
1675                         dev_dbg(bridge, "OPP-shm: voltage: %d\n",
1676                                 vdd1_dsp_freq[i][0]);
1677                         hio_mgr->shared_mem->opp_table_struct.
1678                             opp_point[i].frequency = vdd1_dsp_freq[i][1];
1679                         dev_dbg(bridge, "OPP-shm: frequency: %d\n",
1680                                 vdd1_dsp_freq[i][1]);
1681                         hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1682                             min_freq = vdd1_dsp_freq[i][2];
1683                         dev_dbg(bridge, "OPP-shm: min freq: %d\n",
1684                                 vdd1_dsp_freq[i][2]);
1685                         hio_mgr->shared_mem->opp_table_struct.opp_point[i].
1686                             max_freq = vdd1_dsp_freq[i][3];
1687                         dev_dbg(bridge, "OPP-shm: max freq: %d\n",
1688                                 vdd1_dsp_freq[i][3]);
1689                 }
1690                 hio_mgr->shared_mem->opp_table_struct.num_opp_pts =
1691                     dsp_max_opps;
1692                 dev_dbg(bridge, "OPP-shm: max OPP number: %d\n", dsp_max_opps);
1693                 /* Update the current OPP number */
1694                 if (pdata->dsp_get_opp)
1695                         i = (*pdata->dsp_get_opp) ();
1696                 hio_mgr->shared_mem->opp_table_struct.curr_opp_pt = i;
1697                 dev_dbg(bridge, "OPP-shm: value programmed = %d\n", i);
1698                 break;
1699         case SHM_GETOPP:
1700                 /* Get the OPP that DSP has requested */
1701                 *(u32 *) pargs = hio_mgr->shared_mem->opp_request.rqst_opp_pt;
1702                 break;
1703         default:
1704                 break;
1705         }
1706 #endif
1707         return 0;
1708 }
1709
1710 /*
1711  *  ======== bridge_io_get_proc_load ========
1712  *      Gets the Processor's Load information
1713  */
1714 int bridge_io_get_proc_load(struct io_mgr *hio_mgr,
1715                                 struct dsp_procloadstat *proc_lstat)
1716 {
1717         proc_lstat->curr_load =
1718                         hio_mgr->shared_mem->load_mon_info.curr_dsp_load;
1719         proc_lstat->predicted_load =
1720             hio_mgr->shared_mem->load_mon_info.pred_dsp_load;
1721         proc_lstat->curr_dsp_freq =
1722             hio_mgr->shared_mem->load_mon_info.curr_dsp_freq;
1723         proc_lstat->predicted_freq =
1724             hio_mgr->shared_mem->load_mon_info.pred_dsp_freq;
1725
1726         dev_dbg(bridge, "Curr Load = %d, Pred Load = %d, Curr Freq = %d, "
1727                 "Pred Freq = %d\n", proc_lstat->curr_load,
1728                 proc_lstat->predicted_load, proc_lstat->curr_dsp_freq,
1729                 proc_lstat->predicted_freq);
1730         return 0;
1731 }
1732
1733 void io_sm_init(void)
1734 {
1735         /* Do nothing */
1736 }
1737
1738 #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG)
1739 void print_dsp_debug_trace(struct io_mgr *hio_mgr)
1740 {
1741         u32 ul_new_message_length = 0, ul_gpp_cur_pointer;
1742
1743         while (true) {
1744                 /* Get the DSP current pointer */
1745                 ul_gpp_cur_pointer =
1746                     *(u32 *) (hio_mgr->ul_trace_buffer_current);
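                /* Translate the DSP address into the mapped GPP virtual address */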
1747                 ul_gpp_cur_pointer =
1748                     hio_mgr->ul_gpp_va + (ul_gpp_cur_pointer -
1749                                           hio_mgr->ul_dsp_va);
1750
1751                 /* No new debug messages available yet */
1752                 if (ul_gpp_cur_pointer == hio_mgr->ul_gpp_read_pointer) {
1753                         break;
1754                 } else if (ul_gpp_cur_pointer > hio_mgr->ul_gpp_read_pointer) {
1755                         /* Continuous data */
1756                         ul_new_message_length =
1757                             ul_gpp_cur_pointer - hio_mgr->ul_gpp_read_pointer;
1758
1759                         memcpy(hio_mgr->pmsg,
1760                                (char *)hio_mgr->ul_gpp_read_pointer,
1761                                ul_new_message_length);
1762                         hio_mgr->pmsg[ul_new_message_length] = '\0';
1763                         /*
1764                          * Advance the GPP trace pointer to DSP current
1765                          * pointer.
1766                          */
1767                         hio_mgr->ul_gpp_read_pointer += ul_new_message_length;
1768                         /* Print the trace messages */
1769                         pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
1770                 } else if (ul_gpp_cur_pointer < hio_mgr->ul_gpp_read_pointer) {
1771                         /* Handle trace buffer wraparound */
1772                         memcpy(hio_mgr->pmsg,
1773                                (char *)hio_mgr->ul_gpp_read_pointer,
1774                                hio_mgr->ul_trace_buffer_end -
1775                                hio_mgr->ul_gpp_read_pointer);
1776                         ul_new_message_length =
1777                             ul_gpp_cur_pointer - hio_mgr->ul_trace_buffer_begin;
1778                         memcpy(&hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
1779                                               hio_mgr->ul_gpp_read_pointer],
1780                                (char *)hio_mgr->ul_trace_buffer_begin,
1781                                ul_new_message_length);
1782                         hio_mgr->pmsg[hio_mgr->ul_trace_buffer_end -
1783                                       hio_mgr->ul_gpp_read_pointer +
1784                                       ul_new_message_length] = '\0';
1785                         /*
1786                          * Advance the GPP trace pointer to DSP current
1787                          * pointer.
1788                          */
1789                         hio_mgr->ul_gpp_read_pointer =
1790                             hio_mgr->ul_trace_buffer_begin +
1791                             ul_new_message_length;
1792                         /* Print the trace messages */
1793                         pr_info("DSPTrace: %s\n", hio_mgr->pmsg);
1794                 }
1795         }
1796 }
1797 #endif
1798
1799 #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1800 /*
1801  *  ======== print_dsp_trace_buffer ========
1802  *      Prints the trace buffer returned from the DSP (if DBG_Trace is enabled).
1803  *  Parameters:
1804  *    hbridge_context:   Handle to the Bridge driver's device context
1805  *                       object.
1806  *  Returns:
1807  *      0:        Success.
1808  *      -ENOMEM:    Unable to allocate memory.
1809  *  Requires:
1810  *      hbridge_context must be valid. Checked in bridge_deh_notify.
1811  */
1812 int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context)
1813 {
1814         int status = 0;
1815         struct cod_manager *cod_mgr;
1816         u32 ul_trace_end;
1817         u32 ul_trace_begin;
1818         u32 trace_cur_pos;
1819         u32 ul_num_bytes = 0;
1820         u32 ul_num_words = 0;
1821         u32 ul_word_size = 2;
1822         char *psz_buf;
1823         char *str_beg;
1824         char *trace_end;
1825         char *buf_end;
1826         char *new_line;
1827
1828         struct bridge_dev_context *pbridge_context = hbridge_context;
1829         struct bridge_drv_interface *intf_fxns;
1830         struct dev_object *dev_obj = (struct dev_object *)
1831             pbridge_context->hdev_obj;
1832
1833         status = dev_get_cod_mgr(dev_obj, &cod_mgr);
1834
1835         if (cod_mgr) {
1836                 /* Look for SYS_PUTCBEG/SYS_PUTCEND */
1837                 status =
1838                     cod_get_sym_value(cod_mgr, COD_TRACEBEG, &ul_trace_begin);
1839         } else {
1840                 status = -EFAULT;
1841         }
1842         if (!status)
1843                 status =
1844                     cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end);
1845
1846         if (!status)
1847                 /* trace_cur_pos will hold the address of a DSP pointer */
1848                 status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS,
1849                                                         &trace_cur_pos);
1850
1851         if (status)
1852                 goto func_end;
1853
1854         ul_num_bytes = (ul_trace_end - ul_trace_begin);
1855
1856         ul_num_words = ul_num_bytes * ul_word_size;
1857         status = dev_get_intf_fxns(dev_obj, &intf_fxns);
1858
1859         if (status)
1860                 goto func_end;
1861
1862         psz_buf = kzalloc(ul_num_bytes + 2, GFP_ATOMIC);
1863         if (psz_buf != NULL) {
1864                 /* Read trace buffer data */
1865                 status = (*intf_fxns->pfn_brd_read)(pbridge_context,
1866                         (u8 *)psz_buf, (u32)ul_trace_begin,
1867                         ul_num_bytes, 0);
1868
1869                 if (status)
1870                         goto func_end;
1871
1872                 /* Pack and do newline conversion */
1873                 pr_debug("PrintDspTraceBuffer: "
1874                         "before pack and unpack.\n");
1875                 pr_debug("%s: DSP Trace Buffer Begin:\n"
1876                         "=======================\n%s\n",
1877                         __func__, psz_buf);
1878
1879                 /* Read the value at the DSP address in trace_cur_pos. */
1880                 status = (*intf_fxns->pfn_brd_read)(pbridge_context,
1881                                 (u8 *)&trace_cur_pos, (u32)trace_cur_pos,
1882                                 4, 0);
1883                 if (status)
1884                         goto func_end;
1885                 /* Pack and do newline conversion */
1886                 pr_info("DSP Trace Buffer Begin:\n"
1887                         "=======================\n%s\n",
1888                         psz_buf);
1889
1890
1891                 /* convert to offset */
1892                 trace_cur_pos = trace_cur_pos - ul_trace_begin;
1893
1894                 if (ul_num_bytes) {
1895                         /*
1896                          * The buffer is not full, find the end of the
1897                          * data -- buf_end will be >= psz_buf after
1898                          * the loop.
1899                          */
1900                         buf_end = &psz_buf[ul_num_bytes+1];
1901                         /* DSP print position */
1902                         trace_end = &psz_buf[trace_cur_pos];
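                        /*
                         * The trace buffer is circular: print from the DSP
                         * write position to the end of the buffer first, then
                         * from the start of the buffer up to the write
                         * position.
                         */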
1903
1904                         /*
1905                          * Search buffer for a newline and replace it
1906                          * with '\0', then print as string.
1907                          * Continue until end of buffer is reached.
1908                          */
1909                         str_beg = trace_end;
1910                         ul_num_bytes = buf_end - str_beg;
1911
1912                         while (str_beg < buf_end) {
1913                                 new_line = strnchr(str_beg, ul_num_bytes,
1914                                                                 '\n');
1915                                 if (new_line && new_line < buf_end) {
1916                                         *new_line = 0;
1917                                         pr_debug("%s\n", str_beg);
1918                                         str_beg = ++new_line;
1919                                         ul_num_bytes = buf_end - str_beg;
1920                                 } else {
1921                                         /*
1922                                          * Assume buffer empty if it contains
1923                                          * a zero
1924                                          */
1925                                         if (*str_beg != '\0') {
1926                                                 str_beg[ul_num_bytes] = 0;
1927                                                 pr_debug("%s\n", str_beg);
1928                                         }
1929                                         str_beg = buf_end;
1930                                         ul_num_bytes = 0;
1931                                 }
1932                         }
1933                         /*
1934                          * Search buffer for a newline and replace it
1935                          * with '\0', then print as string.
1936                          * Continue until buffer is exhausted.
1937                          */
1938                         str_beg = psz_buf;
1939                         ul_num_bytes = trace_end - str_beg;
1940
1941                         while (str_beg < trace_end) {
1942                                 new_line = strnchr(str_beg, ul_num_bytes, '\n');
1943                                 if (new_line != NULL && new_line < trace_end) {
1944                                         *new_line = 0;
1945                                         pr_debug("%s\n", str_beg);
1946                                         str_beg = ++new_line;
1947                                         ul_num_bytes = trace_end - str_beg;
1948                                 } else {
1949                                         /*
1950                                          * Assume buffer empty if it contains
1951                                          * a zero
1952                                          */
1953                                         if (*str_beg != '\0') {
1954                                                 str_beg[ul_num_bytes] = 0;
1955                                                 pr_debug("%s\n", str_beg);
1956                                         }
1957                                         str_beg = trace_end;
1958                                         ul_num_bytes = 0;
1959                                 }
1960                         }
1961                 }
1962                 pr_info("\n=======================\n"
1963                         "DSP Trace Buffer End:\n");
1964                 kfree(psz_buf);
1965         } else {
1966                 status = -ENOMEM;
1967         }
1968 func_end:
1969         if (status)
1970                 dev_dbg(bridge, "%s Failed, status 0x%x\n", __func__, status);
1971         return status;
1972 }
1973
1974 /**
1975  * dump_dsp_stack() - This function dumps the data on the DSP stack.
1976  * @bridge_context:     Bridge driver's device context pointer.
1977  *
1978  */
1979 int dump_dsp_stack(struct bridge_dev_context *bridge_context)
1980 {
1981         int status = 0;
1982         struct cod_manager *code_mgr;
1983         struct node_mgr *node_mgr;
1984         u32 trace_begin;
1985         char name[256];
1986         struct {
1987                 u32 head[2];
1988                 u32 size;
1989         } mmu_fault_dbg_info;
1990         u32 *buffer;
1991         u32 *buffer_beg;
1992         u32 *buffer_end;
1993         u32 exc_type;
1994         u32 dyn_ext_base;
1995         u32 i;
1996         u32 offset_output;
1997         u32 total_size;
1998         u32 poll_cnt;
1999         const char *dsp_regs[] = {"EFR", "IERR", "ITSR", "NTSR",
2000                                 "IRP", "NRP", "AMR", "SSR",
2001                                 "ILC", "RILC", "IER", "CSR"};
2002         const char *exec_ctxt[] = {"Task", "SWI", "HWI", "Unknown"};
2003         struct bridge_drv_interface *intf_fxns;
2004         struct dev_object *dev_object = bridge_context->hdev_obj;
2005
2006         status = dev_get_cod_mgr(dev_object, &code_mgr);
2007         if (!code_mgr) {
2008                 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
2009                 status = -EFAULT;
2010         }
2011
2012         if (!status) {
2013                 status = dev_get_node_manager(dev_object, &node_mgr);
2014                 if (!node_mgr) {
2015                         pr_debug("%s: Failed on dev_get_node_manager.\n",
2016                                                                 __func__);
2017                         status = -EFAULT;
2018                 }
2019         }
2020
2021         if (!status) {
2022                 /* Look for SYS_PUTCBEG/SYS_PUTCEND: */
2023                 status =
2024                         cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin);
2025                 pr_debug("%s: trace_begin Value 0x%x\n",
2026                         __func__, trace_begin);
2027                 if (status)
2028                         pr_debug("%s: Failed on cod_get_sym_value.\n",
2029                                                                 __func__);
2030         }
2031         if (!status)
2032                 status = dev_get_intf_fxns(dev_object, &intf_fxns);
2033         /*
2034          * Check for the "magic number" in the trace buffer.  If it has
2035          * yet to appear then poll the trace buffer to wait for it.  Its
2036          * appearance signals that the DSP has finished dumping its state.
2037          */
2038         mmu_fault_dbg_info.head[0] = 0;
2039         mmu_fault_dbg_info.head[1] = 0;
2040         if (!status) {
2041                 poll_cnt = 0;
2042                 while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
2043                         mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) &&
2044                         poll_cnt < POLL_MAX) {
2045
2046                         /* Read DSP dump size from the DSP trace buffer... */
2047                         status = (*intf_fxns->pfn_brd_read)(bridge_context,
2048                                 (u8 *)&mmu_fault_dbg_info, (u32)trace_begin,
2049                                 sizeof(mmu_fault_dbg_info), 0);
2050
2051                         if (status)
2052                                 break;
2053
2054                         poll_cnt++;
2055                 }
2056
2057                 if (mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 ||
2058                         mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) {
2059                         status = -ETIME;
2060                         pr_err("%s:No DSP MMU-Fault information available.\n",
2061                                                         __func__);
2062                 }
2063         }
2064
2065         if (!status) {
2066                 total_size = mmu_fault_dbg_info.size;
2067                 /* Limit the size in case DSP went crazy */
2068                 if (total_size > MAX_MMU_DBGBUFF)
2069                         total_size = MAX_MMU_DBGBUFF;
2070
2071                 buffer = kzalloc(total_size, GFP_ATOMIC);
2072                 if (!buffer) {
2073                         status = -ENOMEM;
2074                         pr_debug("%s: Failed to "
2075                                 "allocate stack dump buffer.\n", __func__);
2076                         goto func_end;
2077                 }
2078
2079                 buffer_beg = buffer;
2080                 buffer_end =  buffer + total_size / 4;
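                /* The dump is parsed as 32-bit words; total_size is in bytes */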
2081
2082                 /* Read bytes from the DSP trace buffer... */
2083                 status = (*intf_fxns->pfn_brd_read)(bridge_context,
2084                                 (u8 *)buffer, (u32)trace_begin,
2085                                 total_size, 0);
2086                 if (status) {
2087                         pr_debug("%s: Failed to Read Trace Buffer.\n",
2088                                                                 __func__);
2089                         goto func_end;
2090                 }
2091
2092                 pr_err("\nApproximate Crash Position:\n"
2093                         "--------------------------\n");
2094
2095                 exc_type = buffer[3];
2096                 if (!exc_type)
2097                         i = buffer[79];         /* IRP */
2098                 else
2099                         i = buffer[80];         /* NRP */
2100
2101                 status =
2102                     cod_get_sym_value(code_mgr, DYNEXTBASE, &dyn_ext_base);
2103                 if (status) {
2104                         status = -EFAULT;
2105                         goto func_end;
2106                 }
2107
2108                 if ((i > dyn_ext_base) && (node_find_addr(node_mgr, i,
2109                         0x1000, &offset_output, name) == 0))
2110                         pr_err("0x%-8x [\"%s\" + 0x%x]\n", i, name,
2111                                                         i - offset_output);
2112                 else
2113                         pr_err("0x%-8x [Unable to match to a symbol.]\n", i);
2114
2115                 buffer += 4;
2116
2117                 pr_err("\nExecution Info:\n"
2118                         "---------------\n");
2119
2120                 if (*buffer < ARRAY_SIZE(exec_ctxt)) {
2121                         pr_err("Execution context \t%s\n",
2122                                 exec_ctxt[*buffer++]);
2123                 } else {
2124                         pr_err("Execution context corrupt\n");
2125                         kfree(buffer_beg);
2126                         return -EFAULT;
2127                 }
2128                 pr_err("Task Handle\t\t0x%x\n", *buffer++);
2129                 pr_err("Stack Pointer\t\t0x%x\n", *buffer++);
2130                 pr_err("Stack Top\t\t0x%x\n", *buffer++);
2131                 pr_err("Stack Bottom\t\t0x%x\n", *buffer++);
2132                 pr_err("Stack Size\t\t0x%x\n", *buffer++);
2133                 pr_err("Stack Size In Use\t0x%x\n", *buffer++);
2134
2135                 pr_err("\nCPU Registers\n"
2136                         "---------------\n");
2137
2138                 for (i = 0; i < 32; i++) {
2139                         if (i == 4 || i == 6 || i == 8)
2140                                 pr_err("A%d 0x%-8x [Function Argument %d]\n",
2141                                                         i, *buffer++, i-3);
2142                         else if (i == 15)
2143                                 pr_err("A15 0x%-8x [Frame Pointer]\n",
2144                                                                 *buffer++);
2145                         else
2146                                 pr_err("A%d 0x%x\n", i, *buffer++);
2147                 }
2148
2149                 pr_err("\nB0 0x%x\n", *buffer++);
2150                 pr_err("B1 0x%x\n", *buffer++);
2151                 pr_err("B2 0x%x\n", *buffer++);
2152
2153                 if ((*buffer > dyn_ext_base) && (node_find_addr(node_mgr,
2154                         *buffer, 0x1000, &offset_output, name) == 0))
2155
2156                         pr_err("B3 0x%-8x [Function Return Pointer:"
2157                                 " \"%s\" + 0x%x]\n", *buffer, name,
2158                                 *buffer - offset_output);
2159                 else
2160                         pr_err("B3 0x%-8x [Function Return Pointer:"
2161                                 "Unable to match to a symbol.]\n", *buffer);
2162
2163                 buffer++;
2164
2165                 for (i = 4; i < 32; i++) {
2166                         if (i == 4 || i == 6 || i == 8)
2167                                 pr_err("B%d 0x%-8x [Function Argument %d]\n",
2168                                                         i, *buffer++, i-2);
2169                         else if (i == 14)
2170                                 pr_err("B14 0x%-8x [Data Page Pointer]\n",
2171                                                                 *buffer++);
2172                         else
2173                                 pr_err("B%d 0x%x\n", i, *buffer++);
2174                 }
2175
2176                 pr_err("\n");
2177
2178                 for (i = 0; i < ARRAY_SIZE(dsp_regs); i++)
2179                         pr_err("%s 0x%x\n", dsp_regs[i], *buffer++);
2180
2181                 pr_err("\nStack:\n"
2182                         "------\n");
2183
2184                 for (i = 0; buffer < buffer_end; i++, buffer++) {
2185                         if ((*buffer > dyn_ext_base) && (
2186                                 node_find_addr(node_mgr, *buffer , 0x600,
2187                                 &offset_output, name) == 0))
2188                                 pr_err("[%d] 0x%-8x [\"%s\" + 0x%x]\n",
2189                                         i, *buffer, name,
2190                                         *buffer - offset_output);
2191                         else
2192                                 pr_err("[%d] 0x%x\n", i, *buffer);
2193                 }
2194                 kfree(buffer_beg);
2195         }
2196 func_end:
2197         return status;
2198 }
2199
2200 /**
2201  * dump_dl_modules() - This function dumps the _DLModules loaded on the DSP side
2202  * @bridge_context:             Bridge driver's device context pointer.
2203  *
2204  */
2205 void dump_dl_modules(struct bridge_dev_context *bridge_context)
2206 {
2207         struct cod_manager *code_mgr;
2208         struct bridge_drv_interface *intf_fxns;
2209         struct bridge_dev_context *bridge_ctxt = bridge_context;
2210         struct dev_object *dev_object = bridge_ctxt->hdev_obj;
2211         struct modules_header modules_hdr;
2212         struct dll_module *module_struct = NULL;
2213         u32 module_dsp_addr;
2214         u32 module_size;
2215         u32 module_struct_size = 0;
2216         u32 sect_ndx;
2217         char *sect_str ;
2218         int status = 0;
2219
2220         status = dev_get_intf_fxns(dev_object, &intf_fxns);
2221         if (status) {
2222                 pr_debug("%s: Failed on dev_get_intf_fxns.\n", __func__);
2223                 goto func_end;
2224         }
2225
2226         status = dev_get_cod_mgr(dev_object, &code_mgr);
2227         if (!code_mgr) {
2228                 pr_debug("%s: Failed on dev_get_cod_mgr.\n", __func__);
2229                 status = -EFAULT;
2230                 goto func_end;
2231         }
2232
2233         /* Lookup  the address of the modules_header structure */
2234         status = cod_get_sym_value(code_mgr, "_DLModules", &module_dsp_addr);
2235         if (status) {
2236                 pr_debug("%s: Failed on cod_get_sym_value for _DLModules.\n",
2237                         __func__);
2238                 goto func_end;
2239         }
2240
2241         pr_debug("%s: _DLModules at 0x%x\n", __func__, module_dsp_addr);
2242
2243         /* Copy the modules_header structure from DSP memory. */
2244         status = (*intf_fxns->pfn_brd_read)(bridge_context, (u8 *) &modules_hdr,
2245                                 (u32) module_dsp_addr, sizeof(modules_hdr), 0);
2246
2247         if (status) {
2248                 pr_debug("%s: Failed to read modules header.\n",
2249                                                                 __func__);
2250                 goto func_end;
2251         }
2252
2253         module_dsp_addr = modules_hdr.first_module;
2254         module_size = modules_hdr.first_module_size;
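        /* A module size of zero terminates the linked list */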
2255
2256         pr_debug("%s: dll_module_header 0x%x %d\n", __func__, module_dsp_addr,
2257                                                                 module_size);
2258
2259         pr_err("\nDynamically Loaded Modules:\n"
2260                 "---------------------------\n");
2261
2262         /* For each dll_module structure in the list... */
2263         while (module_size) {
2264                 /*
2265                  * Allocate/re-allocate memory to hold the dll_module
2266                  * structure. The memory is re-allocated only if the existing
2267                  * allocation is too small.
2268                  */
2269                 if (module_size > module_struct_size) {
2270                         kfree(module_struct);
2271                         module_struct = kzalloc(module_size+128, GFP_ATOMIC);
2272                         module_struct_size = module_size+128;
2273                         pr_debug("%s: allocated module struct %p %d\n",
2274                                 __func__, module_struct, module_struct_size);
2275                         if (!module_struct)
2276                                 goto func_end;
2277                 }
2278                 /* Copy the dll_module structure from DSP memory */
2279                 status = (*intf_fxns->pfn_brd_read)(bridge_context,
2280                         (u8 *)module_struct, module_dsp_addr, module_size, 0);
2281
2282                 if (status) {
2283                         pr_debug(
2284                         "%s: Failed to read dll_module struct for 0x%x.\n",
2285                         __func__, module_dsp_addr);
2286                         break;
2287                 }
2288
2289                 /* Update info regarding the _next_ module in the list. */
2290                 module_dsp_addr = module_struct->next_module;
2291                 module_size = module_struct->next_module_size;
2292
2293                 pr_debug("%s: next module 0x%x %d, this module num sects %d\n",
2294                         __func__, module_dsp_addr, module_size,
2295                         module_struct->num_sects);
2296
2297                 /*
2298                  * The section name strings start immediately following
2299                  * the array of dll_sect structures.
2300                  */
2301                 sect_str = (char *) &module_struct->
2302                                         sects[module_struct->num_sects];
2303                 pr_err("%s\n", sect_str);
2304
2305                 /*
2306                  * Advance to the first section name string.
2307                  * Each string follows the one before.
2308                  */
2309                 sect_str += strlen(sect_str) + 1;
2310
2311                 /* Access each dll_sect structure and its name string. */
2312                 for (sect_ndx = 0;
2313                         sect_ndx < module_struct->num_sects; sect_ndx++) {
2314                         pr_err("    Section: 0x%x ",
2315                                 module_struct->sects[sect_ndx].sect_load_adr);
2316
2317                         if (((u32) sect_str - (u32) module_struct) <
2318                                 module_struct_size) {
2319                                 pr_err("%s\n", sect_str);
2320                                 /* Each string follows the one before. */
2321                                 sect_str += strlen(sect_str)+1;
2322                         } else {
2323                                 pr_err("<string error>\n");
2324                                 pr_debug("%s: section name string address "
2325                                         "is invalid %p\n", __func__, sect_str);
2326                         }
2327                 }
2328         }
2329 func_end:
2330         kfree(module_struct);
2331 }
2332 #endif