/*
 * tiomap.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Processor Manager Driver for TI OMAP3430 EVM.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <plat/dsp.h>

#include <linux/types.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <linux/mm.h>
#include <linux/mmzone.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>

/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>

/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspmsg.h>
#include <dspbridge/pwr.h>
#include <dspbridge/io_sm.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dmm.h>
#include <dspbridge/wdt.h>

/* ----------------------------------- Local */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include "tiomap_io.h"

/* Offset in shared mem to write to in order to synchronize start with DSP */
#define SHMSYNCOFFSET 4		/* GPP byte offset */

#define BUFFERSIZE 1024

#define TIHELEN_ACKTIMEOUT 10000

#define MMU_SECTION_ADDR_MASK    0xFFF00000
#define MMU_SSECTION_ADDR_MASK   0xFF000000
#define MMU_LARGE_PAGE_MASK      0xFFFF0000
#define MMU_SMALL_PAGE_MASK      0xFFFFF000
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE   512
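/* Convert a physical address to its struct page (valid only for pfn_valid PAs) */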
#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)

/*
 * This is a totally ugly layer violation, but needed until
 * omap_ctrl_set_dsp_boot*() are provided.
 */
#define OMAP3_IVA2_BOOTMOD_IDLE 1
#define OMAP2_CONTROL_GENERAL 0x270
#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)

/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff,
			   u32 dsp_addr, u32 ul_num_bytes,
			   u32 mem_type);
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr);
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state);
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff,
			    u32 dsp_addr, u32 ul_num_bytes,
			    u32 mem_type);
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state);
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages);
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes);
static int bridge_dev_create(struct bridge_dev_context **dev_cntxt,
			     struct dev_object *hdev_obj,
			     struct cfg_hostres *config_param);
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs);
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
static u32 user_va2_pa(struct mm_struct *mm, u32 address);
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
		      u32 va, u32 size,
		      struct hw_mmu_map_attrs_t *map_attrs);
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
		   u32 size, struct hw_mmu_map_attrs_t *attrs);
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
			   u32 ul_mpu_addr, u32 virt_addr,
			   u32 ul_num_bytes,
			   struct hw_mmu_map_attrs_t *hw_attrs);

bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);

/* ----------------------------------- Globals */

/* Attributes of L2 page tables for DSP MMU */
struct page_info {
	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
};

/* Attributes used to manage the DSP MMU page tables */
struct pg_table_attrs {
	spinlock_t pg_lock;	/* Critical section object handle */

	u32 l1_base_pa;		/* Physical address of the L1 PT */
	u32 l1_base_va;		/* Virtual address of the L1 PT */
	u32 l1_size;		/* Size of the L1 PT */
	u32 l1_tbl_alloc_pa;
	/* Physical address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_va;
	/* Virtual address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_sz;
	/* Size of consistent memory allocated for L1 table.
	 * May not be aligned */

	u32 l2_base_pa;		/* Physical address of the L2 PT */
	u32 l2_base_va;		/* Virtual address of the L2 PT */
	u32 l2_size;		/* Size of the L2 PT */
	u32 l2_tbl_alloc_pa;
	/* Physical address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_va;
	/* Virtual address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_sz;
	/* Size of consistent memory allocated for L2 table.
	 * May not be aligned */

	u32 l2_num_pages;	/* Number of allocated L2 PT */
	/* Array [l2_num_pages] of L2 PT info structs */
	struct page_info *pg_info;
};

/*
 * This Bridge driver's function interface table.
 */
static struct bridge_drv_interface drv_interface_fxns = {
	/* Bridge API ver. for which this bridge driver is built. */
	BRD_API_MAJOR_VERSION,
	BRD_API_MINOR_VERSION,
	bridge_dev_create,
	bridge_dev_destroy,
	bridge_dev_ctrl,
	bridge_brd_monitor,
	bridge_brd_start,
	bridge_brd_stop,
	bridge_brd_status,
	bridge_brd_read,
	bridge_brd_write,
	bridge_brd_set_state,
	bridge_brd_mem_copy,
	bridge_brd_mem_write,
	bridge_brd_mem_map,
	bridge_brd_mem_un_map,
	/* The following CHNL functions are provided by chnl_io.lib: */
	bridge_chnl_create,
	bridge_chnl_destroy,
	bridge_chnl_open,
	bridge_chnl_close,
	bridge_chnl_add_io_req,
	bridge_chnl_get_ioc,
	bridge_chnl_cancel_io,
	bridge_chnl_flush_io,
	bridge_chnl_get_info,
	bridge_chnl_get_mgr_info,
	bridge_chnl_idle,
	bridge_chnl_register_notify,
	/* The following IO functions are provided by chnl_io.lib: */
	bridge_io_create,
	bridge_io_destroy,
	bridge_io_on_loaded,
	bridge_io_get_proc_load,
	/* The following msg_ctrl functions are provided by chnl_io.lib: */
	bridge_msg_create,
	bridge_msg_create_queue,
	bridge_msg_delete,
	bridge_msg_delete_queue,
	bridge_msg_get,
	bridge_msg_put,
	bridge_msg_register_notify,
	bridge_msg_set_queue_id,
};

static struct notifier_block dsp_mbox_notifier = {
	.notifier_call = io_mbox_msg,
};

static inline void flush_all(struct bridge_dev_context *dev_context)
{
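	/*
	 * The DSP MMU registers are not reachable while the IVA2 domain
	 * is in hibernation, so wake the DSP before touching the TLB.
	 */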
	if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->brd_state == BRD_HIBERNATION)
		wake_dsp(dev_context, NULL);

	hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base);
}

static void bad_page_dump(u32 pa, struct page *pg)
{
	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
	pr_emerg("Bad page state in process '%s'\n"
		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
		 "Backtrace:\n",
		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
		 (unsigned long)pg->flags, pg->mapping,
		 page_mapcount(pg), page_count(pg));
	dump_stack();
}

/*
 * ======== bridge_drv_entry ========
 * purpose:
 *      Bridge Driver entry point.
 */
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
		      const char *driver_file_name)
{
	if (strcmp(driver_file_name, "UMA") == 0)
		*drv_intf = &drv_interface_fxns;
	else
		dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
}

/*
 * ======== bridge_brd_monitor ========
 * purpose:
 *      Puts the DSP into a loadable state, i.e. the application can
 *      load and start the device.
 *
 * Preconditions:
 *      Device in 'OFF' state.
 */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 temp;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
		OMAP_POWERSTATEST_MASK;
	if (!(temp & 0x02)) {
		/* IVA2 is not in ON state */
		/* Read and set PM_PWSTCTRL_IVA2 to ON */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
				       OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Wait until the state has moved to ON */
		while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
		       OMAP_INTRANSITION_MASK)
			;
		/* Disable Automatic transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
				       OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
				   OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
	dsp_clk_enable(DSP_CLK_IVA2);

	/* set the device state to IDLE */
	dev_context->brd_state = BRD_IDLE;

	return 0;
}

/*
 * ======== bridge_brd_read ========
 * purpose:
 *      Reads buffers from DSP memory.
 */
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff, u32 dsp_addr,
			   u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 offset;
	u32 dsp_base_addr = dev_ctxt->dsp_base_addr;

	if (dsp_addr < dev_context->dsp_start_add) {
		status = -EPERM;
		return status;
	}
	/* change here to account for the 3 bands of the DSP internal memory */
	if ((dsp_addr - dev_context->dsp_start_add) <
	    dev_context->internal_size) {
		offset = dsp_addr - dev_context->dsp_start_add;
	} else {
		status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
					   ul_num_bytes, mem_type);
		return status;
	}
	/* copy the data from DSP memory */
	memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
	return status;
}

/*
 * ======== bridge_brd_set_state ========
 * purpose:
 *      This routine updates the Board status.
 */
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;

	dev_context->brd_state = brd_state;
	return status;
}

/*
 * ======== bridge_brd_start ========
 * purpose:
 *      Initializes DSP MMU and Starts DSP.
 *
 * Preconditions:
 *      a) DSP domain is 'ACTIVE'.
 *      b) DSP_RST1 is asserted.
 *      c) DSP_RST2 is released.
 */
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 dw_sync_addr = 0;
	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
	/* Offset of shm_base_virt from tlb_base_virt */
	u32 ul_shm_offset_virt;
	s32 entry_ndx;
	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
	struct cfg_hostres *resources = NULL;
	u32 temp;
	u32 ul_dsp_clk_rate;
	u32 ul_dsp_clk_addr;
	u32 ul_bios_gp_timer;
	u32 clk_cmd;
	struct io_mgr *hio_mgr;
	u32 ul_load_monitor_timer;
	u32 wdt_en = 0;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* The device context contains all the mmu setup info from when the
	 * last dsp base image was loaded. The first entry is always
	 * SHMMEM base. */
	/* Get SHM_BEG - convert to byte address */
	(void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
			     &ul_shm_base_virt);
	ul_shm_base_virt *= DSPWORDSIZE;
	/* DSP Virtual address */
	ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
	ul_shm_offset_virt =
	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
	/* Kernel logical address */
	ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;

	/* 2nd word is used as the sync field */
	dw_sync_addr = ul_shm_base + SHMSYNCOFFSET;
	/* Write a signature into the shm base + offset; this will
	 * get cleared when the DSP program starts. */
	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
		pr_err("%s: Illegal SM base\n", __func__);
		status = -EPERM;
	} else
		__raw_writel(0xffffffff, dw_sync_addr);

	if (!status) {
		resources = dev_context->resources;
		if (!resources)
			status = -EPERM;

		/* Assert RST1, i.e. only the reset for the DSP megacell */
		if (!status) {
			/*
			 * XXX: ioremapping MUST be removed once ctrl
			 * function is made available.
			 */
			void __iomem *ctrl = ioremap(OMAP343X_CTRL_BASE, SZ_4K);
			if (!ctrl)
				return -ENOMEM;

			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
					OMAP3430_RST1_IVA2_MASK,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
			/* Mask address with 1K for compatibility */
			__raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK,
				     ctrl + OMAP343X_CONTROL_IVA2_BOOTADDR);
			/*
			 * Set bootmode to self loop if dsp_debug flag is true
			 */
			__raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0,
				     ctrl + OMAP343X_CONTROL_IVA2_BOOTMOD);

			iounmap(ctrl);
		}
	}
	if (!status) {
		/* Reset and unreset RST2, so that BOOTADDR is copied to
		 * the IVA2 SYSC register */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
				OMAP3430_RST2_IVA2_MASK,
				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		udelay(100);
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					   OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		udelay(100);

		/* Disable the DSP MMU */
		hw_mmu_disable(resources->dmmu_base);
		/* Disable TWL */
		hw_mmu_twl_disable(resources->dmmu_base);

		/* Only make a TLB entry if both addresses are non-zero */
		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
		     entry_ndx++) {
			struct bridge_ioctl_extproc *e =
				&dev_context->atlb_entry[entry_ndx];
			struct hw_mmu_map_attrs_t map_attrs = {
				.endianism = e->endianism,
				.element_size = e->elem_size,
				.mixed_size = e->mixed_mode,
			};

			if (!e->gpp_pa || !e->dsp_va)
				continue;

			dev_dbg(bridge,
				"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
				itmp_entry_ndx,
				e->gpp_pa,
				e->dsp_va,
				e->size);

			hw_mmu_tlb_add(dev_context->dsp_mmu_base,
				       e->gpp_pa,
				       e->dsp_va,
				       e->size,
				       itmp_entry_ndx,
				       &map_attrs, 1, 1);

			itmp_entry_ndx++;
		}
	}

	/* Lock the above TLB entries and get the BIOS and load monitor timer
	 * information */
	if (!status) {
		hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_ttb_set(resources->dmmu_base,
			       dev_context->pt_attrs->l1_base_pa);
		hw_mmu_twl_enable(resources->dmmu_base);
		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */

		temp = __raw_readl((resources->dmmu_base) + 0x10);
		temp = (temp & 0xFFFFFFEF) | 0x11;
		__raw_writel(temp, (resources->dmmu_base) + 0x10);

		/* Let the DSP MMU run */
		hw_mmu_enable(resources->dmmu_base);

		/* Enable the BIOS clock */
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_LOADMON_GPTIMER,
				     &ul_load_monitor_timer);
	}

	if (!status) {
		if (ul_load_monitor_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_load_monitor_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge, "Not able to get the symbol for Load "
				"Monitor Timer\n");
		}
	}

	if (!status) {
		if (ul_bios_gp_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
			    ul_bios_gp_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge,
				"Not able to get the symbol for BIOS Timer\n");
		}
	}

	if (!status) {
		/* Set the DSP clock rate */
		(void)dev_get_symbol(dev_context->dev_obj,
				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
		/* Set Autoidle Mode for IVA2 PLL */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		if ((unsigned int *)ul_dsp_clk_addr != NULL) {
			/* Get the clock rate */
			ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
			dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x\n",
				__func__, ul_dsp_clk_rate);
			(void)bridge_brd_write(dev_context,
					       (u8 *) &ul_dsp_clk_rate,
					       ul_dsp_clk_addr, sizeof(u32), 0);
		}
		/*
		 * Enable Mailbox events and also drain any pending
		 * stale messages.
		 */
		dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
		if (IS_ERR(dev_context->mbox)) {
			dev_context->mbox = NULL;
			pr_err("%s: Failed to get dsp mailbox handle\n",
			       __func__);
			status = -EPERM;
		}
	}
	if (!status) {
		/* PM_IVA2GRPSEL_PER = 0xC0; */
		temp = readl(resources->per_pm_base + 0xA8);
		temp = (temp & 0xFFFFFF30) | 0xC0;
		writel(temp, resources->per_pm_base + 0xA8);

		/* PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
		temp = readl(resources->per_pm_base + 0xA4);
		temp = (temp & 0xFFFFFF3F);
		writel(temp, resources->per_pm_base + 0xA4);
		/* CM_SLEEPDEP_PER |= 0x04; */
		temp = readl(resources->per_base + 0x44);
		temp = (temp & 0xFFFFFFFB) | 0x04;
		writel(temp, resources->per_base + 0x44);

		/* CM_CLKSTCTRL_IVA2 = 0x00000003 - to allow automatic transitions */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
				       OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Let DSP go */
		dev_dbg(bridge, "%s Unreset\n", __func__);
		/* Enable DSP MMU Interrupts */
		hw_mmu_event_enable(resources->dmmu_base,
				    HW_MMU_ALL_INTERRUPTS);
		/* release RST1, DSP starts executing now .. */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
					   OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

		dev_dbg(bridge, "Waiting for Sync @ 0x%x\n", dw_sync_addr);
		dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
		if (dsp_debug)
			while (__raw_readw(dw_sync_addr))
				;

		/* Wait for DSP to clear word in shared memory */
		/* Read the Location */
		if (!wait_for_start(dev_context, dw_sync_addr))
			status = -ETIMEDOUT;

		dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
		if (wdt_en) {
			/* Start wdt */
			dsp_wdt_sm_set((void *)ul_shm_base);
			dsp_wdt_enable(true);
		}

		status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
		if (hio_mgr) {
			io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
			/* Write the synchronization bit to indicate the
			 * completion of OPP table update to DSP
			 */
			__raw_writel(0XCAFECAFE, dw_sync_addr);

			/* update board state */
			dev_context->brd_state = BRD_RUNNING;
			/* (void)chnlsm_enable_interrupt(dev_context); */
		} else {
			dev_context->brd_state = BRD_UNKNOWN;
		}
	}
	return status;
}

/*
 * ======== bridge_brd_stop ========
 * purpose:
 *      Puts DSP in self loop.
 *
 * Preconditions:
 *      a) None
 */
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt_attrs;
	u32 dsp_pwr_state;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	if (dev_context->brd_state == BRD_STOPPED)
		return status;

	/* As per the TRM, it is advised to first drive the IVA2 to 'Standby'
	 * mode before turning off the clocks. This is to ensure that there
	 * are no pending L3 or other transactions from the IVA2 */
	dsp_pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
			OMAP_POWERSTATEST_MASK;
	if (dsp_pwr_state != PWRDM_POWER_OFF) {
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					   OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
		mdelay(10);

		/* IVA2 is not in OFF state */
		/* Set PM_PWSTCTRL_IVA2 to OFF */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition for Sleep */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
				       OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	udelay(10);
	/* Release the Ext Base virtual Address as the next DSP Program
	 * may have a different load address */
	if (dev_context->dsp_ext_base_addr)
		dev_context->dsp_ext_base_addr = 0;

	dev_context->brd_state = BRD_STOPPED;	/* update board state */

	dsp_wdt_enable(false);

	/* This is a good place to clear the MMU page tables as well */
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
		memset((u8 *) pt_attrs->pg_info, 0x00,
		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
	}
	/* Disable the mailbox interrupts */
	if (dev_context->mbox) {
		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
		omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
		dev_context->mbox = NULL;
	}
	/* Reset IVA2 clocks */
	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
			OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	dsp_clock_disable_all(dev_context->dsp_per_clks);
	dsp_clk_disable(DSP_CLK_IVA2);

	return status;
}

/*
 * ======== bridge_brd_status ========
 *      Returns the board status.
 */
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	*board_state = dev_context->brd_state;
	return 0;
}

/*
 * ======== bridge_brd_write ========
 *      Copies the buffers to DSP internal or external memory.
 */
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff, u32 dsp_addr,
			    u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;

	if (dsp_addr < dev_context->dsp_start_add) {
		status = -EPERM;
		return status;
	}
	if ((dsp_addr - dev_context->dsp_start_add) <
	    dev_context->internal_size) {
		status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					ul_num_bytes, mem_type);
	} else {
		status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
					    ul_num_bytes, mem_type, false);
	}

	return status;
}

/*
 * ======== bridge_dev_create ========
 *      Creates and initializes the bridge driver context.
 */
static int bridge_dev_create(struct bridge_dev_context **dev_cntxt,
			     struct dev_object *hdev_obj,
			     struct cfg_hostres *config_param)
{
	int status = 0;
	struct bridge_dev_context *dev_context = NULL;
	s32 entry_ndx;
	struct cfg_hostres *resources = config_param;
	struct pg_table_attrs *pt_attrs;
	u32 pg_tbl_pa;
	u32 pg_tbl_va;
	u32 align_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* Allocate and initialize a data structure to contain the bridge driver
	 * state, which becomes the context for later calls into this driver */
	dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
	if (!dev_context) {
		status = -ENOMEM;
		goto func_end;
	}

	dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
	dev_context->self_loop = (u32) NULL;
	dev_context->dsp_per_clks = 0;
	dev_context->internal_size = OMAP_DSP_SIZE;
	/* Clear dev context MMU table entries.
	 * These get set on bridge_io_on_loaded() call after program loaded. */
	for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
		dev_context->atlb_entry[entry_ndx].gpp_pa =
		    dev_context->atlb_entry[entry_ndx].dsp_va = 0;
	}
	dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS(
				(void *)config_param->mem_base[3],
				config_param->mem_length[3]);
	if (!dev_context->dsp_base_addr)
		status = -EPERM;

	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
	if (pt_attrs != NULL) {
		pt_attrs->l1_size = SZ_16K;	/* 4096 entries of 32 bits */
		align_size = pt_attrs->l1_size;
		/* Align sizes are expected to be power of 2 */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
						     align_size, &pg_tbl_pa);

		/* Check if the PA is aligned for us */
		if ((pg_tbl_pa) & (align_size - 1)) {
			/* PA not aligned to page table size,
			 * try with more allocation and align */
			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
					  pt_attrs->l1_size);
			/* we like to get aligned on L1 table size */
			pg_tbl_va =
			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
						     align_size, &pg_tbl_pa);
			/* We should be able to get aligned table now */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
			/* Align the PA to the next 'align' boundary */
			pt_attrs->l1_base_pa =
			    ((pg_tbl_pa) +
			     (align_size - 1)) & (~(align_size - 1));
			pt_attrs->l1_base_va =
			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
		} else {
			/* We got aligned PA, cool */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
			pt_attrs->l1_base_pa = pg_tbl_pa;
			pt_attrs->l1_base_va = pg_tbl_va;
		}
		if (pt_attrs->l1_base_va)
			memset((u8 *) pt_attrs->l1_base_va, 0x00,
			       pt_attrs->l1_size);

		/* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
		 * L4 pages */
		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
		    pt_attrs->l2_num_pages;
		align_size = 4;	/* u32-aligned is enough for the L2 tables */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
						     align_size, &pg_tbl_pa);
		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
		pt_attrs->l2_base_pa = pg_tbl_pa;
		pt_attrs->l2_base_va = pg_tbl_va;

		if (pt_attrs->l2_base_va)
			memset((u8 *) pt_attrs->l2_base_va, 0x00,
			       pt_attrs->l2_size);

		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
					    sizeof(struct page_info),
					    GFP_KERNEL);
		dev_dbg(bridge,
			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
			"%x, size %x\n", pt_attrs->l1_base_pa,
			pt_attrs->l1_base_va, pt_attrs->l1_size,
			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
			pt_attrs->l2_size);
		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
	}
	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
		dev_context->pt_attrs = pt_attrs;
	else
		status = -ENOMEM;

	if (!status) {
		spin_lock_init(&pt_attrs->pg_lock);
		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;

		/* Set the Clock Divisor for the DSP module */
		udelay(5);
		/* MMU address is obtained from the host
		 * resources struct */
		dev_context->dsp_mmu_base = resources->dmmu_base;
	}
	if (!status) {
		dev_context->dev_obj = hdev_obj;
		/* Store current board state. */
		dev_context->brd_state = BRD_UNKNOWN;
		dev_context->resources = resources;
		dsp_clk_enable(DSP_CLK_IVA2);
		bridge_brd_stop(dev_context);
		/* Return ptr to our device state to the DSP API for storage */
		*dev_cntxt = dev_context;
	} else {
		if (pt_attrs != NULL) {
			kfree(pt_attrs->pg_info);

			if (pt_attrs->l2_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l2_tbl_alloc_va,
						  pt_attrs->l2_tbl_alloc_pa,
						  pt_attrs->l2_tbl_alloc_sz);
			}
			if (pt_attrs->l1_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l1_tbl_alloc_va,
						  pt_attrs->l1_tbl_alloc_pa,
						  pt_attrs->l1_tbl_alloc_sz);
			}
		}
		kfree(pt_attrs);
		kfree(dev_context);
	}
func_end:
	return status;
}

/*
 * ======== bridge_dev_ctrl ========
 *      Receives device specific commands.
 */
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs)
{
	int status = 0;
	struct bridge_ioctl_extproc *pa_ext_proc =
	    (struct bridge_ioctl_extproc *)pargs;
	s32 ndx;

	switch (dw_cmd) {
	case BRDIOCTL_CHNLREAD:
		break;
	case BRDIOCTL_CHNLWRITE:
		break;
	case BRDIOCTL_SETMMUCONFIG:
		/* store away dsp-mmu setup values for later use */
		for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
			dev_context->atlb_entry[ndx] = *pa_ext_proc;
		break;
	case BRDIOCTL_DEEPSLEEP:
	case BRDIOCTL_EMERGENCYSLEEP:
		/* Currently only DSP Idle is supported. Need to update for
		 * later releases */
		status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
		break;
	case BRDIOCTL_WAKEUP:
		status = wake_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CLK_CTRL:
		/* Looking For Baseport Fix for Clocks */
		status = dsp_peripheral_clk_ctrl(dev_context, pargs);
		break;
	case BRDIOCTL_PWR_HIBERNATE:
		status = handle_hibernation_from_dsp(dev_context);
		break;
	case BRDIOCTL_PRESCALE_NOTIFY:
		status = pre_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_POSTSCALE_NOTIFY:
		status = post_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CONSTRAINT_REQUEST:
		status = handle_constraints_set(dev_context, pargs);
		break;
	default:
		status = -EPERM;
		break;
	}
	return status;
}

/*
 * ======== bridge_dev_destroy ========
 *      Destroys the driver object.
 */
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
	struct pg_table_attrs *pt_attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
	    dev_ctxt;
	struct cfg_hostres *host_res;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* It should never happen */
	if (!dev_ctxt)
		return -EFAULT;

	/* first put the device to stop state */
	bridge_brd_stop(dev_context);
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		kfree(pt_attrs->pg_info);

		if (pt_attrs->l2_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
					  pt_attrs->l2_tbl_alloc_pa,
					  pt_attrs->l2_tbl_alloc_sz);
		}
		if (pt_attrs->l1_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
					  pt_attrs->l1_tbl_alloc_pa,
					  pt_attrs->l1_tbl_alloc_sz);
		}
		kfree(pt_attrs);
	}

	if (dev_context->resources) {
		host_res = dev_context->resources;
		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			if ((host_res->mem_base[1]) &&
			    (host_res->mem_phys[1])) {
				mem_free_phys_mem((void *)host_res->mem_base[1],
						  host_res->mem_phys[1],
						  shm_size);
			}
		} else {
			dev_dbg(bridge, "%s: Error getting shm size "
				"from registry: %x. Not calling "
				"mem_free_phys_mem\n", __func__,
				status);
		}
		host_res->mem_base[1] = 0;
		host_res->mem_phys[1] = 0;

		if (host_res->mem_base[0])
			iounmap((void *)host_res->mem_base[0]);
		if (host_res->mem_base[2])
			iounmap((void *)host_res->mem_base[2]);
		if (host_res->mem_base[3])
			iounmap((void *)host_res->mem_base[3]);
		if (host_res->mem_base[4])
			iounmap((void *)host_res->mem_base[4]);
		if (host_res->dmmu_base)
			iounmap(host_res->dmmu_base);
		if (host_res->per_base)
			iounmap(host_res->per_base);
		if (host_res->per_pm_base)
			iounmap((void *)host_res->per_pm_base);
		if (host_res->core_pm_base)
			iounmap((void *)host_res->core_pm_base);

		host_res->mem_base[0] = (u32) NULL;
		host_res->mem_base[2] = (u32) NULL;
		host_res->mem_base[3] = (u32) NULL;
		host_res->mem_base[4] = (u32) NULL;
		host_res->dmmu_base = NULL;

		kfree(host_res);
	}

	/* Free the driver's device context: */
	kfree(drv_datap->base_img);
	kfree((void *)dev_ctxt);
	return status;
}
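
/*
 * ======== bridge_brd_mem_copy ========
 *      Copies DSP memory in BUFFERSIZE chunks through an MPU-side bounce
 *      buffer; the source is always read via the external-memory path.
 */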
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	u32 src_addr = dsp_src_addr;
	u32 dest_addr = dsp_dest_addr;
	u32 copy_bytes = 0;
	u32 total_bytes = ul_num_bytes;
	u8 host_buf[BUFFERSIZE];
	struct bridge_dev_context *dev_context = dev_ctxt;

	while (total_bytes > 0 && !status) {
		copy_bytes =
		    total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
		/* Read from External memory */
		status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
					   copy_bytes, mem_type);
		if (!status) {
			if (dest_addr < (dev_context->dsp_start_add +
					 dev_context->internal_size)) {
				/* Write to Internal memory */
				status = write_dsp_data(dev_ctxt, host_buf,
							dest_addr, copy_bytes,
							mem_type);
			} else {
				/* Write to External memory */
				status =
				    write_ext_dsp_data(dev_ctxt, host_buf,
						       dest_addr, copy_bytes,
						       mem_type, false);
			}
		}
		total_bytes -= copy_bytes;
		src_addr += copy_bytes;
		dest_addr += copy_bytes;
	}
	return status;
}

/* Unlike bridge_brd_write, this memory write does not halt the DSP */
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 ul_remain_bytes = 0;
	u32 ul_bytes = 0;

	ul_remain_bytes = ul_num_bytes;
	while (ul_remain_bytes > 0 && !status) {
		ul_bytes =
		    ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
		if (dsp_addr < (dev_context->dsp_start_add +
				dev_context->internal_size)) {
			status =
			    write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					   ul_bytes, mem_type);
		} else {
			status = write_ext_dsp_data(dev_ctxt, host_buff,
						    dsp_addr, ul_bytes,
						    mem_type, true);
		}
		ul_remain_bytes -= ul_bytes;
		dsp_addr += ul_bytes;
		host_buff = host_buff + ul_bytes;
	}
	return status;
}

/*
 * ======== bridge_brd_mem_map ========
 *      This function maps MPU buffer to the DSP address space. It performs
 *  linear to physical address translation if required. It translates each
 *  page since linear addresses can be physically non-contiguous
 *  All address & size arguments are assumed to be page aligned (in proc.c)
 *
 *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
 */
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages)
{
	u32 attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct hw_mmu_map_attrs_t hw_attrs;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	u32 write = 0;
	u32 num_usr_pgs = 0;
	struct page *mapped_page, *pg;
	s32 pg_num;
	u32 va = virt_addr;
	struct task_struct *curr_task = current;
	u32 pg_i = 0;
	u32 mpu_addr, pa;

	dev_dbg(bridge,
		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
		ul_map_attr);
	if (ul_num_bytes == 0)
		return -EINVAL;

	if (ul_map_attr & DSP_MAP_DIR_MASK) {
		attrs = ul_map_attr;
	} else {
		/* Assign default attributes */
		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
	}
	/* Take mapping properties */
	if (attrs & DSP_MAPBIGENDIAN)
		hw_attrs.endianism = HW_BIG_ENDIAN;
	else
		hw_attrs.endianism = HW_LITTLE_ENDIAN;

	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
	/* Ignore element_size if mixed_size is enabled */
	if (hw_attrs.mixed_size == 0) {
		if (attrs & DSP_MAPELEMSIZE8) {
			/* Size is 8 bit */
			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
		} else if (attrs & DSP_MAPELEMSIZE16) {
			/* Size is 16 bit */
			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
		} else if (attrs & DSP_MAPELEMSIZE32) {
			/* Size is 32 bit */
			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
		} else if (attrs & DSP_MAPELEMSIZE64) {
			/* Size is 64 bit */
			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
		} else {
			/*
			 * Mixedsize isn't enabled, so size can't be
			 * zero here
			 */
			return -EINVAL;
		}
	}
	if (attrs & DSP_MAPDONOTLOCK)
		hw_attrs.donotlockmpupage = 1;
	else
		hw_attrs.donotlockmpupage = 0;

	if (attrs & DSP_MAPVMALLOCADDR) {
		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
				       ul_num_bytes, &hw_attrs);
	}
	/*
	 * Do OS-specific user-va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	if ((attrs & DSP_MAPPHYSICALADDR)) {
		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
				    ul_num_bytes, &hw_attrs);
		goto func_cont;
	}

	/*
	 * Important Note: ul_mpu_addr is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here!
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ul_mpu_addr);
	if (vma)
		dev_dbg(bridge,
			"VMA for UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);

	/*
	 * It is observed that under some circumstances, the user buffer is
	 * spread across several VMAs. So loop through and check if the entire
	 * user buffer is covered
	 */
	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
		/* jump to the next VMA region */
		vma = find_vma(mm, vma->vm_end + 1);
		dev_dbg(bridge,
			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);
	}
	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, ul_mpu_addr, ul_num_bytes);
		status = -EINVAL;
		up_read(&mm->mmap_sem);
		goto func_cont;
	}

	if (vma->vm_flags & VM_IO) {
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		mpu_addr = ul_mpu_addr;

		/* Get the physical addresses for user buffer */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pa = user_va2_pa(mm, mpu_addr);
			if (!pa) {
				status = -EPERM;
				pr_err("DSPBRIDGE: VM_IO mapping physical "
				       "address is invalid\n");
				break;
			}
			if (pfn_valid(__phys_to_pfn(pa))) {
				pg = PHYS_TO_PAGE(pa);
				get_page(pg);
				if (page_count(pg) < 1) {
					pr_err("Bad page in VM_IO buffer\n");
					bad_page_dump(pa, pg);
				}
			}
			status = pte_set(dev_context->pt_attrs, pa,
					 va, HW_PAGE_SIZE4KB, &hw_attrs);
			if (status)
				break;

			va += HW_PAGE_SIZE4KB;
			mpu_addr += HW_PAGE_SIZE4KB;
			pa += HW_PAGE_SIZE4KB;
		}
	} else {
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
			write = 1;

		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
						write, 1, &mapped_page, NULL);
			if (pg_num > 0) {
				if (page_count(mapped_page) < 1) {
					pr_err("Bad page count after doing "
					       "get_user_pages on "
					       "user buffer\n");
					bad_page_dump(page_to_phys(mapped_page),
						      mapped_page);
				}
				status = pte_set(dev_context->pt_attrs,
						 page_to_phys(mapped_page), va,
						 HW_PAGE_SIZE4KB, &hw_attrs);
				if (status)
					break;

				if (mapped_pages)
					mapped_pages[pg_i] = mapped_page;

				va += HW_PAGE_SIZE4KB;
				ul_mpu_addr += HW_PAGE_SIZE4KB;
			} else {
				pr_err("DSPBRIDGE: get_user_pages FAILED, "
				       "MPU addr = 0x%x, "
				       "vma->vm_flags = 0x%lx, "
				       "get_user_pages Err "
				       "Value = %d, Buffer "
				       "size=0x%x\n", ul_mpu_addr,
				       vma->vm_flags, pg_num, ul_num_bytes);
				status = -EPERM;
				break;
			}
		}
	}
	up_read(&mm->mmap_sem);
func_cont:
	if (status) {
		/*
		 * Roll back the mapped pages in case the mapping failed
		 * midway
		 */
		if (pg_i) {
			bridge_brd_mem_un_map(dev_context, virt_addr,
					      (pg_i * PG_SIZE4K));
		}
		status = -EPERM;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead of from pte_update to avoid
	 * unnecessary repetition while mapping non-contiguous physical
	 * regions of a virtual region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}

/*
 * ======== bridge_brd_mem_un_map ========
 *      Invalidate the PTEs for the DSP VA block to be unmapped.
 *
 *      PTEs of a mapped memory block are contiguous in any page table
 *      So, instead of looking up the PTE address for every 4K block,
 *      we clear consecutive PTEs until we unmap all the bytes
 */
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes)
{
	u32 l1_base_va;
	u32 l2_base_va;
	u32 l2_base_pa;
	u32 l2_page_num;
	u32 pte_val;
	u32 pte_size;
	u32 pte_count;
	u32 pte_addr_l1;
	u32 pte_addr_l2 = 0;
	u32 rem_bytes;
	u32 rem_bytes_l2;
	u32 va_curr;
	struct page *pg = NULL;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt = dev_context->pt_attrs;
	u32 temp;
	u32 paddr;
	u32 numof4k_pages = 0;

	va_curr = virt_addr;
	rem_bytes = ul_num_bytes;
	rem_bytes_l2 = 0;
	l1_base_va = pt->l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
		ul_num_bytes, l1_base_va, pte_addr_l1);

	while (rem_bytes && !status) {
		u32 va_curr_orig = va_curr;
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
		pte_val = *(u32 *) pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);

		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
			goto skip_coarse_page;

		/*
		 * Get the L2 PA from the L1 PTE, and find
		 * corresponding L2 VA
		 */
		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
		l2_page_num =
		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		/*
		 * Find the L2 PTE address from which we will start
		 * clearing, the number of PTEs to be cleared on this
		 * page, and the size of VA space that needs to be
		 * cleared on this L2 page
		 */
		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
		if (rem_bytes < (pte_count * PG_SIZE4K))
			pte_count = rem_bytes / PG_SIZE4K;
		rem_bytes_l2 = pte_count * PG_SIZE4K;

		/*
		 * Unmap the VA space on this L2 PT. A quicker way
		 * would be to clear pte_count entries starting from
		 * pte_addr_l2. However, the code below checks that we don't
		 * clear invalid entries or less than 64KB for a 64KB
		 * entry. Similar checking is done for L1 PTEs too
		 * below
		 */
		while (rem_bytes_l2 && !status) {
			pte_val = *(u32 *) pte_addr_l2;
			pte_size = hw_mmu_pte_size_l2(pte_val);
			/* va_curr aligned to pte_size? */
			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
			    va_curr & (pte_size - 1)) {
				status = -EPERM;
				break;
			}

			/* Collect Physical addresses from VA */
			paddr = (pte_val & ~(pte_size - 1));
			if (pte_size == HW_PAGE_SIZE64KB)
				numof4k_pages = 16;
			else
				numof4k_pages = 1;
			temp = 0;
			while (temp++ < numof4k_pages) {
				if (!pfn_valid(__phys_to_pfn(paddr))) {
					paddr += HW_PAGE_SIZE4KB;
					continue;
				}
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
				paddr += HW_PAGE_SIZE4KB;
			}
			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
				status = -EPERM;
				goto EXIT_LOOP;
			}

			status = 0;
			rem_bytes_l2 -= pte_size;
			va_curr += pte_size;
			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
		}
		spin_lock(&pt->pg_lock);
		if (rem_bytes_l2 == 0) {
			pt->pg_info[l2_page_num].num_entries -= pte_count;
			if (pt->pg_info[l2_page_num].num_entries == 0) {
				/*
				 * Clear the L1 PTE pointing to the L2 PT
				 */
				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
						      HW_MMU_COARSE_PAGE_SIZE))
					status = 0;
				else {
					status = -EPERM;
					spin_unlock(&pt->pg_lock);
					goto EXIT_LOOP;
				}
			}
			rem_bytes -= pte_count * PG_SIZE4K;
		} else
			status = -EPERM;

		spin_unlock(&pt->pg_lock);
		continue;
skip_coarse_page:
		/* va_curr aligned to pte_size? */
		/* pte_size = 1 MB or 16 MB */
		if (pte_size == 0 || rem_bytes < pte_size ||
		    va_curr & (pte_size - 1)) {
			status = -EPERM;
			break;
		}

		if (pte_size == HW_PAGE_SIZE1MB)
			numof4k_pages = 256;
		else
			numof4k_pages = 4096;
		temp = 0;
		/* Collect Physical addresses from VA */
		paddr = (pte_val & ~(pte_size - 1));
		while (temp++ < numof4k_pages) {
			if (pfn_valid(__phys_to_pfn(paddr))) {
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
			}
			paddr += HW_PAGE_SIZE4KB;
		}
		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
			status = 0;
			rem_bytes -= pte_size;
			va_curr += pte_size;
		} else {
			status = -EPERM;
			goto EXIT_LOOP;
		}
	}
	/*
	 * It is better to flush the TLB here, so that any stale old entries
	 * get flushed
	 */
EXIT_LOOP:
	flush_all(dev_context);
	dev_dbg(bridge,
		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
	return status;
}

/*
 * ======== user_va2_pa ========
 * Purpose:
 *      This function walks through the page tables to convert a userland
 *      virtual address to physical address
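 *
 *      The caller is expected to hold mm->mmap_sem across the walk, as
 *      bridge_brd_mem_map() does.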
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_map(pmd, address);
	if (ptep) {
		pte = *ptep;
		if (pte_present(pte))
			return pte & PAGE_MASK;
	}

	return 0;
}

/*
 * ======== pte_update ========
 *      This function calculates the optimum page-aligned addresses and sizes
 *      Caller must pass page-aligned values
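 *
 *      It greedily maps with the largest MMU page size (16MB, 1MB, 64KB,
 *      4KB) to which both PA and VA are currently aligned; e.g. a
 *      1MB-aligned region of 1MB + 4KB becomes one 1MB mapping followed
 *      by one 4KB mapping.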
 */
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
		      u32 va, u32 size,
		      struct hw_mmu_map_attrs_t *map_attrs)
{
	u32 i;
	u32 all_bits;
	u32 pa_curr = pa;
	u32 va_curr = va;
	u32 num_bytes = size;
	struct bridge_dev_context *dev_context = dev_ctxt;
	int status = 0;
	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
	};

	while (num_bytes && !status) {
		/* To find the max. page size with which both PA & VA are
		 * aligned */
		all_bits = pa_curr | va_curr;

		for (i = 0; i < 4; i++) {
			if ((num_bytes >= page_size[i]) && ((all_bits &
							     (page_size[i] -
							      1)) == 0)) {
				status =
				    pte_set(dev_context->pt_attrs, pa_curr,
					    va_curr, page_size[i], map_attrs);
				pa_curr += page_size[i];
				va_curr += page_size[i];
				num_bytes -= page_size[i];
				/* Don't try smaller sizes. Hopefully we have
				 * reached an address aligned to a bigger page
				 * size */
				break;
			}
		}
	}

	return status;
}

/*
 * ======== pte_set ========
 *      This function calculates PTE address (MPU virtual) to be updated
 *      It also manages the L2 page tables
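 *
 *      A 4KB or 64KB mapping needs an L2 (coarse) table: if the L1 entry
 *      is empty, a free L2 page is claimed from the pool, and num_entries
 *      tracks its occupancy (a 64KB mapping occupies 16 entries).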
 */
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
		   u32 size, struct hw_mmu_map_attrs_t *attrs)
{
	u32 i;
	u32 pte_val;
	u32 pte_addr_l1;
	u32 pte_size;
	/* Base address of the PT that will be updated */
	u32 pg_tbl_va;
	u32 l1_base_va;
	/* Compiler warns that the next three variables might be used
	 * uninitialized in this function. Doesn't seem so. Working around,
	 * anyways. */
	u32 l2_base_va = 0;
	u32 l2_base_pa = 0;
	u32 l2_page_num = 0;
	int status = 0;

	l1_base_va = pt->l1_base_va;
	pg_tbl_va = l1_base_va;
	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
			pte_val = *(u32 *) pte_addr_l1;
			pte_size = hw_mmu_pte_size_l1(pte_val);
		} else {
			return -EPERM;
		}
		spin_lock(&pt->pg_lock);
		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
			/* Get the L2 PA from the L1 PTE, and find
			 * corresponding L2 VA */
			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
			l2_base_va =
			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
			l2_page_num =
			    (l2_base_pa -
			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		} else if (pte_size == 0) {
			/* L1 PTE is invalid. Allocate a L2 PT and
			 * point the L1 PTE to it */
			/* Find a free L2 PT. */
			for (i = 0; (i < pt->l2_num_pages) &&
			     (pt->pg_info[i].num_entries != 0); i++)
				;
			if (i < pt->l2_num_pages) {
				l2_page_num = i;
				l2_base_pa = pt->l2_base_pa + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				l2_base_va = pt->l2_base_va + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				/* Endianness attributes are ignored for
				 * HW_MMU_COARSE_PAGE_SIZE */
				status =
				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
						   HW_MMU_COARSE_PAGE_SIZE,
						   attrs);
			} else {
				status = -ENOMEM;
			}
		} else {
			/* Found valid L1 PTE of another size.
			 * Should not overwrite it. */
			status = -EPERM;
		}
		if (!status) {
			pg_tbl_va = l2_base_va;
			if (size == HW_PAGE_SIZE64KB)
				pt->pg_info[l2_page_num].num_entries += 16;
			else
				pt->pg_info[l2_page_num].num_entries++;
			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
				"%x, num_entries %x\n", l2_base_va,
				l2_base_pa, l2_page_num,
				pt->pg_info[l2_page_num].num_entries);
		}
		spin_unlock(&pt->pg_lock);
	}
	if (!status) {
		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
			pg_tbl_va, pa, va, size);
		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
			"mixed_size %x\n", attrs->endianism,
			attrs->element_size, attrs->mixed_size);
		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
	}

	return status;
}

/* Memory map kernel VA -- memory allocated with vmalloc */
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
			   u32 ul_mpu_addr, u32 virt_addr,
			   u32 ul_num_bytes,
			   struct hw_mmu_map_attrs_t *hw_attrs)
{
	int status = 0;
	struct page *page[1];
	u32 i;
	u32 pa_curr;
	u32 pa_next;
	u32 va_curr;
	u32 size_curr;
	u32 num_pages;
	u32 pa;
	u32 num_of4k_pages;
	u32 temp = 0;

	/*
	 * Do Kernel va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
	i = 0;
	va_curr = ul_mpu_addr;
	page[0] = vmalloc_to_page((void *)va_curr);
	pa_next = page_to_phys(page[0]);
	while (!status && (i < num_pages)) {
		/*
		 * Reuse pa_next from the previous iteration to avoid
		 * an extra va2pa call
		 */
		pa_curr = pa_next;
		size_curr = PAGE_SIZE;
		/*
		 * If the next page is physically contiguous,
		 * map it with the current one by increasing
		 * the size of the region to be mapped
		 */
		while (++i < num_pages) {
			page[0] =
			    vmalloc_to_page((void *)(va_curr + size_curr));
			pa_next = page_to_phys(page[0]);

			if (pa_next == (pa_curr + size_curr))
				size_curr += PAGE_SIZE;
			else
				break;
		}
		if (pa_next == 0) {
			status = -ENOMEM;
			break;
		}
		pa = pa_curr;
		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
		while (temp++ < num_of4k_pages) {
			get_page(PHYS_TO_PAGE(pa));
			pa += HW_PAGE_SIZE4KB;
		}
		status = pte_update(dev_context, pa_curr, virt_addr +
				    (va_curr - ul_mpu_addr), size_curr,
				    hw_attrs);
		va_curr += size_curr;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead of from pte_update to avoid
	 * unnecessary repetition while mapping non-contiguous physical
	 * regions of a virtual region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}

/*
 * ======== wait_for_start ========
 *      Wait for the signal from DSP that it has started, or time out.
 */
bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr)
{
	u16 timeout = TIHELEN_ACKTIMEOUT;
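	/* Each poll below delays 10 us, so the overall timeout is ~100 ms */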

	/* Wait for response from board */
	while (__raw_readw(dw_sync_addr) && --timeout)
		udelay(10);

	/* If timed out: return false */
	if (!timeout) {
		pr_err("%s: Timed out waiting for DSP to start\n", __func__);
		return false;
	}
	return true;
}