/*
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * Processor Manager Driver for TI OMAP3430 EVM.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/platform_data/dsp-omap.h>

#include <linux/types.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <linux/mmzone.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/drv.h>
#include <dspbridge/sync.h>

/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>

/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/dspdeh.h>
#include <dspbridge/dspio.h>
#include <dspbridge/dspmsg.h>
#include <dspbridge/pwr.h>
#include <dspbridge/io_sm.h>

/* ----------------------------------- Platform Manager */
#include <dspbridge/dev.h>
#include <dspbridge/dspapi.h>
#include <dspbridge/dmm.h>
#include <dspbridge/wdt.h>

/* ----------------------------------- Local */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include "tiomap_io.h"
/* Offset in shared mem to write to in order to synchronize start with DSP */
#define SHMSYNCOFFSET 4		/* GPP byte offset */

#define BUFFERSIZE 1024

#define TIHELEN_ACKTIMEOUT 10000

#define MMU_SECTION_ADDR_MASK	0xFFF00000
#define MMU_SSECTION_ADDR_MASK	0xFF000000
#define MMU_LARGE_PAGE_MASK	0xFFFF0000
#define MMU_SMALL_PAGE_MASK	0xFFFFF000
#define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
#define PAGES_II_LVL_TABLE	512
#define PHYS_TO_PAGE(phys)	pfn_to_page((phys) >> PAGE_SHIFT)
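
/*
 * Illustration (not part of the driver logic): the masks above keep the
 * address bits that identify an MMU entry of the given size. For a 1 MB
 * section, 0x87654321 & MMU_SECTION_ADDR_MASK == 0x87600000, leaving the
 * low 20 bits as the in-section offset; a 64 KB large page keeps 16
 * offset bits, a 4 KB small page keeps 12, and a 16 MB supersection
 * keeps 24.
 */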
/* ----------------------------------- Forward Declarations */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
				u32 dsp_addr);
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
				int *board_state);
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt);
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state);
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
				u32 dsp_dest_addr, u32 dsp_src_addr,
				u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type);
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
				u32 ul_mpu_addr, u32 virt_addr,
				u32 ul_num_bytes, u32 ul_map_attr,
				struct page **mapped_pages);
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				u32 virt_addr, u32 ul_num_bytes);
static int bridge_dev_create(struct bridge_dev_context
				**dev_cntxt,
				struct dev_object *hdev_obj,
				struct cfg_hostres *config_param);
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
				u32 dw_cmd, void *pargs);
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
static u32 user_va2_pa(struct mm_struct *mm, u32 address);
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
				u32 va, u32 size,
				struct hw_mmu_map_attrs_t *map_attrs);
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
				u32 size, struct hw_mmu_map_attrs_t *attrs);
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
				u32 ul_mpu_addr, u32 virt_addr,
				u32 ul_num_bytes,
				struct hw_mmu_map_attrs_t *hw_attrs);
bool wait_for_start(struct bridge_dev_context *dev_context,
				void __iomem *sync_addr);
/* ----------------------------------- Globals */

/* Attributes of L2 page tables for DSP MMU */
struct page_info {
	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
};

/* Attributes used to manage the DSP MMU page tables */
struct pg_table_attrs {
	spinlock_t pg_lock;	/* Critical section object handle */

	u32 l1_base_pa;		/* Physical address of the L1 PT */
	u32 l1_base_va;		/* Virtual address of the L1 PT */
	u32 l1_size;		/* Size of the L1 PT */
	u32 l1_tbl_alloc_pa;
	/* Physical address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_va;
	/* Virtual address of Allocated mem for L1 table. May not be aligned */
	u32 l1_tbl_alloc_sz;
	/* Size of consistent memory allocated for L1 table.
	 * May not be aligned */

	u32 l2_base_pa;		/* Physical address of the L2 PT */
	u32 l2_base_va;		/* Virtual address of the L2 PT */
	u32 l2_size;		/* Size of the L2 PT */
	u32 l2_tbl_alloc_pa;
	/* Physical address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_va;
	/* Virtual address of Allocated mem for L2 table. May not be aligned */
	u32 l2_tbl_alloc_sz;
	/* Size of consistent memory allocated for L2 table.
	 * May not be aligned */

	u32 l2_num_pages;	/* Number of allocated L2 PT */
	/* Array [l2_num_pages] of L2 PT info structs */
	struct page_info *pg_info;
};
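
/*
 * Geometry note: the hw_mmu layer models the IVA2 MMU after the ARM
 * two-level short-descriptor format, so the L1 table is 16 KB (4096
 * 32-bit entries, one per 1 MB of DSP VA) and each coarse L2 table is
 * HW_MMU_COARSE_PAGE_SIZE bytes (256 entries covering 1 MB in 4 KB
 * pages). bridge_dev_create() below sizes l1_size and l2_size on that
 * assumption.
 */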
/*
 *  This Bridge driver's function interface table.
 */
static struct bridge_drv_interface drv_interface_fxns = {
	/* Bridge API ver. for which this bridge driver is built. */
	BRD_API_MAJOR_VERSION,
	BRD_API_MINOR_VERSION,
	bridge_dev_create,
	bridge_dev_destroy,
	bridge_dev_ctrl,
	bridge_brd_monitor,
	bridge_brd_start,
	bridge_brd_stop,
	bridge_brd_status,
	bridge_brd_read,
	bridge_brd_write,
	bridge_brd_set_state,
	bridge_brd_mem_copy,
	bridge_brd_mem_write,
	bridge_brd_mem_map,
	bridge_brd_mem_un_map,
	/* The following CHNL functions are provided by chnl_io.lib: */
	bridge_chnl_create,
	bridge_chnl_destroy,
	bridge_chnl_open,
	bridge_chnl_close,
	bridge_chnl_add_io_req,
	bridge_chnl_get_ioc,
	bridge_chnl_cancel_io,
	bridge_chnl_flush_io,
	bridge_chnl_get_info,
	bridge_chnl_get_mgr_info,
	bridge_chnl_idle,
	bridge_chnl_register_notify,
	/* The following IO functions are provided by chnl_io.lib: */
	bridge_io_create,
	bridge_io_destroy,
	bridge_io_on_loaded,
	bridge_io_get_proc_load,
	/* The following msg_ctrl functions are provided by chnl_io.lib: */
	bridge_msg_create,
	bridge_msg_create_queue,
	bridge_msg_delete,
	bridge_msg_delete_queue,
	bridge_msg_get,
	bridge_msg_put,
	bridge_msg_register_notify,
	bridge_msg_set_queue_id,
};

static struct notifier_block dsp_mbox_notifier = {
	.notifier_call = io_mbox_msg,
};
static inline void flush_all(struct bridge_dev_context *dev_context)
{
	if (dev_context->brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->brd_state == BRD_HIBERNATION)
		wake_dsp(dev_context, NULL);

	hw_mmu_tlb_flush_all(dev_context->dsp_mmu_base);
}
static void bad_page_dump(u32 pa, struct page *pg)
{
	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
	pr_emerg("Bad page state in process '%s'\n"
		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
		 (unsigned long)pg->flags, pg->mapping,
		 page_mapcount(pg), page_count(pg));
	dump_stack();
}
/*
 *  ======== bridge_drv_entry ========
 *      Bridge Driver entry point.
 */
void bridge_drv_entry(struct bridge_drv_interface **drv_intf,
		      const char *driver_file_name)
{
	if (strcmp(driver_file_name, "UMA") == 0)
		*drv_intf = &drv_interface_fxns;
	else
		dev_dbg(bridge, "%s Unknown Bridge file name", __func__);
}
/*
 *  ======== bridge_brd_monitor ========
 *      This bridge_brd_monitor puts the DSP into a loadable state;
 *      i.e., the application can load and start the device.
 *
 *  Preconditions:
 *      Device in 'OFF' state.
 */
static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
{
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 temp;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	temp = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
			OMAP_POWERSTATEST_MASK;
	if (!(temp & 0x02)) {
		/* IVA2 is not in ON state */
		/* Read and set PM_PWSTCTRL_IVA2 to ON */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_ON, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Wait until the state has moved to ON */
		while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					      OMAP2_PM_PWSTST) &
					      OMAP_INTRANSITION_MASK)
			;
		/* Disable Automatic transition */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
	dsp_clk_enable(DSP_CLK_IVA2);

	/* set the device state to IDLE */
	dev_context->brd_state = BRD_IDLE;

	return 0;
}
/*
 *  ======== bridge_brd_read ========
 *      Reads buffers for DSP memory.
 */
static int bridge_brd_read(struct bridge_dev_context *dev_ctxt,
			   u8 *host_buff, u32 dsp_addr,
			   u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 offset;
	u32 dsp_base_addr = dev_ctxt->dsp_base_addr;

	if (dsp_addr < dev_context->dsp_start_add) {
		status = -EPERM;
		return status;
	}
	/* change here to account for the 3 bands of the DSP internal memory */
	if ((dsp_addr - dev_context->dsp_start_add) <
	    dev_context->internal_size) {
		offset = dsp_addr - dev_context->dsp_start_add;
	} else {
		status = read_ext_dsp_data(dev_context, host_buff, dsp_addr,
					   ul_num_bytes, mem_type);
		return status;
	}
	/* copy the data from DSP memory */
	memcpy(host_buff, (void *)(dsp_base_addr + offset), ul_num_bytes);
	return status;
}
/*
 *  ======== bridge_brd_set_state ========
 *      This routine updates the Board status.
 */
static int bridge_brd_set_state(struct bridge_dev_context *dev_ctxt,
				u32 brd_state)
{
	struct bridge_dev_context *dev_context = dev_ctxt;

	dev_context->brd_state = brd_state;
	return 0;
}
/*
 *  ======== bridge_brd_start ========
 *      Initializes DSP MMU and starts DSP.
 *
 *  Preconditions:
 *  a) DSP domain is 'ACTIVE'.
 *  b) DSP_RST1 is asserted.
 *  c) DSP_RST2 is released.
 */
static int bridge_brd_start(struct bridge_dev_context *dev_ctxt,
			    u32 dsp_addr)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	void __iomem *sync_addr;
	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
	u32 shm_sync_pa;
	/* Offset of shm_base_virt from tlb_base_virt */
	u32 ul_shm_offset_virt;
	s32 entry_ndx;
	s32 itmp_entry_ndx = 0;	/* DSP-MMU TLB entry base address */
	struct cfg_hostres *resources = NULL;
	u32 temp;
	u32 ul_dsp_clk_rate;
	u32 ul_dsp_clk_addr;
	u32 ul_bios_gp_timer;
	u32 clk_cmd;
	struct io_mgr *hio_mgr;
	u32 ul_load_monitor_timer;
	u32 wdt_en = 0;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* The device context contains all the mmu setup info from when the
	 * last dsp base image was loaded. The first entry is always
	 * SHMMEM base. */
	/* Get SHM_BEG - convert to byte address */
	(void)dev_get_symbol(dev_context->dev_obj, SHMBASENAME,
			     &ul_shm_base_virt);
	ul_shm_base_virt *= DSPWORDSIZE;
	/* DSP Virtual address */
	ul_tlb_base_virt = dev_context->atlb_entry[0].dsp_va;
	ul_shm_offset_virt =
		ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
	/* Kernel logical address */
	ul_shm_base = dev_context->atlb_entry[0].gpp_va + ul_shm_offset_virt;

	/* SHM physical sync address */
	shm_sync_pa = dev_context->atlb_entry[0].gpp_pa + ul_shm_offset_virt +
			SHMSYNCOFFSET;

	/* 2nd wd is used as sync field */
	sync_addr = ioremap(shm_sync_pa, SZ_32);
	if (!sync_addr)
		return -ENOMEM;

	/* Write a signature into the shm base + offset; this will
	 * get cleared when the DSP program starts. */
	if ((ul_shm_base_virt == 0) || (ul_shm_base == 0)) {
		pr_err("%s: Illegal SM base\n", __func__);
		status = -EPERM;
	} else {
		__raw_writel(0xffffffff, sync_addr);
	}

	if (!status) {
		resources = dev_context->resources;
		if (!resources)
			status = -EPERM;

		/* Assert RST1 i.e only the RST only for DSP megacell */
		if (!status) {
			(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
					OMAP3430_RST1_IVA2_MASK,
					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
			/* Mask address with 1K for compatibility */
			pdata->set_bootaddr(dsp_addr &
						OMAP3_IVA2_BOOTADDR_MASK);
			pdata->set_bootmode(dsp_debug ? IDLE : DIRECT);
		}
	}
	if (!status) {
		/* Reset and Unreset the RST2, so that BOOTADDR is copied to
		 * IVA2 SYSC register */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
				OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD,
				OMAP2_RM_RSTCTRL);
		udelay(100);
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		udelay(100);

		/* Disable the DSP MMU */
		hw_mmu_disable(resources->dmmu_base);
		/* Disable TWL */
		hw_mmu_twl_disable(resources->dmmu_base);

		/* Only make TLB entry if both addresses are non-zero */
		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
		     entry_ndx++) {
			struct bridge_ioctl_extproc *e =
				&dev_context->atlb_entry[entry_ndx];
			struct hw_mmu_map_attrs_t map_attrs = {
				.endianism = e->endianism,
				.element_size = e->elem_size,
				.mixed_size = e->mixed_mode,
			};

			if (!e->gpp_pa || !e->dsp_va)
				continue;

			dev_dbg(bridge,
				"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
				itmp_entry_ndx,
				e->gpp_pa, e->dsp_va, e->ul_size);

			hw_mmu_tlb_add(dev_context->dsp_mmu_base,
				       e->gpp_pa, e->dsp_va, e->ul_size,
				       itmp_entry_ndx, &map_attrs, 1, 1);

			itmp_entry_ndx++;
		}

		/* Lock the above TLB entries and get the BIOS and load
		 * monitor timer information */
		hw_mmu_num_locked_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_victim_num_set(resources->dmmu_base, itmp_entry_ndx);
		hw_mmu_ttb_set(resources->dmmu_base,
			       dev_context->pt_attrs->l1_base_pa);
		hw_mmu_twl_enable(resources->dmmu_base);
		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */

		temp = __raw_readl((resources->dmmu_base) + 0x10);
		temp = (temp & 0xFFFFFFEF) | 0x11;
		__raw_writel(temp, (resources->dmmu_base) + 0x10);

		/* Let the DSP MMU run */
		hw_mmu_enable(resources->dmmu_base);

		/* Enable the BIOS clock */
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_BIOSGPTIMER,
				     &ul_bios_gp_timer);
		(void)dev_get_symbol(dev_context->dev_obj,
				     BRIDGEINIT_LOADMON_GPTIMER,
				     &ul_load_monitor_timer);
	}

	if (!status) {
		if (ul_load_monitor_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
				ul_load_monitor_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge, "Not able to get the symbol for Load "
				"Monitor Timer\n");
		}

		if (ul_bios_gp_timer != 0xFFFF) {
			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
				ul_bios_gp_timer;
			dsp_peripheral_clk_ctrl(dev_context, &clk_cmd);
		} else {
			dev_dbg(bridge,
				"Not able to get the symbol for BIOS Timer\n");
		}
	}

	if (!status) {
		/* Set the DSP clock rate */
		(void)dev_get_symbol(dev_context->dev_obj,
				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
		/* Set Autoidle Mode for IVA2 PLL */
		(*pdata->dsp_cm_write)(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
				OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL);

		if ((unsigned int *)ul_dsp_clk_addr != NULL) {
			/* Get the clock rate */
			ul_dsp_clk_rate = dsp_clk_get_iva2_rate();
			dev_dbg(bridge, "%s: DSP clock rate (KHZ): 0x%x\n",
				__func__, ul_dsp_clk_rate);
			(void)bridge_brd_write(dev_context,
					       (u8 *) &ul_dsp_clk_rate,
					       ul_dsp_clk_addr,
					       sizeof(u32), 0);
		}
		/*
		 * Enable Mailbox events and also drain any pending
		 * stale messages.
		 */
		dev_context->mbox = omap_mbox_get("dsp", &dsp_mbox_notifier);
		if (IS_ERR(dev_context->mbox)) {
			dev_context->mbox = NULL;
			pr_err("%s: Failed to get dsp mailbox handle\n",
			       __func__);
			status = -EPERM;
		}
	}
	if (!status) {
		/* PM_IVA2GRPSEL_PER = 0xC0; */
		temp = readl(resources->per_pm_base + 0xA8);
		temp = (temp & 0xFFFFFF30) | 0xC0;
		writel(temp, resources->per_pm_base + 0xA8);

		/* PM_MPUGRPSEL_PER &= 0xFFFFFF3F; */
		temp = readl(resources->per_pm_base + 0xA4);
		temp = (temp & 0xFFFFFF3F);
		writel(temp, resources->per_pm_base + 0xA4);
		/* CM_SLEEPDEP_PER |= 0x04; */
		temp = readl(resources->per_base + 0x44);
		temp = (temp & 0xFFFFFFFB) | 0x04;
		writel(temp, resources->per_base + 0x44);

		/* CM_CLKSTCTRL_IVA2 = 0x00000003 - to allow automatic
		 * transitions */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_ENABLE_AUTO,
				OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);

		/* Let DSP go */
		dev_dbg(bridge, "%s Unreset\n", __func__);
		/* Enable DSP MMU Interrupts */
		hw_mmu_event_enable(resources->dmmu_base,
				    HW_MMU_ALL_INTERRUPTS);
		/* release the RST1, DSP starts executing now .. */
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

		dev_dbg(bridge, "Waiting for Sync @ 0x%x\n",
			*(u32 *)sync_addr);
		dev_dbg(bridge, "DSP c_int00 Address = 0x%x\n", dsp_addr);
		if (dsp_debug)
			while (__raw_readw(sync_addr))
				;

		/* Wait for DSP to clear word in shared memory */
		/* Read the Location */
		if (!wait_for_start(dev_context, sync_addr))
			status = -ETIMEDOUT;

		dev_get_symbol(dev_context->dev_obj, "_WDT_enable", &wdt_en);
		if (wdt_en) {
			/* Start wdt */
			dsp_wdt_sm_set((void *)ul_shm_base);
			dsp_wdt_enable(true);
		}

		status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
		if (hio_mgr) {
			io_sh_msetting(hio_mgr, SHM_OPPINFO, NULL);
			/* Write the synchronization bit to indicate the
			 * completion of OPP table update to DSP
			 */
			__raw_writel(0XCAFECAFE, sync_addr);

			/* update board state */
			dev_context->brd_state = BRD_RUNNING;
			/* (void)chnlsm_enable_interrupt(dev_context); */
		} else {
			dev_context->brd_state = BRD_UNKNOWN;
		}
	}

	iounmap(sync_addr);

	return status;
}
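
/*
 * Start-up handshake performed above, summarized for reference:
 *   1. The GPP writes 0xffffffff to the sync word at shm base +
 *      SHMSYNCOFFSET.
 *   2. RST1 is released and the DSP boots; its startup code clears the
 *      sync word, which wait_for_start() polls for.
 *   3. Once the OPP table update is done, the GPP writes 0xCAFECAFE to
 *      the same word to tell the DSP that shared-memory settings are
 *      complete.
 */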
/*
 *  ======== bridge_brd_stop ========
 *      Puts DSP in self loop.
 */
static int bridge_brd_stop(struct bridge_dev_context *dev_ctxt)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt_attrs;
	u32 dsp_pwr_state;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	if (dev_context->brd_state == BRD_STOPPED)
		return status;

	/* as per TRM, it is advised to first drive the IVA2 to 'Standby'
	 * mode, before turning off the clocks. This is to ensure that there
	 * are no pending L3 or other transactions from IVA2 */
	dsp_pwr_state = (*pdata->dsp_prm_read)
			(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
			OMAP_POWERSTATEST_MASK;
	if (dsp_pwr_state != PWRDM_POWER_OFF) {
		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
				OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
		sm_interrupt_dsp(dev_context, MBX_PM_DSPIDLE);
		mdelay(10);

		/* IVA2 is not in OFF state */
		/* Set PM_PWSTCTRL_IVA2 to OFF */
		(*pdata->dsp_prm_rmw_bits)(OMAP_POWERSTATEST_MASK,
			PWRDM_POWER_OFF, OMAP3430_IVA2_MOD, OMAP2_PM_PWSTCTRL);
		/* Set the SW supervised state transition for Sleep */
		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_FORCE_SLEEP,
					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
	}
	udelay(10);
	/* Release the Ext Base virtual Address as the next DSP Program
	 * may have a different load address */
	if (dev_context->dsp_ext_base_addr)
		dev_context->dsp_ext_base_addr = 0;

	dev_context->brd_state = BRD_STOPPED;	/* update board state */

	dsp_wdt_enable(false);

	/* This is a good place to clear the MMU page tables as well */
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
		memset((u8 *) pt_attrs->pg_info, 0x00,
		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
	}
	/* Disable the mailbox interrupts */
	if (dev_context->mbox) {
		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
		omap_mbox_put(dev_context->mbox, &dsp_mbox_notifier);
		dev_context->mbox = NULL;
	}
	/* Reset IVA2 clocks */
	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK |
			OMAP3430_RST2_IVA2_MASK | OMAP3430_RST3_IVA2_MASK,
			OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	dsp_clock_disable_all(dev_context->dsp_per_clks);
	dsp_clk_disable(DSP_CLK_IVA2);

	return status;
}
/*
 *  ======== bridge_brd_status ========
 *      Returns the board status.
 */
static int bridge_brd_status(struct bridge_dev_context *dev_ctxt,
			     int *board_state)
{
	struct bridge_dev_context *dev_context = dev_ctxt;

	*board_state = dev_context->brd_state;
	return 0;
}
/*
 *  ======== bridge_brd_write ========
 *      Copies the buffers to DSP internal or external memory.
 */
static int bridge_brd_write(struct bridge_dev_context *dev_ctxt,
			    u8 *host_buff, u32 dsp_addr,
			    u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;

	if (dsp_addr < dev_context->dsp_start_add) {
		status = -EPERM;
		return status;
	}
	if ((dsp_addr - dev_context->dsp_start_add) <
	    dev_context->internal_size) {
		status = write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					ul_num_bytes, mem_type);
	} else {
		status = write_ext_dsp_data(dev_context, host_buff, dsp_addr,
					    ul_num_bytes, mem_type, false);
	}

	return status;
}
/*
 *  ======== bridge_dev_create ========
 *      Creates a driver object. Puts DSP in self loop.
 */
static int bridge_dev_create(struct bridge_dev_context
				**dev_cntxt,
				struct dev_object *hdev_obj,
				struct cfg_hostres *config_param)
{
	int status = 0;
	struct bridge_dev_context *dev_context = NULL;
	s32 entry_ndx;
	struct cfg_hostres *resources = config_param;
	struct pg_table_attrs *pt_attrs;
	u32 pg_tbl_pa;
	u32 pg_tbl_va;
	u32 align_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* Allocate and initialize a data structure to contain the bridge
	 * driver state, which becomes the context for later calls into this
	 * driver */
	dev_context = kzalloc(sizeof(struct bridge_dev_context), GFP_KERNEL);
	if (!dev_context) {
		status = -ENOMEM;
		goto func_end;
	}

	dev_context->dsp_start_add = (u32) OMAP_GEM_BASE;
	dev_context->self_loop = (u32) NULL;
	dev_context->dsp_per_clks = 0;
	dev_context->internal_size = OMAP_DSP_SIZE;
	/* Clear dev context MMU table entries.
	 * These get set on bridge_io_on_loaded() call after program loaded. */
	for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB; entry_ndx++) {
		dev_context->atlb_entry[entry_ndx].gpp_pa =
			dev_context->atlb_entry[entry_ndx].dsp_va = 0;
	}
	dev_context->dsp_base_addr = (u32) MEM_LINEAR_ADDRESS((void *)
					(config_param->mem_base[3]),
					config_param->mem_length[3]);
	if (!dev_context->dsp_base_addr)
		status = -EPERM;

	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
	if (pt_attrs != NULL) {
		pt_attrs->l1_size = SZ_16K;	/* 4096 entries of 32 bits */
		align_size = pt_attrs->l1_size;
		/* Align sizes are expected to be power of 2 */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
						     align_size, &pg_tbl_pa);

		/* Check if the PA is aligned for us */
		if ((pg_tbl_pa) & (align_size - 1)) {
			/* PA not aligned to page table size,
			 * try with more allocation and align */
			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
					  pt_attrs->l1_size);
			/* we like to get aligned on L1 table size */
			pg_tbl_va =
			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
						     align_size, &pg_tbl_pa);
			/* We should be able to get aligned table now */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
			/* Align the PA to the next 'align' boundary */
			pt_attrs->l1_base_pa =
			    ((pg_tbl_pa) +
			     (align_size - 1)) & (~(align_size - 1));
			pt_attrs->l1_base_va =
			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
		} else {
			/* We got aligned PA, cool */
			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
			pt_attrs->l1_base_pa = pg_tbl_pa;
			pt_attrs->l1_base_va = pg_tbl_va;
		}
		if (pt_attrs->l1_base_va)
			memset((u8 *) pt_attrs->l1_base_va, 0x00,
			       pt_attrs->l1_size);

		/* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
		 * L4 pages */
		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
				pt_attrs->l2_num_pages;
		align_size = 4;	/* Make it u32 aligned */
		/* we like to get aligned on L1 table size */
		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
						     align_size, &pg_tbl_pa);
		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
		pt_attrs->l2_base_pa = pg_tbl_pa;
		pt_attrs->l2_base_va = pg_tbl_va;

		if (pt_attrs->l2_base_va)
			memset((u8 *) pt_attrs->l2_base_va, 0x00,
			       pt_attrs->l2_size);

		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
					    sizeof(struct page_info),
					    GFP_KERNEL);
		dev_dbg(bridge,
			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
			"%x, size %x\n", pt_attrs->l1_base_pa,
			pt_attrs->l1_base_va, pt_attrs->l1_size,
			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
			pt_attrs->l2_size);
		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
	}
	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
		dev_context->pt_attrs = pt_attrs;
	else
		status = -ENOMEM;

	if (!status) {
		spin_lock_init(&pt_attrs->pg_lock);
		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;

		/* Set the Clock Divisor for the DSP module */
		udelay(5);
		/* MMU address is obtained from the host
		 * resources struct */
		dev_context->dsp_mmu_base = resources->dmmu_base;
	}
	if (!status) {
		dev_context->dev_obj = hdev_obj;
		/* Store current board state. */
		dev_context->brd_state = BRD_UNKNOWN;
		dev_context->resources = resources;
		dsp_clk_enable(DSP_CLK_IVA2);
		bridge_brd_stop(dev_context);
		/* Return ptr to our device state to the DSP API for storage */
		*dev_cntxt = dev_context;
	} else {
		if (pt_attrs != NULL) {
			kfree(pt_attrs->pg_info);

			if (pt_attrs->l2_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l2_tbl_alloc_va,
						  pt_attrs->l2_tbl_alloc_pa,
						  pt_attrs->l2_tbl_alloc_sz);
			}
			if (pt_attrs->l1_tbl_alloc_va) {
				mem_free_phys_mem((void *)
						  pt_attrs->l1_tbl_alloc_va,
						  pt_attrs->l1_tbl_alloc_pa,
						  pt_attrs->l1_tbl_alloc_sz);
			}
		}
		kfree(pt_attrs);
		kfree(dev_context);
	}
func_end:
	return status;
}
/*
 *  ======== bridge_dev_ctrl ========
 *      Receives device specific commands.
 */
static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
			   u32 dw_cmd, void *pargs)
{
	int status = 0;
	struct bridge_ioctl_extproc *pa_ext_proc =
					(struct bridge_ioctl_extproc *)pargs;
	s32 ndx;

	switch (dw_cmd) {
	case BRDIOCTL_CHNLREAD:
		break;
	case BRDIOCTL_CHNLWRITE:
		break;
	case BRDIOCTL_SETMMUCONFIG:
		/* store away dsp-mmu setup values for later use */
		for (ndx = 0; ndx < BRDIOCTL_NUMOFMMUTLB; ndx++, pa_ext_proc++)
			dev_context->atlb_entry[ndx] = *pa_ext_proc;
		break;
	case BRDIOCTL_DEEPSLEEP:
	case BRDIOCTL_EMERGENCYSLEEP:
		/* Currently only DSP Idle is supported. Need to update for
		 * later releases */
		status = sleep_dsp(dev_context, PWR_DEEPSLEEP, pargs);
		break;
	case BRDIOCTL_WAKEUP:
		status = wake_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CLK_CTRL:
		/* Looking For Baseport Fix for Clocks */
		status = dsp_peripheral_clk_ctrl(dev_context, pargs);
		break;
	case BRDIOCTL_PWR_HIBERNATE:
		status = handle_hibernation_from_dsp(dev_context);
		break;
	case BRDIOCTL_PRESCALE_NOTIFY:
		status = pre_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_POSTSCALE_NOTIFY:
		status = post_scale_dsp(dev_context, pargs);
		break;
	case BRDIOCTL_CONSTRAINT_REQUEST:
		status = handle_constraints_set(dev_context, pargs);
		break;
	default:
		status = -EPERM;
		break;
	}
	return status;
}
/*
 *  ======== bridge_dev_destroy ========
 *      Destroys the driver object.
 */
static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
{
	struct pg_table_attrs *pt_attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
	    dev_ctxt;
	struct cfg_hostres *host_res;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/* It should never happen */
	if (!dev_ctxt)
		return -EFAULT;

	/* first put the device to stop state */
	bridge_brd_stop(dev_context);
	if (dev_context->pt_attrs) {
		pt_attrs = dev_context->pt_attrs;
		kfree(pt_attrs->pg_info);

		if (pt_attrs->l2_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
					  pt_attrs->l2_tbl_alloc_pa,
					  pt_attrs->l2_tbl_alloc_sz);
		}
		if (pt_attrs->l1_tbl_alloc_va) {
			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
					  pt_attrs->l1_tbl_alloc_pa,
					  pt_attrs->l1_tbl_alloc_sz);
		}
		kfree(pt_attrs);
	}

	if (dev_context->resources) {
		host_res = dev_context->resources;
		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			if ((host_res->mem_base[1]) &&
			    (host_res->mem_phys[1])) {
				mem_free_phys_mem((void *)
						  host_res->mem_base[1],
						  host_res->mem_phys[1],
						  shm_size);
			}
		} else {
			dev_dbg(bridge, "%s: Error getting shm size "
				"from registry: %x. Not calling "
				"mem_free_phys_mem\n", __func__,
				status);
		}
		host_res->mem_base[1] = 0;
		host_res->mem_phys[1] = 0;

		if (host_res->mem_base[0])
			iounmap((void *)host_res->mem_base[0]);
		if (host_res->mem_base[2])
			iounmap((void *)host_res->mem_base[2]);
		if (host_res->mem_base[3])
			iounmap((void *)host_res->mem_base[3]);
		if (host_res->mem_base[4])
			iounmap((void *)host_res->mem_base[4]);
		if (host_res->dmmu_base)
			iounmap(host_res->dmmu_base);
		if (host_res->per_base)
			iounmap(host_res->per_base);
		if (host_res->per_pm_base)
			iounmap((void *)host_res->per_pm_base);
		if (host_res->core_pm_base)
			iounmap((void *)host_res->core_pm_base);

		host_res->mem_base[0] = (u32) NULL;
		host_res->mem_base[2] = (u32) NULL;
		host_res->mem_base[3] = (u32) NULL;
		host_res->mem_base[4] = (u32) NULL;
		host_res->dmmu_base = NULL;

		kfree(host_res);
	}

	/* Free the driver's device context: */
	kfree(drv_datap->base_img);
	kfree((void *)dev_ctxt);
	return status;
}
static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt,
			       u32 dsp_dest_addr, u32 dsp_src_addr,
			       u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	u32 src_addr = dsp_src_addr;
	u32 dest_addr = dsp_dest_addr;
	u32 copy_bytes = 0;
	u32 total_bytes = ul_num_bytes;
	u8 host_buf[BUFFERSIZE];
	struct bridge_dev_context *dev_context = dev_ctxt;

	while (total_bytes > 0 && !status) {
		copy_bytes =
		    total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes;
		/* Read from External memory */
		status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr,
					   copy_bytes, mem_type);
		if (!status) {
			if (dest_addr < (dev_context->dsp_start_add +
					 dev_context->internal_size)) {
				/* Write to Internal memory */
				status = write_dsp_data(dev_ctxt, host_buf,
							dest_addr, copy_bytes,
							mem_type);
			} else {
				/* Write to External memory */
				status =
				    write_ext_dsp_data(dev_ctxt, host_buf,
						       dest_addr, copy_bytes,
						       mem_type, false);
			}
		}
		total_bytes -= copy_bytes;
		src_addr += copy_bytes;
		dest_addr += copy_bytes;
	}
	return status;
}
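
/*
 * Note: the copy above is staged through a BUFFERSIZE (1 KB) bounce
 * buffer on the GPP stack, so a DSP-to-DSP copy of any length runs in
 * constant host memory; each pass moves at most 1 KB.
 */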
/* Mem Write does not halt the DSP to write unlike bridge_brd_write */
static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
				u8 *host_buff, u32 dsp_addr,
				u32 ul_num_bytes, u32 mem_type)
{
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	u32 ul_remain_bytes = 0;
	u32 ul_bytes = 0;

	ul_remain_bytes = ul_num_bytes;
	while (ul_remain_bytes > 0 && !status) {
		ul_bytes =
		    ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes;
		if (dsp_addr < (dev_context->dsp_start_add +
				dev_context->internal_size)) {
			status =
			    write_dsp_data(dev_ctxt, host_buff, dsp_addr,
					   ul_bytes, mem_type);
		} else {
			status = write_ext_dsp_data(dev_ctxt, host_buff,
						    dsp_addr, ul_bytes,
						    mem_type, true);
		}
		ul_remain_bytes -= ul_bytes;
		dsp_addr += ul_bytes;
		host_buff = host_buff + ul_bytes;
	}
	return status;
}
/*
 *  ======== bridge_brd_mem_map ========
 *      This function maps MPU buffer to the DSP address space. It performs
 *  linear to physical address translation if required. It translates each
 *  page since linear addresses can be physically non-contiguous.
 *  All address & size arguments are assumed to be page aligned (in proc.c)
 *
 *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
 */
static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
			      u32 ul_mpu_addr, u32 virt_addr,
			      u32 ul_num_bytes, u32 ul_map_attr,
			      struct page **mapped_pages)
{
	u32 attrs;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct hw_mmu_map_attrs_t hw_attrs;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	u32 write = 0;
	u32 num_usr_pgs = 0;
	struct page *mapped_page, *pg;
	s32 pg_num;
	u32 va = virt_addr;
	struct task_struct *curr_task = current;
	u32 pg_i = 0;
	u32 mpu_addr, pa;

	dev_dbg(bridge,
		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
		ul_map_attr);
	if (ul_num_bytes == 0)
		return -EINVAL;

	if (ul_map_attr & DSP_MAP_DIR_MASK) {
		attrs = ul_map_attr;
	} else {
		/* Assign default attributes */
		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
	}
	/* Take mapping properties */
	if (attrs & DSP_MAPBIGENDIAN)
		hw_attrs.endianism = HW_BIG_ENDIAN;
	else
		hw_attrs.endianism = HW_LITTLE_ENDIAN;

	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
	/* Ignore element_size if mixed_size is enabled */
	if (hw_attrs.mixed_size == 0) {
		if (attrs & DSP_MAPELEMSIZE8) {
			/* Size is 8 bit */
			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
		} else if (attrs & DSP_MAPELEMSIZE16) {
			/* Size is 16 bit */
			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
		} else if (attrs & DSP_MAPELEMSIZE32) {
			/* Size is 32 bit */
			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
		} else if (attrs & DSP_MAPELEMSIZE64) {
			/* Size is 64 bit */
			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
		} else {
			/*
			 * Mixedsize isn't enabled, so size can't be
			 * zero here
			 */
			return -EINVAL;
		}
	}
	if (attrs & DSP_MAPDONOTLOCK)
		hw_attrs.donotlockmpupage = 1;
	else
		hw_attrs.donotlockmpupage = 0;

	if (attrs & DSP_MAPVMALLOCADDR) {
		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
				       ul_num_bytes, &hw_attrs);
	}
	/*
	 * Do OS-specific user-va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	if ((attrs & DSP_MAPPHYSICALADDR)) {
		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
				    ul_num_bytes, &hw_attrs);
		goto func_cont;
	}

	/*
	 * Important Note: ul_mpu_addr is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here!
	 */
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ul_mpu_addr);
	if (vma)
		dev_dbg(bridge,
			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
			ul_num_bytes, vma->vm_start, vma->vm_end,
			vma->vm_flags);

	/*
	 * It is observed that under some circumstances, the user buffer is
	 * spread across several VMAs. So loop through and check if the entire
	 * user buffer is covered
	 */
	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
		/* jump to the next VMA region */
		vma = find_vma(mm, vma->vm_end + 1);
		if (vma)
			dev_dbg(bridge,
				"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
				"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n",
				ul_mpu_addr, ul_num_bytes, vma->vm_start,
				vma->vm_end, vma->vm_flags);
	}
	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
		       __func__, ul_mpu_addr, ul_num_bytes);
		status = -EINVAL;
		up_read(&mm->mmap_sem);
		goto func_cont;
	}

	if (vma->vm_flags & VM_IO) {
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		mpu_addr = ul_mpu_addr;

		/* Get the physical addresses for user buffer */
		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pa = user_va2_pa(mm, mpu_addr);
			if (!pa) {
				status = -EPERM;
				pr_err("DSPBRIDGE: VM_IO mapping physical "
				       "address is invalid\n");
				break;
			}
			if (pfn_valid(__phys_to_pfn(pa))) {
				pg = PHYS_TO_PAGE(pa);
				get_page(pg);
				if (page_count(pg) < 1) {
					pr_err("Bad page in VM_IO buffer\n");
					bad_page_dump(pa, pg);
				}
			}
			status = pte_set(dev_context->pt_attrs, pa,
					 va, HW_PAGE_SIZE4KB, &hw_attrs);
			if (status)
				break;

			va += HW_PAGE_SIZE4KB;
			mpu_addr += HW_PAGE_SIZE4KB;
			pa += HW_PAGE_SIZE4KB;
		}
	} else {
		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
			write = 1;

		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
						write, 1, &mapped_page, NULL);
			if (pg_num > 0) {
				if (page_count(mapped_page) < 1) {
					pr_err("Bad page count after doing "
					       "get_user_pages on "
					       "user buffer\n");
					bad_page_dump(page_to_phys(mapped_page),
						      mapped_page);
				}
				status = pte_set(dev_context->pt_attrs,
						 page_to_phys(mapped_page), va,
						 HW_PAGE_SIZE4KB, &hw_attrs);
				if (status)
					break;

				if (mapped_pages)
					mapped_pages[pg_i] = mapped_page;

				va += HW_PAGE_SIZE4KB;
				ul_mpu_addr += HW_PAGE_SIZE4KB;
			} else {
				pr_err("DSPBRIDGE: get_user_pages FAILED, "
				       "MPU addr = 0x%x, "
				       "vma->vm_flags = 0x%lx, "
				       "get_user_pages Err "
				       "Value = %d, Buffer "
				       "size=0x%x\n", ul_mpu_addr,
				       vma->vm_flags, pg_num, ul_num_bytes);
				status = -EPERM;
				break;
			}
		}
	}
	up_read(&mm->mmap_sem);
func_cont:
	if (status) {
		/*
		 * Roll out the mapped pages in case it failed in middle of
		 * mapping
		 */
		if (pg_i) {
			bridge_brd_mem_un_map(dev_context, virt_addr,
					      (pg_i * PG_SIZE4K));
		}
		status = -EPERM;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
/*
 *  ======== bridge_brd_mem_un_map ========
 *      Invalidate the PTEs for the DSP VA block to be unmapped.
 *
 *      PTEs of a mapped memory block are contiguous in any page table,
 *      so, instead of looking up the PTE address for every 4K block,
 *      we clear consecutive PTEs until we unmap all the bytes
 */
static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
				 u32 virt_addr, u32 ul_num_bytes)
{
	u32 l1_base_va;
	u32 l2_base_va;
	u32 l2_base_pa;
	u32 l2_page_num;
	u32 pte_val;
	u32 pte_size;
	u32 pte_count;
	u32 pte_addr_l1;
	u32 pte_addr_l2 = 0;
	u32 rem_bytes;
	u32 rem_bytes_l2;
	u32 va_curr;
	struct page *pg = NULL;
	int status = 0;
	struct bridge_dev_context *dev_context = dev_ctxt;
	struct pg_table_attrs *pt = dev_context->pt_attrs;
	u32 temp;
	u32 paddr;
	u32 numof4k_pages = 0;

	va_curr = virt_addr;
	rem_bytes = ul_num_bytes;
	rem_bytes_l2 = 0;
	l1_base_va = pt->l1_base_va;
	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
		ul_num_bytes, l1_base_va, pte_addr_l1);

	while (rem_bytes && !status) {
		u32 va_curr_orig = va_curr;
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
		pte_val = *(u32 *) pte_addr_l1;
		pte_size = hw_mmu_pte_size_l1(pte_val);

		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
			goto skip_coarse_page;

		/*
		 * Get the L2 PA from the L1 PTE, and find
		 * corresponding L2 VA
		 */
		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
		l2_page_num =
		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		/*
		 * Find the L2 PTE address from which we will start
		 * clearing, the number of PTEs to be cleared on this
		 * page, and the size of VA space that needs to be
		 * cleared on this L2 page
		 */
		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
		if (rem_bytes < (pte_count * PG_SIZE4K))
			pte_count = rem_bytes / PG_SIZE4K;
		rem_bytes_l2 = pte_count * PG_SIZE4K;

		/*
		 * Unmap the VA space on this L2 PT. A quicker way
		 * would be to clear pte_count entries starting from
		 * pte_addr_l2. However, below code checks that we don't
		 * clear invalid entries or less than 64KB for a 64KB
		 * entry. Similar checking is done for L1 PTEs too
		 * below
		 */
		while (rem_bytes_l2 && !status) {
			pte_val = *(u32 *) pte_addr_l2;
			pte_size = hw_mmu_pte_size_l2(pte_val);
			/* va_curr aligned to pte_size? */
			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
			    va_curr & (pte_size - 1)) {
				status = -EPERM;
				break;
			}

			/* Collect Physical addresses from VA */
			paddr = (pte_val & ~(pte_size - 1));
			if (pte_size == HW_PAGE_SIZE64KB)
				numof4k_pages = 16;
			else
				numof4k_pages = 1;
			temp = 0;
			while (temp++ < numof4k_pages) {
				if (!pfn_valid(__phys_to_pfn(paddr))) {
					paddr += HW_PAGE_SIZE4KB;
					continue;
				}
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
				paddr += HW_PAGE_SIZE4KB;
			}
			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
				status = -EPERM;
				goto EXIT_LOOP;
			}

			status = 0;
			rem_bytes_l2 -= pte_size;
			va_curr += pte_size;
			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
		}
		spin_lock(&pt->pg_lock);
		if (rem_bytes_l2 == 0) {
			pt->pg_info[l2_page_num].num_entries -= pte_count;
			if (pt->pg_info[l2_page_num].num_entries == 0) {
				/*
				 * Clear the L1 PTE pointing to the L2 PT
				 */
				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
						      HW_MMU_COARSE_PAGE_SIZE)) {
					status = 0;
				} else {
					status = -EPERM;
					spin_unlock(&pt->pg_lock);
					goto EXIT_LOOP;
				}
			}
			rem_bytes -= pte_count * PG_SIZE4K;
		} else {
			status = -EPERM;
		}

		spin_unlock(&pt->pg_lock);
		continue;
skip_coarse_page:
		/* va_curr aligned to pte_size? */
		/* pte_size = 1 MB or 16 MB */
		if (pte_size == 0 || rem_bytes < pte_size ||
		    va_curr & (pte_size - 1)) {
			status = -EPERM;
			break;
		}

		if (pte_size == HW_PAGE_SIZE1MB)
			numof4k_pages = 256;
		else
			numof4k_pages = 4096;
		temp = 0;
		/* Collect Physical addresses from VA */
		paddr = (pte_val & ~(pte_size - 1));
		while (temp++ < numof4k_pages) {
			if (pfn_valid(__phys_to_pfn(paddr))) {
				pg = PHYS_TO_PAGE(paddr);
				if (page_count(pg) < 1) {
					pr_info("DSPBRIDGE: UNMAP function: "
						"COUNT 0 FOR PA 0x%x, size = "
						"0x%x\n", paddr, ul_num_bytes);
					bad_page_dump(paddr, pg);
				} else {
					set_page_dirty(pg);
					page_cache_release(pg);
				}
			}
			paddr += HW_PAGE_SIZE4KB;
		}
		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
			status = 0;
			rem_bytes -= pte_size;
			va_curr += pte_size;
		} else {
			status = -EPERM;
			goto EXIT_LOOP;
		}
	}
	/*
	 * It is better to flush the TLB here, so that any stale old entries
	 * get flushed
	 */
EXIT_LOOP:
	flush_all(dev_context);
	dev_dbg(bridge,
		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
	return status;
}
/*
 *  ======== user_va2_pa ========
 *      This function walks through the page tables to convert a userland
 *      virtual address to a physical address.
 */
static u32 user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_map(pmd, address);
	if (ptep) {
		pte = *ptep;
		if (pte_present(pte))
			return pte & PAGE_MASK;
	}

	return 0;
}
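
/*
 * Example (hypothetical addresses): for a user VA like 0xb6f01234 the
 * walk above descends pgd -> pud -> pmd -> pte; if the PTE is present
 * the function returns pte & PAGE_MASK, e.g. 0x87654000, i.e. the
 * physical frame with the in-page offset stripped. A return of 0 means
 * some level was missing or bad.
 */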
/*
 *  ======== pte_update ========
 *      This function calculates the optimum page-aligned addresses and sizes.
 *      Caller must pass page-aligned values.
 */
static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
		      u32 va, u32 size,
		      struct hw_mmu_map_attrs_t *map_attrs)
{
	u32 i;
	u32 all_bits;
	u32 pa_curr = pa;
	u32 va_curr = va;
	u32 num_bytes = size;
	struct bridge_dev_context *dev_context = dev_ctxt;
	int status = 0;
	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
	};

	while (num_bytes && !status) {
		/* To find the max. page size with which both PA & VA are
		 * aligned */
		all_bits = pa_curr | va_curr;

		for (i = 0; i < 4; i++) {
			if ((num_bytes >= page_size[i]) && ((all_bits &
							     (page_size[i] -
							      1)) == 0)) {
				status =
				    pte_set(dev_context->pt_attrs, pa_curr,
					    va_curr, page_size[i], map_attrs);
				pa_curr += page_size[i];
				va_curr += page_size[i];
				num_bytes -= page_size[i];
				/* Don't try smaller sizes. Hopefully we have
				 * reached an address aligned to a bigger page
				 * size */
				break;
			}
		}
	}

	return status;
}
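
/*
 * Worked example for the size selection above (illustrative values):
 * with pa_curr = 0x87200000, va_curr = 0x60000000 and num_bytes =
 * 0x300000, all_bits = 0xe7200000. The 16 MB alignment test fails (the
 * low 24 bits are not zero) but the 1 MB test passes, so three 1 MB
 * PTEs are written instead of 768 4 KB ones.
 */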
/*
 *  ======== pte_set ========
 *      This function calculates the PTE address (MPU virtual) to be updated.
 *      It also manages the L2 page tables.
 */
static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
		   u32 size, struct hw_mmu_map_attrs_t *attrs)
{
	u32 i;
	u32 pte_val;
	u32 pte_addr_l1;
	u32 pte_size;
	/* Base address of the PT that will be updated */
	u32 pg_tbl_va;
	u32 l1_base_va;
	/* Compiler warns that the next three variables might be used
	 * uninitialized in this function. Doesn't seem so. Working around,
	 * anyways. */
	u32 l2_base_va = 0;
	u32 l2_base_pa = 0;
	u32 l2_page_num = 0;
	int status = 0;

	l1_base_va = pt->l1_base_va;
	pg_tbl_va = l1_base_va;
	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
		/* Find whether the L1 PTE points to a valid L2 PT */
		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
			pte_val = *(u32 *) pte_addr_l1;
			pte_size = hw_mmu_pte_size_l1(pte_val);
		} else {
			return -EPERM;
		}
		spin_lock(&pt->pg_lock);
		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
			/* Get the L2 PA from the L1 PTE, and find
			 * corresponding L2 VA */
			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
			l2_base_va =
			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
			l2_page_num =
			    (l2_base_pa -
			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
		} else if (pte_size == 0) {
			/* L1 PTE is invalid. Allocate a L2 PT and
			 * point the L1 PTE to it */
			/* Find a free L2 PT. */
			for (i = 0; (i < pt->l2_num_pages) &&
			     (pt->pg_info[i].num_entries != 0); i++)
				;
			if (i < pt->l2_num_pages) {
				l2_page_num = i;
				l2_base_pa = pt->l2_base_pa + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				l2_base_va = pt->l2_base_va + (l2_page_num *
						HW_MMU_COARSE_PAGE_SIZE);
				/* Endianness attributes are ignored for
				 * HW_MMU_COARSE_PAGE_SIZE */
				status =
				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
						   HW_MMU_COARSE_PAGE_SIZE,
						   attrs);
			} else {
				status = -ENOMEM;
			}
		} else {
			/* Found valid L1 PTE of another size.
			 * Should not overwrite it. */
			status = -EPERM;
		}
		if (!status) {
			pg_tbl_va = l2_base_va;
			if (size == HW_PAGE_SIZE64KB)
				pt->pg_info[l2_page_num].num_entries += 16;
			else
				pt->pg_info[l2_page_num].num_entries++;
			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
				"%x, num_entries %x\n", l2_base_va,
				l2_base_pa, l2_page_num,
				pt->pg_info[l2_page_num].num_entries);
		}
		spin_unlock(&pt->pg_lock);
	}
	if (!status) {
		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
			pg_tbl_va, pa, va, size);
		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
			"mixed_size %x\n", attrs->endianism,
			attrs->element_size, attrs->mixed_size);
		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
	}

	return status;
}
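
/*
 * Bookkeeping note: in an ARM coarse L2 table a 64 KB large page
 * occupies 16 consecutive 4 KB entry slots, which is why num_entries
 * grows by 16 for HW_PAGE_SIZE64KB but by 1 for a 4 KB page. When
 * num_entries drops back to 0, bridge_brd_mem_un_map() can clear the
 * L1 entry and recycle the L2 page.
 */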
/* Memory map kernel VA -- memory allocated with vmalloc */
static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
			   u32 ul_mpu_addr, u32 virt_addr,
			   u32 ul_num_bytes,
			   struct hw_mmu_map_attrs_t *hw_attrs)
{
	int status = 0;
	struct page *page[1];
	u32 i;
	u32 pa_curr;
	u32 pa_next;
	u32 va_curr;
	u32 size_curr;
	u32 num_pages;
	u32 pa;
	u32 num_of4k_pages;
	u32 temp = 0;

	/*
	 * Do Kernel va to pa translation.
	 * Combine physically contiguous regions to reduce TLBs.
	 * Pass the translated pa to pte_update.
	 */
	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
	i = 0;
	va_curr = ul_mpu_addr;
	page[0] = vmalloc_to_page((void *)va_curr);
	pa_next = page_to_phys(page[0]);
	while (!status && (i < num_pages)) {
		/*
		 * Reuse pa_next from the previous iteration to avoid
		 * an extra va2pa call
		 */
		pa_curr = pa_next;
		size_curr = PAGE_SIZE;
		/*
		 * If the next page is physically contiguous,
		 * map it with the current one by increasing
		 * the size of the region to be mapped
		 */
		while (++i < num_pages) {
			page[0] =
			    vmalloc_to_page((void *)(va_curr + size_curr));
			pa_next = page_to_phys(page[0]);

			if (pa_next == (pa_curr + size_curr))
				size_curr += PAGE_SIZE;
			else
				break;

		}
		if (pa_next == 0) {
			status = -ENOMEM;
			break;
		}
		pa = pa_curr;
		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
		while (temp++ < num_of4k_pages) {
			get_page(PHYS_TO_PAGE(pa));
			pa += HW_PAGE_SIZE4KB;
		}
		status = pte_update(dev_context, pa_curr, virt_addr +
				    (va_curr - ul_mpu_addr), size_curr,
				    hw_attrs);
		va_curr += size_curr;
	}
	/*
	 * In any case, flush the TLB
	 * This is called from here instead from pte_update to avoid unnecessary
	 * repetition while mapping non-contiguous physical regions of a virtual
	 * region
	 */
	flush_all(dev_context);
	dev_dbg(bridge, "%s status %x\n", __func__, status);
	return status;
}
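
/*
 * Coalescing example (hypothetical frames): vmalloc memory is virtually
 * contiguous but usually physically scattered. If pages at kernel VA
 * 0xd0000000..0xd0002fff happen to land in physical frames 0x81000000,
 * 0x81001000 and 0x83000000, the loop above folds the first two into a
 * single 8 KB pte_update() call and maps the third separately.
 */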
/*
 *  ======== wait_for_start ========
 *      Wait for the signal from DSP that it has started, or time out.
 */
bool wait_for_start(struct bridge_dev_context *dev_context,
		    void __iomem *sync_addr)
{
	u16 timeout = TIHELEN_ACKTIMEOUT;

	/* Wait for response from board */
	while (__raw_readw(sync_addr) && --timeout)
		udelay(10);

	/* If timed out: return false */
	if (!timeout) {
		pr_err("%s: Timed out waiting DSP to Start\n", __func__);
		return false;
	}
	return true;
}