]>
Commit | Line | Data |
---|---|---|
7d55524d ORL |
1 | /* |
2 | * drv.c | |
3 | * | |
4 | * DSP-BIOS Bridge driver support functions for TI OMAP processors. | |
5 | * | |
6 | * DSP/BIOS Bridge resource allocation module. | |
7 | * | |
8 | * Copyright (C) 2005-2006 Texas Instruments, Inc. | |
9 | * | |
10 | * This package is free software; you can redistribute it and/or modify | |
11 | * it under the terms of the GNU General Public License version 2 as | |
12 | * published by the Free Software Foundation. | |
13 | * | |
14 | * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR | |
15 | * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED | |
16 | * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. | |
17 | */ | |
2094f12d | 18 | #include <linux/types.h> |
7d55524d ORL |
19 | |
20 | /* ----------------------------------- Host OS */ | |
21 | #include <dspbridge/host_os.h> | |
22 | ||
23 | /* ----------------------------------- DSP/BIOS Bridge */ | |
7d55524d ORL |
24 | #include <dspbridge/dbdefs.h> |
25 | ||
26 | /* ----------------------------------- Trace & Debug */ | |
27 | #include <dspbridge/dbc.h> | |
28 | ||
29 | /* ----------------------------------- OS Adaptation Layer */ | |
7d55524d ORL |
30 | #include <dspbridge/list.h> |
31 | ||
32 | /* ----------------------------------- This */ | |
33 | #include <dspbridge/drv.h> | |
34 | #include <dspbridge/dev.h> | |
35 | ||
36 | #include <dspbridge/node.h> | |
37 | #include <dspbridge/proc.h> | |
38 | #include <dspbridge/strm.h> | |
39 | #include <dspbridge/nodepriv.h> | |
40 | #include <dspbridge/dspchnl.h> | |
41 | #include <dspbridge/resourcecleanup.h> | |
42 | ||
/* ----------------------------------- Defines, Data Structures, Typedefs */
/* DRV module state: lists of device objects and device-node strings. */
struct drv_object {
	struct lst_list *dev_list;		/* list of dev_objects */
	struct lst_list *dev_node_string;	/* list of device node strings */
};

/*
 * This is the Device Extension. Named with the Prefix
 * DRV_ since it is living in this module
 */
struct drv_ext {
	struct list_head link;			/* list linkage */
	char sz_string[MAXREGPATHLENGTH];	/* device node string */
};

/* ----------------------------------- Globals */
static s32 refs;			/* DRV module reference count */
static bool ext_phys_mem_pool_enabled;	/* true when external pool is mapped */
/* Descriptor for the optional external physical memory pool
 * (simple bump allocator over a fixed physical range). */
struct ext_phys_mem_pool {
	u32 phys_mem_base;		/* physical base of the pool */
	u32 phys_mem_size;		/* pool size in bytes */
	u32 virt_mem_base;		/* ioremapped virtual base */
	u32 next_phys_alloc_ptr;	/* next free physical address */
};
static struct ext_phys_mem_pool ext_mem_pool;

/* ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);


/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);
7d55524d ORL |
76 | |
/* Allocate and add a node resource element
 * This function is called from .Node_Allocate.
 * Allocates a node_res_object for @hnode, registers it in the process
 * context's node idr, and returns the object through @node_resource.
 * Returns 0 on success, -ENOMEM on allocation failure, -EFAULT if the
 * idr is full. On failure the allocated object is freed. */
int drv_insert_node_res_element(void *hnode, void *node_resource,
				void *process_ctxt)
{
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj) {
		status = -ENOMEM;
		goto func_end;
	}

	(*node_res_obj)->hnode = hnode;
	/* First attempt may fail with -EAGAIN if the idr has no
	 * preallocated memory; preallocate and retry once. */
	retval = idr_get_new(ctxt->node_id, *node_res_obj,
			     &(*node_res_obj)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->node_id, *node_res_obj,
				     &(*node_res_obj)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EFAULT;
	}
func_end:
	/* kfree(NULL) is a no-op, so this is safe on the alloc-failure path */
	if (status)
		kfree(*node_res_obj);

	return status;
}
117 | ||
/* Release all Node resources and its context
 * Actual Node De-Allocation
 * idr_for_each callback: @p is the node_res_object, @data is the owning
 * process_context. Always returns 0 so iteration continues. */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
	struct process_context *ctxt = data;
	int status;
	struct node_res_object *node_res_obj = p;
	u32 node_state;

	if (node_res_obj->node_allocated) {
		node_state = node_get_state(node_res_obj->hnode);
		if (node_state <= NODE_DELETING) {
			/* A node that is still active must be terminated
			 * before it can be deleted. */
			if ((node_state == NODE_RUNNING) ||
			    (node_state == NODE_PAUSED) ||
			    (node_state == NODE_TERMINATING))
				node_terminate
				    (node_res_obj->hnode, &status);

			node_delete(node_res_obj, ctxt);
		}
	}

	return 0;
}
142 | ||
143 | /* Release all Mapped and Reserved DMM resources */ | |
e6890692 | 144 | int drv_remove_all_dmm_res_elements(void *process_ctxt) |
7d55524d | 145 | { |
e6890692 | 146 | struct process_context *ctxt = (struct process_context *)process_ctxt; |
7d55524d ORL |
147 | int status = 0; |
148 | struct dmm_map_object *temp_map, *map_obj; | |
a2890350 | 149 | struct dmm_rsv_object *temp_rsv, *rsv_obj; |
7d55524d ORL |
150 | |
151 | /* Free DMM mapped memory resources */ | |
152 | list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) { | |
153 | status = proc_un_map(ctxt->hprocessor, | |
154 | (void *)map_obj->dsp_addr, ctxt); | |
b66e0986 | 155 | if (status) |
7d55524d ORL |
156 | pr_err("%s: proc_un_map failed!" |
157 | " status = 0x%xn", __func__, status); | |
158 | } | |
a2890350 FC |
159 | |
160 | /* Free DMM reserved memory resources */ | |
161 | list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) { | |
162 | status = proc_un_reserve_memory(ctxt->hprocessor, (void *) | |
163 | rsv_obj->dsp_reserved_addr, | |
164 | ctxt); | |
165 | if (status) | |
166 | pr_err("%s: proc_un_reserve_memory failed!" | |
167 | " status = 0x%xn", __func__, status); | |
168 | } | |
7d55524d ORL |
169 | return status; |
170 | } | |
171 | ||
172 | /* Update Node allocation status */ | |
e6890692 | 173 | void drv_proc_node_update_status(void *node_resource, s32 status) |
7d55524d ORL |
174 | { |
175 | struct node_res_object *node_res_obj = | |
e6890692 RS |
176 | (struct node_res_object *)node_resource; |
177 | DBC_ASSERT(node_resource != NULL); | |
7d55524d ORL |
178 | node_res_obj->node_allocated = status; |
179 | } | |
180 | ||
181 | /* Update Node Heap status */ | |
e6890692 | 182 | void drv_proc_node_update_heap_status(void *node_resource, s32 status) |
7d55524d ORL |
183 | { |
184 | struct node_res_object *node_res_obj = | |
e6890692 RS |
185 | (struct node_res_object *)node_resource; |
186 | DBC_ASSERT(node_resource != NULL); | |
7d55524d ORL |
187 | node_res_obj->heap_allocated = status; |
188 | } | |
189 | ||
190 | /* Release all Node resources and its context | |
191 | * This is called from .bridge_release. | |
192 | */ | |
e6890692 | 193 | int drv_remove_all_node_res_elements(void *process_ctxt) |
7d55524d | 194 | { |
0624f52f | 195 | struct process_context *ctxt = process_ctxt; |
7d55524d | 196 | |
0624f52f ER |
197 | idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt); |
198 | idr_destroy(ctxt->node_id); | |
7d55524d | 199 | |
0624f52f | 200 | return 0; |
7d55524d ORL |
201 | } |
202 | ||
203 | /* Allocate the STRM resource element | |
204 | * This is called after the actual resource is allocated | |
205 | */ | |
c8c1ad8c RS |
206 | int drv_proc_insert_strm_res_element(void *stream_obj, |
207 | void *strm_res, void *process_ctxt) | |
7d55524d ORL |
208 | { |
209 | struct strm_res_object **pstrm_res = | |
c8c1ad8c | 210 | (struct strm_res_object **)strm_res; |
e6890692 | 211 | struct process_context *ctxt = (struct process_context *)process_ctxt; |
7d55524d | 212 | int status = 0; |
4ec09714 | 213 | int retval; |
7d55524d ORL |
214 | |
215 | *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL); | |
4ec09714 | 216 | if (*pstrm_res == NULL) { |
7d55524d | 217 | status = -EFAULT; |
4ec09714 | 218 | goto func_end; |
7d55524d | 219 | } |
7d55524d | 220 | |
4ec09714 ER |
221 | (*pstrm_res)->hstream = stream_obj; |
222 | retval = idr_get_new(ctxt->stream_id, *pstrm_res, | |
223 | &(*pstrm_res)->id); | |
224 | if (retval == -EAGAIN) { | |
225 | if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) { | |
226 | pr_err("%s: OUT OF MEMORY\n", __func__); | |
227 | status = -ENOMEM; | |
228 | goto func_end; | |
229 | } | |
7d55524d | 230 | |
4ec09714 ER |
231 | retval = idr_get_new(ctxt->stream_id, *pstrm_res, |
232 | &(*pstrm_res)->id); | |
7d55524d | 233 | } |
4ec09714 ER |
234 | if (retval) { |
235 | pr_err("%s: FAILED, IDR is FULL\n", __func__); | |
236 | status = -EPERM; | |
237 | } | |
238 | ||
239 | func_end: | |
7d55524d ORL |
240 | return status; |
241 | } | |
242 | ||
/* idr_for_each callback: free one stream resource.
 * @p is the strm_res_object, @process_ctxt the owning process context.
 * Frees outstanding buffers, reclaims queued buffers, then closes the
 * stream. Always returns 0 so iteration continues. */
static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;
	struct strm_res_object *strm_res = p;
	struct stream_info strm_info;
	struct dsp_streaminfo user;
	u8 **ap_buffer = NULL;
	u8 *buf_ptr;
	u32 ul_bytes;
	u32 dw_arg;
	s32 ul_buf_size;

	/* Return any buffers the stream still owns. If the temporary
	 * pointer array can't be allocated, the free is silently skipped
	 * (best effort during process cleanup). */
	if (strm_res->num_bufs) {
		ap_buffer = kmalloc((strm_res->num_bufs *
				     sizeof(u8 *)), GFP_KERNEL);
		if (ap_buffer) {
			strm_free_buffer(strm_res,
					 ap_buffer,
					 strm_res->num_bufs,
					 ctxt);
			kfree(ap_buffer);
		}
	}
	strm_info.user_strm = &user;
	user.number_bufs_in_stream = 0;
	strm_get_info(strm_res->hstream, &strm_info, sizeof(strm_info));
	/* Reclaim every buffer still queued inside the stream. */
	while (user.number_bufs_in_stream--)
		strm_reclaim(strm_res->hstream, &buf_ptr, &ul_bytes,
			     (u32 *) &ul_buf_size, &dw_arg);
	strm_close(strm_res, ctxt);
	return 0;
}
275 | ||
4ec09714 ER |
276 | /* Release all Stream resources and its context |
277 | * This is called from .bridge_release. | |
278 | */ | |
279 | int drv_remove_all_strm_res_elements(void *process_ctxt) | |
7d55524d | 280 | { |
4ec09714 | 281 | struct process_context *ctxt = process_ctxt; |
7d55524d | 282 | |
4ec09714 ER |
283 | idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt); |
284 | idr_destroy(ctxt->stream_id); | |
7d55524d | 285 | |
4ec09714 | 286 | return 0; |
7d55524d ORL |
287 | } |
288 | ||
289 | /* Updating the stream resource element */ | |
c8c1ad8c | 290 | int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources) |
7d55524d ORL |
291 | { |
292 | int status = 0; | |
293 | struct strm_res_object **strm_res = | |
c8c1ad8c | 294 | (struct strm_res_object **)strm_resources; |
7d55524d ORL |
295 | |
296 | (*strm_res)->num_bufs = num_bufs; | |
297 | return status; | |
298 | } | |
299 | ||
300 | /* GPP PROCESS CLEANUP CODE END */ | |
301 | ||
302 | /* | |
303 | * ======== = drv_create ======== = | |
304 | * Purpose: | |
305 | * DRV Object gets created only once during Driver Loading. | |
306 | */ | |
e6bf74f0 | 307 | int drv_create(struct drv_object **drv_obj) |
7d55524d ORL |
308 | { |
309 | int status = 0; | |
310 | struct drv_object *pdrv_object = NULL; | |
b87561f7 | 311 | struct drv_data *drv_datap = dev_get_drvdata(bridge); |
7d55524d | 312 | |
e436d07d | 313 | DBC_REQUIRE(drv_obj != NULL); |
7d55524d ORL |
314 | DBC_REQUIRE(refs > 0); |
315 | ||
316 | pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL); | |
317 | if (pdrv_object) { | |
318 | /* Create and Initialize List of device objects */ | |
319 | pdrv_object->dev_list = kzalloc(sizeof(struct lst_list), | |
320 | GFP_KERNEL); | |
321 | if (pdrv_object->dev_list) { | |
322 | /* Create and Initialize List of device Extension */ | |
323 | pdrv_object->dev_node_string = | |
324 | kzalloc(sizeof(struct lst_list), GFP_KERNEL); | |
325 | if (!(pdrv_object->dev_node_string)) { | |
326 | status = -EPERM; | |
327 | } else { | |
328 | INIT_LIST_HEAD(&pdrv_object-> | |
329 | dev_node_string->head); | |
330 | INIT_LIST_HEAD(&pdrv_object->dev_list->head); | |
331 | } | |
332 | } else { | |
333 | status = -ENOMEM; | |
334 | } | |
335 | } else { | |
336 | status = -ENOMEM; | |
337 | } | |
b87561f7 IGC |
338 | /* Store the DRV Object in the driver data */ |
339 | if (!status) { | |
340 | if (drv_datap) { | |
341 | drv_datap->drv_object = (void *)pdrv_object; | |
342 | } else { | |
343 | status = -EPERM; | |
344 | pr_err("%s: Failed to store DRV object\n", __func__); | |
345 | } | |
346 | } | |
347 | ||
a741ea6e | 348 | if (!status) { |
e436d07d | 349 | *drv_obj = pdrv_object; |
7d55524d ORL |
350 | } else { |
351 | kfree(pdrv_object->dev_list); | |
352 | kfree(pdrv_object->dev_node_string); | |
353 | /* Free the DRV Object */ | |
354 | kfree(pdrv_object); | |
355 | } | |
356 | ||
b66e0986 | 357 | DBC_ENSURE(status || pdrv_object); |
7d55524d ORL |
358 | return status; |
359 | } | |
360 | ||
/*
 * ======== drv_exit ========
 * Purpose:
 *      Discontinue usage of the DRV module.
 */
void drv_exit(void)
{
	DBC_REQUIRE(refs > 0);

	/* Drop one module reference taken by drv_init(). */
	refs--;

	DBC_ENSURE(refs >= 0);
}
374 | ||
/*
 * ======== = drv_destroy ======== =
 * purpose:
 *      Invoked during bridge de-initialization.
 *      Frees the drv_object and both of its lists, and clears the
 *      drv_object pointer in the bridge driver data. Returns 0 on
 *      success, -EPERM when the driver data is unavailable.
 */
int drv_destroy(struct drv_object *driver_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);

	/*
	 * Delete the List if it exists.Should not come here
	 * as the drv_remove_dev_object and the Last drv_request_resources
	 * removes the list if the lists are empty.
	 */
	kfree(pdrv_object->dev_list);
	kfree(pdrv_object->dev_node_string);
	kfree(pdrv_object);
	/* Update the DRV Object in the driver data */
	if (drv_datap) {
		drv_datap->drv_object = NULL;
	} else {
		status = -EPERM;
		pr_err("%s: Failed to store DRV object\n", __func__);
	}

	return status;
}
407 | ||
/*
 * ======== drv_get_dev_object ========
 * Purpose:
 *      Given a index, returns a handle to DevObject from the list.
 *      Walks the global device list to the index'th entry. Returns 0 and
 *      the object through @device_obj, or -EPERM (with *device_obj NULL)
 *      when the index is out of range.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
		       struct dev_object **device_obj)
{
	int status = 0;
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
	/* used only for Assertions and debug messages */
	struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
#endif
	struct dev_object *dev_obj;
	u32 i;
	/* NOTE(review): pdrv_obj is declared only under
	 * CONFIG_TIDSPBRIDGE_DEBUG but referenced below; this presumably
	 * relies on the DBC_* macros expanding to nothing in non-debug
	 * builds — confirm against dbc.h. */
	DBC_REQUIRE(pdrv_obj);
	DBC_REQUIRE(device_obj != NULL);
	DBC_REQUIRE(index >= 0);
	DBC_REQUIRE(refs > 0);
	DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj->dev_list)));

	/* Step through the global list to the requested position. */
	dev_obj = (struct dev_object *)drv_get_first_dev_object();
	for (i = 0; i < index; i++) {
		dev_obj =
		    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
	}
	if (dev_obj) {
		*device_obj = (struct dev_object *)dev_obj;
	} else {
		*device_obj = NULL;
		status = -EPERM;
	}

	return status;
}
443 | ||
444 | /* | |
445 | * ======== drv_get_first_dev_object ======== | |
446 | * Purpose: | |
447 | * Retrieve the first Device Object handle from an internal linked list of | |
448 | * of DEV_OBJECTs maintained by DRV. | |
449 | */ | |
450 | u32 drv_get_first_dev_object(void) | |
451 | { | |
452 | u32 dw_dev_object = 0; | |
453 | struct drv_object *pdrv_obj; | |
73b87a91 | 454 | struct drv_data *drv_datap = dev_get_drvdata(bridge); |
7d55524d | 455 | |
73b87a91 IGC |
456 | if (drv_datap && drv_datap->drv_object) { |
457 | pdrv_obj = drv_datap->drv_object; | |
7d55524d ORL |
458 | if ((pdrv_obj->dev_list != NULL) && |
459 | !LST_IS_EMPTY(pdrv_obj->dev_list)) | |
460 | dw_dev_object = (u32) lst_first(pdrv_obj->dev_list); | |
73b87a91 IGC |
461 | } else { |
462 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | |
7d55524d ORL |
463 | } |
464 | ||
465 | return dw_dev_object; | |
466 | } | |
467 | ||
468 | /* | |
469 | * ======== DRV_GetFirstDevNodeString ======== | |
470 | * Purpose: | |
471 | * Retrieve the first Device Extension from an internal linked list of | |
472 | * of Pointer to dev_node Strings maintained by DRV. | |
473 | */ | |
474 | u32 drv_get_first_dev_extension(void) | |
475 | { | |
476 | u32 dw_dev_extension = 0; | |
477 | struct drv_object *pdrv_obj; | |
73b87a91 | 478 | struct drv_data *drv_datap = dev_get_drvdata(bridge); |
7d55524d | 479 | |
73b87a91 IGC |
480 | if (drv_datap && drv_datap->drv_object) { |
481 | pdrv_obj = drv_datap->drv_object; | |
7d55524d ORL |
482 | if ((pdrv_obj->dev_node_string != NULL) && |
483 | !LST_IS_EMPTY(pdrv_obj->dev_node_string)) { | |
484 | dw_dev_extension = | |
485 | (u32) lst_first(pdrv_obj->dev_node_string); | |
486 | } | |
73b87a91 IGC |
487 | } else { |
488 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | |
7d55524d ORL |
489 | } |
490 | ||
491 | return dw_dev_extension; | |
492 | } | |
493 | ||
494 | /* | |
495 | * ======== drv_get_next_dev_object ======== | |
496 | * Purpose: | |
497 | * Retrieve the next Device Object handle from an internal linked list of | |
498 | * of DEV_OBJECTs maintained by DRV, after having previously called | |
499 | * drv_get_first_dev_object() and zero or more DRV_GetNext. | |
500 | */ | |
501 | u32 drv_get_next_dev_object(u32 hdev_obj) | |
502 | { | |
503 | u32 dw_next_dev_object = 0; | |
504 | struct drv_object *pdrv_obj; | |
73b87a91 | 505 | struct drv_data *drv_datap = dev_get_drvdata(bridge); |
7d55524d ORL |
506 | |
507 | DBC_REQUIRE(hdev_obj != 0); | |
508 | ||
73b87a91 IGC |
509 | if (drv_datap && drv_datap->drv_object) { |
510 | pdrv_obj = drv_datap->drv_object; | |
7d55524d ORL |
511 | if ((pdrv_obj->dev_list != NULL) && |
512 | !LST_IS_EMPTY(pdrv_obj->dev_list)) { | |
513 | dw_next_dev_object = (u32) lst_next(pdrv_obj->dev_list, | |
514 | (struct list_head *) | |
515 | hdev_obj); | |
516 | } | |
73b87a91 IGC |
517 | } else { |
518 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | |
7d55524d | 519 | } |
73b87a91 | 520 | |
7d55524d ORL |
521 | return dw_next_dev_object; |
522 | } | |
523 | ||
524 | /* | |
525 | * ======== drv_get_next_dev_extension ======== | |
526 | * Purpose: | |
527 | * Retrieve the next Device Extension from an internal linked list of | |
528 | * of pointer to DevNodeString maintained by DRV, after having previously | |
529 | * called drv_get_first_dev_extension() and zero or more | |
530 | * drv_get_next_dev_extension(). | |
531 | */ | |
e6890692 | 532 | u32 drv_get_next_dev_extension(u32 dev_extension) |
7d55524d ORL |
533 | { |
534 | u32 dw_dev_extension = 0; | |
535 | struct drv_object *pdrv_obj; | |
73b87a91 | 536 | struct drv_data *drv_datap = dev_get_drvdata(bridge); |
7d55524d | 537 | |
e6890692 | 538 | DBC_REQUIRE(dev_extension != 0); |
7d55524d | 539 | |
73b87a91 IGC |
540 | if (drv_datap && drv_datap->drv_object) { |
541 | pdrv_obj = drv_datap->drv_object; | |
7d55524d ORL |
542 | if ((pdrv_obj->dev_node_string != NULL) && |
543 | !LST_IS_EMPTY(pdrv_obj->dev_node_string)) { | |
544 | dw_dev_extension = | |
545 | (u32) lst_next(pdrv_obj->dev_node_string, | |
e6890692 | 546 | (struct list_head *)dev_extension); |
7d55524d | 547 | } |
73b87a91 IGC |
548 | } else { |
549 | pr_err("%s: Failed to retrieve the object handle\n", __func__); | |
7d55524d ORL |
550 | } |
551 | ||
552 | return dw_dev_extension; | |
553 | } | |
554 | ||
/*
 * ======== drv_init ========
 * Purpose:
 *      Initialize DRV module private state.
 *      Bumps the module reference count; always succeeds (returns 1).
 */
int drv_init(void)
{
	s32 ret = 1;		/* function return value */

	DBC_REQUIRE(refs >= 0);

	/* ret is always true here; the guard mirrors the other *_init
	 * functions in this driver. */
	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}
573 | ||
574 | /* | |
575 | * ======== drv_insert_dev_object ======== | |
576 | * Purpose: | |
577 | * Insert a DevObject into the list of Manager object. | |
578 | */ | |
e6890692 | 579 | int drv_insert_dev_object(struct drv_object *driver_obj, |
7d55524d ORL |
580 | struct dev_object *hdev_obj) |
581 | { | |
e6890692 | 582 | struct drv_object *pdrv_object = (struct drv_object *)driver_obj; |
7d55524d ORL |
583 | |
584 | DBC_REQUIRE(refs > 0); | |
585 | DBC_REQUIRE(hdev_obj != NULL); | |
586 | DBC_REQUIRE(pdrv_object); | |
587 | DBC_ASSERT(pdrv_object->dev_list); | |
588 | ||
589 | lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj); | |
590 | ||
a741ea6e | 591 | DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list)); |
7d55524d | 592 | |
a741ea6e | 593 | return 0; |
7d55524d ORL |
594 | } |
595 | ||
/*
 * ======== drv_remove_dev_object ========
 * Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects. Frees and NULLs the device list when it becomes empty.
 *      Returns 0 when found and removed, -EPERM otherwise.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct list_head *cur_elem;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);
	DBC_REQUIRE(hdev_obj != NULL);

	DBC_REQUIRE(pdrv_object->dev_list != NULL);
	DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object->dev_list));

	/* Search list for p_proc_object: */
	for (cur_elem = lst_first(pdrv_object->dev_list); cur_elem != NULL;
	     cur_elem = lst_next(pdrv_object->dev_list, cur_elem)) {
		/* If found, remove it. */
		if ((struct dev_object *)cur_elem == hdev_obj) {
			lst_remove_elem(pdrv_object->dev_list, cur_elem);
			status = 0;
			break;
		}
	}
	/* Remove list if empty. */
	if (LST_IS_EMPTY(pdrv_object->dev_list)) {
		kfree(pdrv_object->dev_list);
		pdrv_object->dev_list = NULL;
	}
	DBC_ENSURE((pdrv_object->dev_list == NULL) ||
		   !LST_IS_EMPTY(pdrv_object->dev_list));

	return status;
}
636 | ||
/*
 * ======== drv_request_resources ========
 * Purpose:
 *      Requests resources from the OS.
 *      Allocates a drv_ext holding a copy of the device node string
 *      passed in @dw_context (treated as a char pointer), appends it to
 *      the DRV object's string list, and returns the string's address
 *      through @dev_node_strg. Returns 0 on success, -ENODATA when the
 *      DRV object is unavailable, -ENOMEM on allocation failure.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
	int status = 0;
	struct drv_object *pdrv_object;
	struct drv_ext *pszdev_node;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(dw_context != 0);
	DBC_REQUIRE(dev_node_strg != NULL);

	/*
	 * Allocate memory to hold the string. This will live untill
	 * it is freed in the Release resources. Update the driver object
	 * list.
	 */

	if (!drv_datap || !drv_datap->drv_object)
		status = -ENODATA;
	else
		pdrv_object = drv_datap->drv_object;

	if (!status) {
		pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
		if (pszdev_node) {
			lst_init_elem(&pszdev_node->link);
			/* dw_context carries a char pointer despite its
			 * u32 type; copy the string it points at. */
			strncpy(pszdev_node->sz_string,
				(char *)dw_context, MAXREGPATHLENGTH - 1);
			pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
			/* Update the Driver Object List */
			*dev_node_strg = (u32) pszdev_node->sz_string;
			lst_put_tail(pdrv_object->dev_node_string,
				     (struct list_head *)pszdev_node);
		} else {
			status = -ENOMEM;
			*dev_node_strg = 0;
		}
	} else {
		dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
			__func__);
		*dev_node_strg = 0;
	}

	/* pdrv_object is only evaluated here when !status, i.e. after it
	 * was assigned above (short-circuit evaluation). */
	DBC_ENSURE((!status && dev_node_strg != NULL &&
		    !LST_IS_EMPTY(pdrv_object->dev_node_string)) ||
		   (status && *dev_node_strg == 0));

	return status;
}
690 | ||
/*
 * ======== drv_release_resources ========
 * Purpose:
 *      Releases resources from the OS.
 *      Finds the drv_ext whose address equals @dw_context in the DRV
 *      object's string list, unlinks and frees it. Always returns 0.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)hdrv_obj;
	struct drv_ext *pszdev_node;

	/*
	 * Irrespective of the status go ahead and clean it
	 * The following will over write the status.
	 */
	for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
	     pszdev_node != NULL; pszdev_node = (struct drv_ext *)
	     drv_get_next_dev_extension((u32) pszdev_node)) {
		if (!pdrv_object->dev_node_string) {
			/* When this could happen? */
			continue;
		}
		if ((u32) pszdev_node == dw_context) {
			/* Found it */
			/* Delete from the Driver object list */
			lst_remove_elem(pdrv_object->dev_node_string,
					(struct list_head *)pszdev_node);
			kfree((void *)pszdev_node);
			break;
		}
		/* Delete the List if it is empty */
		/* NOTE(review): this empty-list check sits inside the loop
		 * body rather than after it — presumably intentional so the
		 * list is reaped as soon as it drains, but verify. */
		if (LST_IS_EMPTY(pdrv_object->dev_node_string)) {
			kfree(pdrv_object->dev_node_string);
			pdrv_object->dev_node_string = NULL;
		}
	}
	return status;
}
729 | ||
/*
 * ======== request_bridge_resources ========
 * Purpose:
 *      Reserves shared memory for bridge.
 *      Fills @res with the fixed OMAP resource values and maps the
 *      system control registers. Always returns 0. The caller
 *      (drv_request_bridge_res_dsp) passes a zeroed structure and
 *      overrides several of these fields afterwards.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
	struct cfg_hostres *host_res = res;

	/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
	host_res->num_mem_windows = 2;

	/* First window is for DSP internal memory */
	host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
	dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
	dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
	dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);

	/* for 24xx base port is not mapping the mamory for DSP
	 * internal memory TODO Do a ioremap here */
	/* Second window is for DSP external memory shared with MPU */

	/* These are hard-coded values */
	host_res->birq_registers = 0;
	host_res->birq_attrib = 0;
	host_res->dw_offset_for_monitor = 0;
	host_res->dw_chnl_offset = 0;
	/* CHNL_MAXCHANNELS */
	host_res->dw_num_chnls = CHNL_MAXCHANNELS;
	host_res->dw_chnl_buf_size = 0x400;

	return 0;
}
763 | ||
764 | /* | |
765 | * ======== drv_request_bridge_res_dsp ======== | |
766 | * Purpose: | |
767 | * Reserves shared memory for bridge. | |
768 | */ | |
769 | int drv_request_bridge_res_dsp(void **phost_resources) | |
770 | { | |
771 | int status = 0; | |
772 | struct cfg_hostres *host_res; | |
773 | u32 dw_buff_size; | |
774 | u32 dma_addr; | |
775 | u32 shm_size; | |
776 | struct drv_data *drv_datap = dev_get_drvdata(bridge); | |
777 | ||
778 | dw_buff_size = sizeof(struct cfg_hostres); | |
779 | ||
780 | host_res = kzalloc(dw_buff_size, GFP_KERNEL); | |
781 | ||
782 | if (host_res != NULL) { | |
783 | request_bridge_resources(host_res); | |
784 | /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */ | |
785 | host_res->num_mem_windows = 4; | |
786 | ||
787 | host_res->dw_mem_base[0] = 0; | |
788 | host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE, | |
789 | OMAP_DSP_MEM1_SIZE); | |
790 | host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE, | |
791 | OMAP_DSP_MEM2_SIZE); | |
792 | host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE, | |
793 | OMAP_DSP_MEM3_SIZE); | |
794 | host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE, | |
795 | OMAP_PER_CM_SIZE); | |
796 | host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE, | |
797 | OMAP_PER_PRM_SIZE); | |
798 | host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE, | |
799 | OMAP_CORE_PRM_SIZE); | |
9d4f81a7 FC |
800 | host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE, |
801 | OMAP_DMMU_SIZE); | |
7d55524d ORL |
802 | |
803 | dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", | |
804 | host_res->dw_mem_base[0]); | |
805 | dev_dbg(bridge, "dw_mem_base[1] 0x%x\n", | |
806 | host_res->dw_mem_base[1]); | |
807 | dev_dbg(bridge, "dw_mem_base[2] 0x%x\n", | |
808 | host_res->dw_mem_base[2]); | |
809 | dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", | |
810 | host_res->dw_mem_base[3]); | |
811 | dev_dbg(bridge, "dw_mem_base[4] 0x%x\n", | |
812 | host_res->dw_mem_base[4]); | |
9d4f81a7 | 813 | dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base); |
7d55524d ORL |
814 | |
815 | shm_size = drv_datap->shm_size; | |
816 | if (shm_size >= 0x10000) { | |
817 | /* Allocate Physically contiguous, | |
818 | * non-cacheable memory */ | |
819 | host_res->dw_mem_base[1] = | |
820 | (u32) mem_alloc_phys_mem(shm_size, 0x100000, | |
821 | &dma_addr); | |
822 | if (host_res->dw_mem_base[1] == 0) { | |
823 | status = -ENOMEM; | |
824 | pr_err("shm reservation Failed\n"); | |
825 | } else { | |
826 | host_res->dw_mem_length[1] = shm_size; | |
827 | host_res->dw_mem_phys[1] = dma_addr; | |
828 | ||
829 | dev_dbg(bridge, "%s: Bridge shm address 0x%x " | |
830 | "dma_addr %x size %x\n", __func__, | |
831 | host_res->dw_mem_base[1], | |
832 | dma_addr, shm_size); | |
833 | } | |
834 | } | |
a741ea6e | 835 | if (!status) { |
7d55524d ORL |
836 | /* These are hard-coded values */ |
837 | host_res->birq_registers = 0; | |
838 | host_res->birq_attrib = 0; | |
839 | host_res->dw_offset_for_monitor = 0; | |
840 | host_res->dw_chnl_offset = 0; | |
841 | /* CHNL_MAXCHANNELS */ | |
842 | host_res->dw_num_chnls = CHNL_MAXCHANNELS; | |
843 | host_res->dw_chnl_buf_size = 0x400; | |
844 | dw_buff_size = sizeof(struct cfg_hostres); | |
845 | } | |
846 | *phost_resources = host_res; | |
847 | } | |
848 | /* End Mem alloc */ | |
849 | return status; | |
850 | } | |
851 | ||
fb6aabb7 | 852 | void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size) |
7d55524d ORL |
853 | { |
854 | u32 pool_virt_base; | |
855 | ||
856 | /* get the virtual address for the physical memory pool passed */ | |
fb6aabb7 | 857 | pool_virt_base = (u32) ioremap(pool_phys_base, pool_size); |
7d55524d ORL |
858 | |
859 | if ((void **)pool_virt_base == NULL) { | |
860 | pr_err("%s: external physical memory map failed\n", __func__); | |
861 | ext_phys_mem_pool_enabled = false; | |
862 | } else { | |
fb6aabb7 RS |
863 | ext_mem_pool.phys_mem_base = pool_phys_base; |
864 | ext_mem_pool.phys_mem_size = pool_size; | |
7d55524d | 865 | ext_mem_pool.virt_mem_base = pool_virt_base; |
fb6aabb7 | 866 | ext_mem_pool.next_phys_alloc_ptr = pool_phys_base; |
7d55524d ORL |
867 | ext_phys_mem_pool_enabled = true; |
868 | } | |
869 | } | |
870 | ||
871 | void mem_ext_phys_pool_release(void) | |
872 | { | |
873 | if (ext_phys_mem_pool_enabled) { | |
874 | iounmap((void *)(ext_mem_pool.virt_mem_base)); | |
875 | ext_phys_mem_pool_enabled = false; | |
876 | } | |
877 | } | |
878 | ||
879 | /* | |
880 | * ======== mem_ext_phys_mem_alloc ======== | |
881 | * Purpose: | |
882 | * Allocate physically contiguous, uncached memory from external memory pool | |
883 | */ | |
884 | ||
e6bf74f0 | 885 | static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 * phys_addr) |
7d55524d ORL |
886 | { |
887 | u32 new_alloc_ptr; | |
888 | u32 offset; | |
889 | u32 virt_addr; | |
890 | ||
891 | if (align == 0) | |
892 | align = 1; | |
893 | ||
894 | if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size) | |
895 | - ext_mem_pool.next_phys_alloc_ptr)) { | |
13b18c29 | 896 | phys_addr = NULL; |
7d55524d ORL |
897 | return NULL; |
898 | } else { | |
899 | offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1)); | |
900 | if (offset == 0) | |
901 | new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr; | |
902 | else | |
903 | new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) + | |
904 | (align - offset); | |
905 | if ((new_alloc_ptr + bytes) <= | |
906 | (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) { | |
907 | /* we can allocate */ | |
13b18c29 | 908 | *phys_addr = new_alloc_ptr; |
7d55524d ORL |
909 | ext_mem_pool.next_phys_alloc_ptr = |
910 | new_alloc_ptr + bytes; | |
911 | virt_addr = | |
912 | ext_mem_pool.virt_mem_base + (new_alloc_ptr - | |
913 | ext_mem_pool. | |
914 | phys_mem_base); | |
915 | return (void *)virt_addr; | |
916 | } else { | |
13b18c29 | 917 | *phys_addr = 0; |
7d55524d ORL |
918 | return NULL; |
919 | } | |
920 | } | |
921 | } | |
922 | ||
923 | /* | |
924 | * ======== mem_alloc_phys_mem ======== | |
925 | * Purpose: | |
926 | * Allocate physically contiguous, uncached memory | |
927 | */ | |
0cd343a4 | 928 | void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask, |
e6bf74f0 | 929 | u32 *physical_address) |
7d55524d ORL |
930 | { |
931 | void *va_mem = NULL; | |
932 | dma_addr_t pa_mem; | |
933 | ||
934 | if (byte_size > 0) { | |
935 | if (ext_phys_mem_pool_enabled) { | |
0cd343a4 | 936 | va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask, |
7d55524d ORL |
937 | (u32 *) &pa_mem); |
938 | } else | |
939 | va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem, | |
940 | GFP_KERNEL); | |
941 | if (va_mem == NULL) | |
13b18c29 | 942 | *physical_address = 0; |
7d55524d | 943 | else |
13b18c29 | 944 | *physical_address = pa_mem; |
7d55524d ORL |
945 | } |
946 | return va_mem; | |
947 | } | |
948 | ||
949 | /* | |
950 | * ======== mem_free_phys_mem ======== | |
951 | * Purpose: | |
952 | * Free the given block of physically contiguous memory. | |
953 | */ | |
318b5df9 | 954 | void mem_free_phys_mem(void *virtual_address, u32 physical_address, |
7d55524d ORL |
955 | u32 byte_size) |
956 | { | |
318b5df9 | 957 | DBC_REQUIRE(virtual_address != NULL); |
7d55524d ORL |
958 | |
959 | if (!ext_phys_mem_pool_enabled) | |
318b5df9 | 960 | dma_free_coherent(NULL, byte_size, virtual_address, |
13b18c29 | 961 | physical_address); |
7d55524d | 962 | } |