/******************************************************************************

    AudioScience HPI driver
    Copyright (C) 1997-2010  AudioScience Inc. <support@audioscience.com>

    This program is free software; you can redistribute it and/or modify
    it under the terms of version 2 of the GNU General Public License as
    published by the Free Software Foundation;

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

 Extended Message Function With Response Caching

(C) Copyright AudioScience Inc. 2002
*****************************************************************************/
#define SOURCEFILE_NAME "hpimsgx.c"
#include "hpi_internal.h"
#include "hpimsginit.h"
#include "hpimsgx.h"
#include "hpidebug.h"

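/* PCI device table generated from hpipcida.h; the driver_data field of each
 * entry stores the backend message handler (hpi_handler_func) used to talk
 * to that adapter family.
 */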
static struct pci_device_id asihpi_pci_tbl[] = {
#include "hpipcida.h"
};

static struct hpios_spinlock msgx_lock;

static hpi_handler_func *hpi_entry_points[HPI_MAX_ADAPTERS];

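/* Scan asihpi_pci_tbl for an entry matching the given PCI IDs (PCI_ANY_ID
 * acts as a wildcard) and return the backend handler stored in driver_data,
 * or NULL if the device is not recognised.
 */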
static hpi_handler_func *hpi_lookup_entry_point_function(const struct hpi_pci
	*pci_info)
{
	int i;

	for (i = 0; asihpi_pci_tbl[i].vendor != 0; i++) {
		if (asihpi_pci_tbl[i].vendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].vendor != pci_info->vendor_id)
			continue;
		if (asihpi_pci_tbl[i].device != PCI_ANY_ID
			&& asihpi_pci_tbl[i].device != pci_info->device_id)
			continue;
		if (asihpi_pci_tbl[i].subvendor != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subvendor !=
			pci_info->subsys_vendor_id)
			continue;
		if (asihpi_pci_tbl[i].subdevice != PCI_ANY_ID
			&& asihpi_pci_tbl[i].subdevice !=
			pci_info->subsys_device_id)
			continue;

		HPI_DEBUG_LOG(DEBUG, " %x,%lu\n", i,
			asihpi_pci_tbl[i].driver_data);
		return (hpi_handler_func *) asihpi_pci_tbl[i].driver_data;
	}

	return NULL;
}

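/* Route a single message to the handler registered for its adapter index,
 * logging the message and response.  If the index is out of range or no
 * handler has been registered, fill in an HPI_ERROR_PROCESSING_MESSAGE
 * response instead.
 */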
static inline void hw_entry_point(struct hpi_message *phm,
	struct hpi_response *phr)
{
	hpi_handler_func *ep;

	if (phm->adapter_index < HPI_MAX_ADAPTERS) {
		ep = (hpi_handler_func *) hpi_entry_points[phm->
			adapter_index];
		if (ep) {
			HPI_DEBUG_MESSAGE(DEBUG, phm);
			ep(phm, phr);
			HPI_DEBUG_RESPONSE(phr);
			return;
		}
	}
	hpi_init_response(phr, phm->object, phm->function,
		HPI_ERROR_PROCESSING_MESSAGE);
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr);
static void adapter_close(struct hpi_message *phm, struct hpi_response *phr);

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr);
static void mixer_close(struct hpi_message *phm, struct hpi_response *phr);

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);
static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner);

static void HPIMSGX__reset(u16 adapter_index);
static u16 HPIMSGX__init(struct hpi_message *phm, struct hpi_response *phr);
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner);

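/* Packed response images, one per object type, pairing the common response
 * header with that object's payload.  These are the element types of the
 * response caches declared below.
 */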
#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(push, 1)
#endif

struct hpi_subsys_response {
	struct hpi_response_header h;
	struct hpi_subsys_res s;
};

struct hpi_adapter_response {
	struct hpi_response_header h;
	struct hpi_adapter_res a;
};

struct hpi_mixer_response {
	struct hpi_response_header h;
	struct hpi_mixer_res m;
};

struct hpi_stream_response {
	struct hpi_response_header h;
	struct hpi_stream_res d;
};

struct adapter_info {
	u16 type;
	u16 num_instreams;
	u16 num_outstreams;
};

struct asi_open_state {
	int open_flag;
	void *h_owner;
};

#ifndef DISABLE_PRAGMA_PACK1
#pragma pack(pop)
#endif

/* Globals */
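/* Response cache: the open responses below are captured once per adapter by
 * adapter_prepare() and replayed by adapter_open(), mixer_open() and the
 * stream open handlers, so user-mode opens never have to go to the hardware.
 */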
static struct hpi_adapter_response rESP_HPI_ADAPTER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_stream_response
	rESP_HPI_OSTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_stream_response
	rESP_HPI_ISTREAM_OPEN[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct hpi_mixer_response rESP_HPI_MIXER_OPEN[HPI_MAX_ADAPTERS];

static struct hpi_subsys_response gRESP_HPI_SUBSYS_FIND_ADAPTERS;

static struct adapter_info aDAPTER_INFO[HPI_MAX_ADAPTERS];

/* use these to keep track of opens from user mode apps/DLLs */
static struct asi_open_state
	outstream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

static struct asi_open_state
	instream_user_open[HPI_MAX_ADAPTERS][HPI_MAX_STREAMS];

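/* Handle subsystem-level messages.  Version, adapter enumeration and
 * open/close are answered from local state or the response cache; driver
 * load/unload and adapter create/delete also update that state before or
 * after calling down to the backend.
 */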
static void subsys_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_SUBSYS_GET_VERSION:
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_GET_VERSION, 0);
		phr->u.s.version = HPI_VER >> 8;	/* return major.minor */
		phr->u.s.data = HPI_VER;	/* return major.minor.release */
		break;
	case HPI_SUBSYS_OPEN:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_OPEN, 0);
		break;
	case HPI_SUBSYS_CLOSE:
		/* do not propagate the message down the chain */
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM, HPI_SUBSYS_CLOSE,
			0);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		break;
	case HPI_SUBSYS_DRIVER_LOAD:
		/* Initialize this module's internal state */
		hpios_msgxlock_init(&msgx_lock);
		memset(&hpi_entry_points, 0, sizeof(hpi_entry_points));
		hpios_locked_mem_init();
		/* Init subsys_findadapters response to no-adapters */
		HPIMSGX__reset(HPIMSGX_ALLADAPTERS);
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_LOAD, 0);
		/* individual HPIs don't implement driver load */
		HPI_COMMON(phm, phr);
		break;
	case HPI_SUBSYS_DRIVER_UNLOAD:
		HPI_COMMON(phm, phr);
		HPIMSGX__cleanup(HPIMSGX_ALLADAPTERS, h_owner);
		hpios_locked_mem_free_all();
		hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_DRIVER_UNLOAD, 0);
		return;

	case HPI_SUBSYS_GET_INFO:
		HPI_COMMON(phm, phr);
		break;

	case HPI_SUBSYS_FIND_ADAPTERS:
		memcpy(phr, &gRESP_HPI_SUBSYS_FIND_ADAPTERS,
			sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
		break;
	case HPI_SUBSYS_GET_NUM_ADAPTERS:
		memcpy(phr, &gRESP_HPI_SUBSYS_FIND_ADAPTERS,
			sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));
		phr->function = HPI_SUBSYS_GET_NUM_ADAPTERS;
		break;
	case HPI_SUBSYS_GET_ADAPTER:
		{
			int count = phm->adapter_index;
			int index = 0;
			hpi_init_response(phr, HPI_OBJ_SUBSYSTEM,
				HPI_SUBSYS_GET_ADAPTER, 0);

			/* This is complicated by the fact that we want to
			 * "skip" 0's in the adapter list.
			 * First, make sure we are pointing to a
			 * non-zero adapter type.
			 */
			while (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
				s.aw_adapter_list[index] == 0) {
				index++;
				if (index >= HPI_MAX_ADAPTERS)
					break;
			}
			while (count) {
				/* move on to the next adapter */
				index++;
				if (index >= HPI_MAX_ADAPTERS)
					break;
				while (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
					s.aw_adapter_list[index] == 0) {
					index++;
					if (index >= HPI_MAX_ADAPTERS)
						break;
				}
				count--;
			}

			if (index < HPI_MAX_ADAPTERS) {
				phr->u.s.adapter_index = (u16)index;
				phr->u.s.aw_adapter_list[0] =
					gRESP_HPI_SUBSYS_FIND_ADAPTERS.
					s.aw_adapter_list[index];
			} else {
				phr->u.s.adapter_index = 0;
				phr->u.s.aw_adapter_list[0] = 0;
				phr->error = HPI_ERROR_BAD_ADAPTER_NUMBER;
			}
			break;
		}
	case HPI_SUBSYS_CREATE_ADAPTER:
		HPIMSGX__init(phm, phr);
		break;
	case HPI_SUBSYS_DELETE_ADAPTER:
		HPIMSGX__cleanup(phm->adapter_index, h_owner);
		{
			struct hpi_message hm;
			struct hpi_response hr;
			/* call to HPI_ADAPTER_CLOSE */
			hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_CLOSE);
			hm.adapter_index = phm->adapter_index;
			hw_entry_point(&hm, &hr);
		}
		hw_entry_point(phm, phr);
		gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.
			aw_adapter_list[phm->adapter_index] = 0;
		hpi_entry_points[phm->adapter_index] = NULL;
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void adapter_message(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	switch (phm->function) {
	case HPI_ADAPTER_OPEN:
		adapter_open(phm, phr);
		break;
	case HPI_ADAPTER_CLOSE:
		adapter_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void mixer_message(struct hpi_message *phm, struct hpi_response *phr)
{
	switch (phm->function) {
	case HPI_MIXER_OPEN:
		mixer_open(phm, phr);
		break;
	case HPI_MIXER_CLOSE:
		mixer_close(phm, phr);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void outstream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_outstreams) {
		hpi_init_response(phr, HPI_OBJ_OSTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_OSTREAM_OPEN:
		outstream_open(phm, phr, h_owner);
		break;
	case HPI_OSTREAM_CLOSE:
		outstream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

static void instream_message(struct hpi_message *phm,
	struct hpi_response *phr, void *h_owner)
{
	if (phm->obj_index >= aDAPTER_INFO[phm->adapter_index].num_instreams) {
		hpi_init_response(phr, HPI_OBJ_ISTREAM, phm->function,
			HPI_ERROR_INVALID_OBJ_INDEX);
		return;
	}

	switch (phm->function) {
	case HPI_ISTREAM_OPEN:
		instream_open(phm, phr, h_owner);
		break;
	case HPI_ISTREAM_CLOSE:
		instream_close(phm, phr, h_owner);
		break;
	default:
		hw_entry_point(phm, phr);
		break;
	}
}

/* NOTE: HPI_Message() must be defined in the driver as a wrapper for
 * HPI_MessageEx so that functions in hpifunc.c compile.
 */
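/* Main dispatch entry point.  Rejects anything that is not a message or that
 * targets an invalid adapter index, routes the message by object type, and
 * for backend errors logs which entry point (HPI_6000/HPI_6205) produced the
 * failing response.
 */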
void hpi_send_recv_ex(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	HPI_DEBUG_MESSAGE(DEBUG, phm);

	if (phm->type != HPI_TYPE_MESSAGE) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_INVALID_TYPE);
		return;
	}

	if (phm->adapter_index >= HPI_MAX_ADAPTERS
		&& phm->adapter_index != HPIMSGX_ALLADAPTERS) {
		hpi_init_response(phr, phm->object, phm->function,
			HPI_ERROR_BAD_ADAPTER_NUMBER);
		return;
	}

	switch (phm->object) {
	case HPI_OBJ_SUBSYSTEM:
		subsys_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ADAPTER:
		adapter_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_MIXER:
		mixer_message(phm, phr);
		break;

	case HPI_OBJ_OSTREAM:
		outstream_message(phm, phr, h_owner);
		break;

	case HPI_OBJ_ISTREAM:
		instream_message(phm, phr, h_owner);
		break;

	default:
		hw_entry_point(phm, phr);
		break;
	}
	HPI_DEBUG_RESPONSE(phr);
#if 1
	if (phr->error >= HPI_ERROR_BACKEND_BASE) {
		void *ep = NULL;
		char *ep_name;

		HPI_DEBUG_MESSAGE(ERROR, phm);

		if (phm->adapter_index < HPI_MAX_ADAPTERS)
			ep = hpi_entry_points[phm->adapter_index];

		/* Don't need this? Have adapter index in debug info
		   Know at driver load time index->backend mapping */
		if (ep == HPI_6000)
			ep_name = "HPI_6000";
		else if (ep == HPI_6205)
			ep_name = "HPI_6205";
		else
			ep_name = "unknown";

		HPI_DEBUG_LOG(ERROR, "HPI %s response - error# %d\n", ep_name,
			phr->error);

		if (hpi_debug_level >= HPI_DEBUG_LEVEL_VERBOSE)
			hpi_debug_data((u16 *)phm,
				sizeof(*phm) / sizeof(u16));
	}
#endif
}

static void adapter_open(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_open\n");
	memcpy(phr, &rESP_HPI_ADAPTER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
}

static void adapter_close(struct hpi_message *phm, struct hpi_response *phr)
{
	HPI_DEBUG_LOG(VERBOSE, "adapter_close\n");
	hpi_init_response(phr, HPI_OBJ_ADAPTER, HPI_ADAPTER_CLOSE, 0);
}

static void mixer_open(struct hpi_message *phm, struct hpi_response *phr)
{
	memcpy(phr, &rESP_HPI_MIXER_OPEN[phm->adapter_index],
		sizeof(rESP_HPI_MIXER_OPEN[0]));
}

static void mixer_close(struct hpi_message *phm, struct hpi_response *phr)
{
	hpi_init_response(phr, HPI_OBJ_MIXER, HPI_MIXER_CLOSE, 0);
}

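/* Stream opens are tracked per (adapter, stream) in the *_user_open arrays
 * under msgx_lock: open marks the slot busy, issues a reset to the backend
 * with the lock dropped, then either records the owner and replays the cached
 * open response or rolls the flag back on error.  Close reverses this and
 * only succeeds for the recorded owner.
 */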
static void instream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (instream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_ISTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
	else {
		instream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_un_lock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_ISTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_un_lock(&msgx_lock);
}

static void instream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_ISTREAM, HPI_ISTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);
	if (h_owner ==
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "instream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		instream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_un_lock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			instream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d instream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			instream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_un_lock(&msgx_lock);
}

static void outstream_open(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_OPEN, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (outstream_user_open[phm->adapter_index][phm->obj_index].open_flag)
		phr->error = HPI_ERROR_OBJ_ALREADY_OPEN;
	else if (rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
		[phm->obj_index].h.error)
		memcpy(phr,
			&rESP_HPI_OSTREAM_OPEN[phm->adapter_index][phm->
				obj_index],
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
	else {
		outstream_user_open[phm->adapter_index][phm->
			obj_index].open_flag = 1;
		hpios_msgxlock_un_lock(&msgx_lock);

		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);

		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 1;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			memcpy(phr,
				&rESP_HPI_OSTREAM_OPEN[phm->adapter_index]
				[phm->obj_index],
				sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		}
	}
	hpios_msgxlock_un_lock(&msgx_lock);
}

static void outstream_close(struct hpi_message *phm, struct hpi_response *phr,
	void *h_owner)
{
	struct hpi_message hm;
	struct hpi_response hr;

	hpi_init_response(phr, HPI_OBJ_OSTREAM, HPI_OSTREAM_CLOSE, 0);

	hpios_msgxlock_lock(&msgx_lock);

	if (h_owner ==
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner) {
		/* HPI_DEBUG_LOG(INFO,"closing adapter %d "
		   "outstream %d owned by %p\n",
		   phm->wAdapterIndex, phm->wObjIndex, hOwner); */
		outstream_user_open[phm->adapter_index][phm->
			obj_index].h_owner = NULL;
		hpios_msgxlock_un_lock(&msgx_lock);
		/* issue a reset */
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_RESET);
		hm.adapter_index = phm->adapter_index;
		hm.obj_index = phm->obj_index;
		hw_entry_point(&hm, &hr);
		hpios_msgxlock_lock(&msgx_lock);
		if (hr.error) {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = h_owner;
			phr->error = hr.error;
		} else {
			outstream_user_open[phm->adapter_index][phm->
				obj_index].open_flag = 0;
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner = NULL;
		}
	} else {
		HPI_DEBUG_LOG(WARNING,
			"%p trying to close %d outstream %d owned by %p\n",
			h_owner, phm->adapter_index, phm->obj_index,
			outstream_user_open[phm->adapter_index][phm->
				obj_index].h_owner);
		phr->error = HPI_ERROR_OBJ_NOT_OPEN;
	}
	hpios_msgxlock_un_lock(&msgx_lock);
}

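/* Bring a newly created adapter into a known state: open it, query stream
 * counts via HPI_ADAPTER_GET_INFO, then pre-open every outstream, instream
 * and the mixer, caching each open response for later replay.
 */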
static u16 adapter_prepare(u16 adapter)
{
	struct hpi_message hm;
	struct hpi_response hr;

	/* Open the adapter and streams */
	u16 i;

	/* call to HPI_ADAPTER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_ADAPTER_OPEN[0]));
	if (hr.error)
		return hr.error;

	/* call to HPI_ADAPTER_GET_INFO */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_ADAPTER,
		HPI_ADAPTER_GET_INFO);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	if (hr.error)
		return hr.error;

	aDAPTER_INFO[adapter].num_outstreams = hr.u.a.num_outstreams;
	aDAPTER_INFO[adapter].num_instreams = hr.u.a.num_instreams;
	aDAPTER_INFO[adapter].type = hr.u.a.adapter_type;

	gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list[adapter] =
		hr.u.a.adapter_type;
	gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters++;
	if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters > HPI_MAX_ADAPTERS)
		gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters =
			HPI_MAX_ADAPTERS;

	/* call to HPI_OSTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_outstreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_OSTREAM,
			HPI_OSTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_OSTREAM_OPEN[0][0]));
		outstream_user_open[adapter][i].open_flag = 0;
		outstream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_ISTREAM_OPEN */
	for (i = 0; i < aDAPTER_INFO[adapter].num_instreams; i++) {
		hpi_init_message_response(&hm, &hr, HPI_OBJ_ISTREAM,
			HPI_ISTREAM_OPEN);
		hm.adapter_index = adapter;
		hm.obj_index = i;
		hw_entry_point(&hm, &hr);
		memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i], &hr,
			sizeof(rESP_HPI_ISTREAM_OPEN[0][0]));
		instream_user_open[adapter][i].open_flag = 0;
		instream_user_open[adapter][i].h_owner = NULL;
	}

	/* call to HPI_MIXER_OPEN */
	hpi_init_message_response(&hm, &hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN);
	hm.adapter_index = adapter;
	hw_entry_point(&hm, &hr);
	memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
		sizeof(rESP_HPI_MIXER_OPEN[0]));

	return gRESP_HPI_SUBSYS_FIND_ADAPTERS.h.error;
}

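/* Reset the cached responses for one adapter (or all of them) so that every
 * open replays an error until adapter_prepare() repopulates the cache, and
 * drop the adapter from the cached FIND_ADAPTERS list.
 */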
static void HPIMSGX__reset(u16 adapter_index)
{
	int i;
	u16 adapter;
	struct hpi_response hr;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		/* reset all responses to contain errors */
		hpi_init_response(&hr, HPI_OBJ_SUBSYSTEM,
			HPI_SUBSYS_FIND_ADAPTERS, 0);
		/* copy the whole cached response, not sizeof a pointer */
		memcpy(&gRESP_HPI_SUBSYS_FIND_ADAPTERS, &hr,
			sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS));

		for (adapter = 0; adapter < HPI_MAX_ADAPTERS; adapter++) {

			hpi_init_response(&hr, HPI_OBJ_ADAPTER,
				HPI_ADAPTER_OPEN, HPI_ERROR_BAD_ADAPTER);
			memcpy(&rESP_HPI_ADAPTER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_ADAPTER_OPEN[adapter]));

			hpi_init_response(&hr, HPI_OBJ_MIXER, HPI_MIXER_OPEN,
				HPI_ERROR_INVALID_OBJ);
			memcpy(&rESP_HPI_MIXER_OPEN[adapter], &hr,
				sizeof(rESP_HPI_MIXER_OPEN[adapter]));

			for (i = 0; i < HPI_MAX_STREAMS; i++) {
				hpi_init_response(&hr, HPI_OBJ_OSTREAM,
					HPI_OSTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_OSTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_OSTREAM_OPEN[adapter]
						[i]));
				hpi_init_response(&hr, HPI_OBJ_ISTREAM,
					HPI_ISTREAM_OPEN,
					HPI_ERROR_INVALID_OBJ);
				memcpy(&rESP_HPI_ISTREAM_OPEN[adapter][i],
					&hr,
					sizeof(rESP_HPI_ISTREAM_OPEN[adapter]
						[i]));
			}
		}
	} else if (adapter_index < HPI_MAX_ADAPTERS) {
		rESP_HPI_ADAPTER_OPEN[adapter_index].h.error =
			HPI_ERROR_BAD_ADAPTER;
		rESP_HPI_MIXER_OPEN[adapter_index].h.error =
			HPI_ERROR_INVALID_OBJ;
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			rESP_HPI_OSTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
			rESP_HPI_ISTREAM_OPEN[adapter_index][i].h.error =
				HPI_ERROR_INVALID_OBJ;
		}
		if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.
			s.aw_adapter_list[adapter_index]) {
			gRESP_HPI_SUBSYS_FIND_ADAPTERS.
				s.aw_adapter_list[adapter_index] = 0;
			gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters--;
		}
	}
}

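/* Create a new adapter from an HPI_SUBSYS_CREATE_ADAPTER message: look up the
 * backend entry point from the PCI resource, hand the message to it, and on
 * success record the index->handler mapping and prepare the adapter.
 */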
static u16 HPIMSGX__init(struct hpi_message *phm,
	/* HPI_SUBSYS_CREATE_ADAPTER structure with */
	/* resource list or NULL=find all */
	struct hpi_response *phr
	/* response from HPI_ADAPTER_GET_INFO */
	)
{
	hpi_handler_func *entry_point_func;
	struct hpi_response hr;

	if (gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.num_adapters >= HPI_MAX_ADAPTERS)
		return HPI_ERROR_BAD_ADAPTER_NUMBER;

	/* Init response here so we can pass in previous adapter list */
	hpi_init_response(&hr, phm->object, phm->function,
		HPI_ERROR_INVALID_OBJ);
	memcpy(hr.u.s.aw_adapter_list,
		gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list,
		sizeof(gRESP_HPI_SUBSYS_FIND_ADAPTERS.s.aw_adapter_list));

	entry_point_func =
		hpi_lookup_entry_point_function(phm->u.s.resource.r.pci);

	if (entry_point_func) {
		HPI_DEBUG_MESSAGE(DEBUG, phm);
		entry_point_func(phm, &hr);
	} else {
		phr->error = HPI_ERROR_PROCESSING_MESSAGE;
		return phr->error;
	}
	if (hr.error == 0) {
		/* the adapter was created successfully
		   save the mapping for future use */
		hpi_entry_points[hr.u.s.adapter_index] = entry_point_func;
		/* prepare adapter (pre-open streams etc.) */
		HPI_DEBUG_LOG(DEBUG,
			"HPI_SUBSYS_CREATE_ADAPTER successful,"
			" preparing adapter\n");
		adapter_prepare(hr.u.s.adapter_index);
	}
	memcpy(phr, &hr, hr.size);
	return phr->error;
}

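/* Release everything still held by h_owner (typically a user-mode client
 * that is going away): for each stream it still owns, reset the stream, free
 * any host buffer, reset its group, and clear the ownership record.
 */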
static void HPIMSGX__cleanup(u16 adapter_index, void *h_owner)
{
	int i, adapter, adapter_limit;

	if (!h_owner)
		return;

	if (adapter_index == HPIMSGX_ALLADAPTERS) {
		adapter = 0;
		adapter_limit = HPI_MAX_ADAPTERS;
	} else {
		adapter = adapter_index;
		adapter_limit = adapter + 1;
	}

	for (; adapter < adapter_limit; adapter++) {
		/* printk(KERN_INFO "Cleanup adapter #%d\n",wAdapter); */
		for (i = 0; i < HPI_MAX_STREAMS; i++) {
			if (h_owner ==
				outstream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"close adapter %d ostream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_OSTREAM, HPI_OSTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_OSTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				outstream_user_open[adapter][i].open_flag = 0;
				outstream_user_open[adapter][i].h_owner =
					NULL;
			}
			if (h_owner == instream_user_open[adapter][i].h_owner) {
				struct hpi_message hm;
				struct hpi_response hr;

				HPI_DEBUG_LOG(DEBUG,
					"close adapter %d istream %d\n",
					adapter, i);

				hpi_init_message_response(&hm, &hr,
					HPI_OBJ_ISTREAM, HPI_ISTREAM_RESET);
				hm.adapter_index = (u16)adapter;
				hm.obj_index = (u16)i;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_HOSTBUFFER_FREE;
				hw_entry_point(&hm, &hr);

				hm.function = HPI_ISTREAM_GROUP_RESET;
				hw_entry_point(&hm, &hr);

				instream_user_open[adapter][i].open_flag = 0;
				instream_user_open[adapter][i].h_owner = NULL;
			}
		}
	}
}
907 }