]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/staging/westbridge/astoria/api/src/cyaslowlevel.c
Fix common misspellings
[mirror_ubuntu-bionic-kernel.git] / drivers / staging / westbridge / astoria / api / src / cyaslowlevel.c
1 /* Cypress West Bridge API source file (cyaslowlevel.c)
2 ## ===========================
3 ## Copyright (C) 2010 Cypress Semiconductor
4 ##
5 ## This program is free software; you can redistribute it and/or
6 ## modify it under the terms of the GNU General Public License
7 ## as published by the Free Software Foundation; either version 2
8 ## of the License, or (at your option) any later version.
9 ##
10 ## This program is distributed in the hope that it will be useful,
11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of
12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 ## GNU General Public License for more details.
14 ##
15 ## You should have received a copy of the GNU General Public License
16 ## along with this program; if not, write to the Free Software
17 ## Foundation, Inc., 51 Franklin Street, Fifth Floor
18 ## Boston, MA 02110-1301, USA.
19 ## ===========================
20 */
21
22 #include "../../include/linux/westbridge/cyashal.h"
23 #include "../../include/linux/westbridge/cyascast.h"
24 #include "../../include/linux/westbridge/cyasdevice.h"
25 #include "../../include/linux/westbridge/cyaslowlevel.h"
26 #include "../../include/linux/westbridge/cyasintr.h"
27 #include "../../include/linux/westbridge/cyaserr.h"
28 #include "../../include/linux/westbridge/cyasregs.h"
29
/* Number of polling iterations before a mailbox wait is declared timed out. */
static const uint32_t cy_as_low_level_timeout_count = 65536 * 4;

/* Forward declaration */
static cy_as_return_status_t cy_as_send_one(cy_as_device *dev_p,
	cy_as_ll_request_response *req_p);

/*
 * This array holds the size of the largest request we will ever receive from
 * the West Bridge device per context. The size is in 16 bit words. Note a
 * size of 0xffff indicates that there will be no requests on this context
 * from West Bridge.
 */
static uint16_t max_request_length[CY_RQT_CONTEXT_COUNT] = {
	8, /* CY_RQT_GENERAL_RQT_CONTEXT - CY_RQT_INITIALIZATION_COMPLETE */
	8, /* CY_RQT_RESOURCE_RQT_CONTEXT - none */
	8, /* CY_RQT_STORAGE_RQT_CONTEXT - CY_RQT_MEDIA_CHANGED */
	128, /* CY_RQT_USB_RQT_CONTEXT - CY_RQT_USB_EVENT */
	8 /* CY_RQT_TUR_RQT_CONTEXT - CY_RQT_TURBO_CMD_FROM_HOST */
};
49
/*
 * For the given context, this function removes the request node at the head
 * of the queue from the context. This is called after all processing has
 * occurred on the given request and response and we are ready to remove this
 * entry from the queue.
 */
static void
cy_as_ll_remove_request_queue_head(cy_as_device *dev_p, cy_as_context *ctxt_p)
{
	uint32_t mask, state;
	cy_as_ll_request_list_node *node_p;

	(void)dev_p;
	cy_as_hal_assert(ctxt_p->request_queue_p != 0);

	/* Unlink the head node with interrupts masked so the mailbox ISR
	 * never observes a half-updated queue. */
	mask = cy_as_hal_disable_interrupts();
	node_p = ctxt_p->request_queue_p;
	ctxt_p->request_queue_p = node_p->next;
	cy_as_hal_enable_interrupts(mask);

	/* Clear the fields so a stale node cannot be mistaken for live. */
	node_p->callback = 0;
	node_p->rqt = 0;
	node_p->resp = 0;

	/*
	 * note that the caller allocates and destroys the request and
	 * response. generally the destroy happens in the callback for
	 * async requests and after the wait returns for sync. the
	 * request and response may not actually be destroyed but may be
	 * managed in other ways as well. it is the responsibility of
	 * the caller to deal with these in any case. the caller can do
	 * this in the request/response callback function.
	 */
	state = cy_as_hal_disable_interrupts();
	cy_as_hal_c_b_free(node_p);
	cy_as_hal_enable_interrupts(state);
}
87
/*
 * For the context given, this function sends the next request to
 * West Bridge via the mailbox register, if the next request is
 * ready to be sent and has not already been sent.
 */
static void
cy_as_ll_send_next_request(cy_as_device *dev_p, cy_as_context *ctxt_p)
{
	cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;

	/*
	 * ret == ret is equivalent to while (1) but eliminates compiler
	 * warnings for some compilers.
	 */
	while (ret == ret) {
		cy_as_ll_request_list_node *node_p = ctxt_p->request_queue_p;
		if (node_p == 0)
			break;

		/* Only a QUEUED head may be started; anything else is
		 * already in flight or completed. */
		if (cy_as_request_get_node_state(node_p) !=
			CY_AS_REQUEST_LIST_STATE_QUEUED)
			break;

		cy_as_request_set_node_state(node_p,
			CY_AS_REQUEST_LIST_STATE_WAITING);
		ret = cy_as_send_one(dev_p, node_p->rqt);
		if (ret == CY_AS_ERROR_SUCCESS)
			break;

		/*
		 * if an error occurs in sending the request, tell the requester
		 * about the error and remove the request from the queue.
		 */
		cy_as_request_set_node_state(node_p,
			CY_AS_REQUEST_LIST_STATE_RECEIVED);
		node_p->callback(dev_p, ctxt_p->number,
			node_p->rqt, node_p->resp, ret);
		cy_as_ll_remove_request_queue_head(dev_p, ctxt_p);

		/*
		 * this falls through to the while loop to send the next request
		 * since the previous request did not get sent.
		 */
	}
}
133
/*
 * This method removes an entry from the request queue of a given context.
 * The entry is removed only if it is not in transit (unless 'force' is
 * set). Returns whether the entry was removed, was not found, or was
 * left in place because it is in transit.
 */
cy_as_remove_request_result_t
cy_as_ll_remove_request(cy_as_device *dev_p, cy_as_context *ctxt_p,
	cy_as_ll_request_response *req_p, cy_bool force)
{
	uint32_t imask;
	cy_as_ll_request_list_node *node_p;
	cy_as_ll_request_list_node *tmp_p;
	uint32_t state;

	imask = cy_as_hal_disable_interrupts();
	if (ctxt_p->request_queue_p != 0 &&
		ctxt_p->request_queue_p->rqt == req_p) {
		/* The request is at the head of the queue. */
		node_p = ctxt_p->request_queue_p;
		if ((cy_as_request_get_node_state(node_p) ==
			CY_AS_REQUEST_LIST_STATE_WAITING) && (!force)) {
			/* Already handed to the hardware; refuse to
			 * remove unless forced. */
			cy_as_hal_enable_interrupts(imask);
			return cy_as_remove_request_in_transit;
		}

		ctxt_p->request_queue_p = node_p->next;
	} else {
		/* Search for the node whose successor carries this
		 * request, then splice that successor out. */
		tmp_p = ctxt_p->request_queue_p;
		while (tmp_p != 0 && tmp_p->next != 0 &&
			tmp_p->next->rqt != req_p)
			tmp_p = tmp_p->next;

		if (tmp_p == 0 || tmp_p->next == 0) {
			cy_as_hal_enable_interrupts(imask);
			return cy_as_remove_request_not_found;
		}

		node_p = tmp_p->next;
		tmp_p->next = node_p->next;
	}

	/* Notify the owner; the callback is responsible for destroying
	 * the request/response pair. */
	if (node_p->callback)
		node_p->callback(dev_p, ctxt_p->number, node_p->rqt,
			node_p->resp, CY_AS_ERROR_CANCELED);

	state = cy_as_hal_disable_interrupts();
	cy_as_hal_c_b_free(node_p);
	cy_as_hal_enable_interrupts(state);

	cy_as_hal_enable_interrupts(imask);
	return cy_as_remove_request_sucessful;
}
184
185 void
186 cy_as_ll_remove_all_requests(cy_as_device *dev_p, cy_as_context *ctxt_p)
187 {
188 cy_as_ll_request_list_node *node = ctxt_p->request_queue_p;
189
190 while (node) {
191 if (cy_as_request_get_node_state(ctxt_p->request_queue_p) !=
192 CY_AS_REQUEST_LIST_STATE_RECEIVED)
193 cy_as_ll_remove_request(dev_p, ctxt_p,
194 node->rqt, cy_true);
195 node = node->next;
196 }
197 }
198
199 static cy_bool
200 cy_as_ll_is_in_queue(cy_as_context *ctxt_p, cy_as_ll_request_response *req_p)
201 {
202 uint32_t mask;
203 cy_as_ll_request_list_node *node_p;
204
205 mask = cy_as_hal_disable_interrupts();
206 node_p = ctxt_p->request_queue_p;
207 while (node_p) {
208 if (node_p->rqt == req_p) {
209 cy_as_hal_enable_interrupts(mask);
210 return cy_true;
211 }
212 node_p = node_p->next;
213 }
214 cy_as_hal_enable_interrupts(mask);
215 return cy_false;
216 }
217
/*
 * This is the handler for mailbox data when we are trying to send data
 * to the West Bridge firmware. The firmware may be trying to send us
 * data and we need to queue this data to allow the firmware to move
 * forward and be in a state to receive our request. Here we just queue
 * the data and it is processed at a later time by the mailbox interrupt
 * handler.
 */
void
cy_as_ll_queue_mailbox_data(cy_as_device *dev_p)
{
	cy_as_context *ctxt_p;
	uint8_t context;
	uint16_t data[4];
	int32_t i;

	/* Read the data from mailbox 0 to determine what to do with the data */
	for (i = 3; i >= 0; i--)
		data[i] = cy_as_hal_read_register(dev_p->tag,
			cy_cast_int2U_int16(CY_AS_MEM_P0_MAILBOX0 + i));

	context = cy_as_mbox_get_context(data[0]);
	if (context >= CY_RQT_CONTEXT_COUNT) {
		cy_as_hal_print_message("mailbox request/response received "
			"with invalid context value (%d)\n", context);
		return;
	}

	ctxt_p = dev_p->context[context];

	/*
	 * queue overflow is treated as fatal here (assert) rather than
	 * by dropping data; the compaction performed in
	 * cy_as_mail_box_process_data() is what keeps the queue bounded.
	 */
	cy_as_hal_assert(ctxt_p->queue_index * sizeof(uint16_t) +
		sizeof(data) <= sizeof(ctxt_p->data_queue));

	for (i = 0; i < 4; i++)
		ctxt_p->data_queue[ctxt_p->queue_index++] = data[i];

	cy_as_hal_assert((ctxt_p->queue_index % 4) == 0);
	/* Flag the drain loop that staged data is waiting. */
	dev_p->ll_queued_data = cy_true;
}
260
/*
 * Process one four-word mailbox transfer for the context encoded in
 * data[0]. The words either extend/complete an incoming request from
 * the firmware, or the response to the request at the head of the
 * context's queue. When the final cycle arrives, the matching
 * callback is invoked.
 */
void
cy_as_mail_box_process_data(cy_as_device *dev_p, uint16_t *data)
{
	cy_as_context *ctxt_p;
	uint8_t context;
	uint16_t *len_p;
	cy_as_ll_request_response *rec_p;
	uint8_t st;
	uint16_t src, dest;

	context = cy_as_mbox_get_context(data[0]);
	if (context >= CY_RQT_CONTEXT_COUNT) {
		cy_as_hal_print_message("mailbox request/response received "
			"with invalid context value (%d)\n", context);
		return;
	}

	ctxt_p = dev_p->context[context];

	if (cy_as_mbox_is_request(data[0])) {
		/* Incoming request: accumulate into the context's
		 * preallocated request buffer. */
		cy_as_hal_assert(ctxt_p->req_p != 0);
		rec_p = ctxt_p->req_p;
		len_p = &ctxt_p->request_length;

	} else {
		/* Response: must match the request currently WAITING at
		 * the head of this context's queue. */
		if (ctxt_p->request_queue_p == 0 ||
			cy_as_request_get_node_state(ctxt_p->request_queue_p)
			!= CY_AS_REQUEST_LIST_STATE_WAITING) {
			cy_as_hal_print_message("mailbox response received on "
				"context that was not expecting a response\n");
			cy_as_hal_print_message(" context: %d\n", context);
			cy_as_hal_print_message(" contents: 0x%04x 0x%04x "
				"0x%04x 0x%04x\n",
				data[0], data[1], data[2], data[3]);
			if (ctxt_p->request_queue_p != 0)
				cy_as_hal_print_message(" state: 0x%02x\n",
					ctxt_p->request_queue_p->state);
			return;
		}

		/* Make sure the request has an associated response */
		cy_as_hal_assert(ctxt_p->request_queue_p->resp != 0);

		rec_p = ctxt_p->request_queue_p->resp;
		len_p = &ctxt_p->request_queue_p->length;
	}

	if (rec_p->stored == 0) {
		/*
		 * this is the first cycle of the response
		 */
		cy_as_ll_request_response__set_code(rec_p,
			cy_as_mbox_get_code(data[0]));
		cy_as_ll_request_response__set_context(rec_p, context);

		if (cy_as_mbox_is_last(data[0])) {
			/* This is a single cycle response */
			*len_p = rec_p->length;
			st = 1;
		} else {
			/* Ensure that enough memory has been
			 * reserved for the response. */
			cy_as_hal_assert(rec_p->length >= data[1]);
			*len_p = (data[1] < rec_p->length) ?
				data[1] : rec_p->length;
			st = 2;
		}
	} else
		st = 1;

	/* Transfer the data from the mailboxes to the response */
	while (rec_p->stored < *len_p && st < 4)
		rec_p->data[rec_p->stored++] = data[st++];

	if (cy_as_mbox_is_last(data[0])) {
		/* NB: The call-back that is made below can cause the
		 * addition of more data in this queue, thus causing
		 * a recursive overflow of the queue. this is prevented
		 * by removing the request entry that is currently
		 * being passed up from the data queue. if this is done,
		 * the queue only needs to be as long as two request
		 * entries from west bridge.
		 */
		if ((ctxt_p->rqt_index > 0) &&
			(ctxt_p->rqt_index <= ctxt_p->queue_index)) {
			/* Compact the staged queue: slide any words after
			 * the entry just consumed down to the front. */
			dest = 0;
			src = ctxt_p->rqt_index;

			while (src < ctxt_p->queue_index)
				ctxt_p->data_queue[dest++] =
					ctxt_p->data_queue[src++];

			ctxt_p->rqt_index = 0;
			ctxt_p->queue_index = dest;
			cy_as_hal_assert((ctxt_p->queue_index % 4) == 0);
		}

		if (ctxt_p->request_queue_p != 0 && rec_p ==
			ctxt_p->request_queue_p->resp) {
			/*
			 * if this is the last cycle of the response, call the
			 * callback and reset for the next response.
			 */
			cy_as_ll_request_response *resp_p =
				ctxt_p->request_queue_p->resp;
			resp_p->length = ctxt_p->request_queue_p->length;
			cy_as_request_set_node_state(ctxt_p->request_queue_p,
				CY_AS_REQUEST_LIST_STATE_RECEIVED);

			cy_as_device_set_in_callback(dev_p);
			ctxt_p->request_queue_p->callback(dev_p, context,
				ctxt_p->request_queue_p->rqt,
				resp_p, CY_AS_ERROR_SUCCESS);

			cy_as_device_clear_in_callback(dev_p);

			cy_as_ll_remove_request_queue_head(dev_p, ctxt_p);
			cy_as_ll_send_next_request(dev_p, ctxt_p);
		} else {
			/* Send the request to the appropriate
			 * module to handle */
			cy_as_ll_request_response *request_p = ctxt_p->req_p;
			ctxt_p->req_p = 0;
			if (ctxt_p->request_callback) {
				cy_as_device_set_in_callback(dev_p);
				ctxt_p->request_callback(dev_p, context,
					request_p, 0, CY_AS_ERROR_SUCCESS);
				cy_as_device_clear_in_callback(dev_p);
			}
			/* Re-arm the context's buffer for the next
			 * incoming request. */
			cy_as_ll_init_request(request_p, 0,
				context, request_p->length);
			ctxt_p->req_p = request_p;
		}
	}
}
396
/*
 * This is the handler for processing queued mailbox data
 */
void
cy_as_mail_box_queued_data_handler(cy_as_device *dev_p)
{
	uint16_t i;

	/*
	 * if more data gets queued in between our entering this call
	 * and the end of the iteration on all contexts; we should
	 * continue processing the queued data.
	 */
	while (dev_p->ll_queued_data) {
		dev_p->ll_queued_data = cy_false;
		for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) {
			uint16_t offset;
			cy_as_context *ctxt_p = dev_p->context[i];
			cy_as_hal_assert((ctxt_p->queue_index % 4) == 0);

			offset = 0;
			while (offset < ctxt_p->queue_index) {
				/*
				 * rqt_index marks the end of the 4-word
				 * entry being processed. the callback run
				 * inside cy_as_mail_box_process_data() may
				 * compact the queue and rewrite rqt_index,
				 * so it is re-read afterwards to locate
				 * the next entry.
				 */
				ctxt_p->rqt_index = offset + 4;
				cy_as_mail_box_process_data(dev_p,
					ctxt_p->data_queue + offset);
				offset = ctxt_p->rqt_index;
			}
			ctxt_p->queue_index = 0;
		}
	}
}
428
429 /*
430 * This is the handler for the mailbox interrupt. This function reads
431 * data from the mailbox registers until a complete request or response
432 * is received. When a complete request is received, the callback
433 * associated with requests on that context is called. When a complete
434 * response is recevied, the callback associated with the request that
435 * generated the response is called.
436 */
437 void
438 cy_as_mail_box_interrupt_handler(cy_as_device *dev_p)
439 {
440 cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
441
442 /*
443 * queue the mailbox data to preserve
444 * order for later processing.
445 */
446 cy_as_ll_queue_mailbox_data(dev_p);
447
448 /*
449 * process what was queued and anything that may be pending
450 */
451 cy_as_mail_box_queued_data_handler(dev_p);
452 }
453
454 cy_as_return_status_t
455 cy_as_ll_start(cy_as_device *dev_p)
456 {
457 uint16_t i;
458
459 if (cy_as_device_is_low_level_running(dev_p))
460 return CY_AS_ERROR_ALREADY_RUNNING;
461
462 dev_p->ll_sending_rqt = cy_false;
463 dev_p->ll_abort_curr_rqt = cy_false;
464
465 for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) {
466 dev_p->context[i] = (cy_as_context *)
467 cy_as_hal_alloc(sizeof(cy_as_context));
468 if (dev_p->context[i] == 0)
469 return CY_AS_ERROR_OUT_OF_MEMORY;
470
471 dev_p->context[i]->number = (uint8_t)i;
472 dev_p->context[i]->request_callback = 0;
473 dev_p->context[i]->request_queue_p = 0;
474 dev_p->context[i]->last_node_p = 0;
475 dev_p->context[i]->req_p = cy_as_ll_create_request(dev_p,
476 0, (uint8_t)i, max_request_length[i]);
477 dev_p->context[i]->queue_index = 0;
478
479 if (!cy_as_hal_create_sleep_channel
480 (&dev_p->context[i]->channel))
481 return CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED;
482 }
483
484 cy_as_device_set_low_level_running(dev_p);
485 return CY_AS_ERROR_SUCCESS;
486 }
487
/*
 * Shutdown the low level communications module. This operation will
 * also cancel any queued low level requests.
 *
 * NOTE(review): this assumes cy_as_ll_start() completed for every
 * context; a partially-started device would have NULL context
 * pointers here — confirm callers never stop a half-started device.
 */
cy_as_return_status_t
cy_as_ll_stop(cy_as_device *dev_p)
{
	uint8_t i;
	cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
	cy_as_context *ctxt_p;
	uint32_t mask;

	for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) {
		ctxt_p = dev_p->context[i];
		if (!cy_as_hal_destroy_sleep_channel(&ctxt_p->channel))
			return CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED;

		/*
		 * now, free any queued requests and associated responses
		 */
		while (ctxt_p->request_queue_p) {
			uint32_t state;
			cy_as_ll_request_list_node *node_p =
				ctxt_p->request_queue_p;

			/* Mark this pair as in a cancel operation */
			cy_as_request_set_node_state(node_p,
				CY_AS_REQUEST_LIST_STATE_CANCELING);

			/* Tell the caller that we are canceling this request */
			/* NB: The callback is responsible for destroying the
			 * request and the response. we cannot count on the
			 * contents of these two after calling the callback.
			 */
			node_p->callback(dev_p, i, node_p->rqt,
				node_p->resp, CY_AS_ERROR_CANCELED);

			/* Remove the pair from the queue */
			mask = cy_as_hal_disable_interrupts();
			ctxt_p->request_queue_p = node_p->next;
			cy_as_hal_enable_interrupts(mask);

			/* Free the list node */
			state = cy_as_hal_disable_interrupts();
			cy_as_hal_c_b_free(node_p);
			cy_as_hal_enable_interrupts(state);
		}

		cy_as_ll_destroy_request(dev_p, dev_p->context[i]->req_p);
		cy_as_hal_free(dev_p->context[i]);
		dev_p->context[i] = 0;

	}
	cy_as_device_set_low_level_stopped(dev_p);

	return ret;
}
545
546 void
547 cy_as_ll_init_request(cy_as_ll_request_response *req_p,
548 uint16_t code, uint16_t context, uint16_t length)
549 {
550 uint16_t totallen = sizeof(cy_as_ll_request_response) +
551 (length - 1) * sizeof(uint16_t);
552
553 cy_as_hal_mem_set(req_p, 0, totallen);
554 req_p->length = length;
555 cy_as_ll_request_response__set_code(req_p, code);
556 cy_as_ll_request_response__set_context(req_p, context);
557 cy_as_ll_request_response__set_request(req_p);
558 }
559
560 /*
561 * Create a new request.
562 */
563 cy_as_ll_request_response *
564 cy_as_ll_create_request(cy_as_device *dev_p, uint16_t code,
565 uint8_t context, uint16_t length)
566 {
567 cy_as_ll_request_response *req_p;
568 uint32_t state;
569 uint16_t totallen = sizeof(cy_as_ll_request_response) +
570 (length - 1) * sizeof(uint16_t);
571
572 (void)dev_p;
573
574 state = cy_as_hal_disable_interrupts();
575 req_p = cy_as_hal_c_b_alloc(totallen);
576 cy_as_hal_enable_interrupts(state);
577 if (req_p)
578 cy_as_ll_init_request(req_p, code, context, length);
579
580 return req_p;
581 }
582
583 /*
584 * Destroy a request.
585 */
586 void
587 cy_as_ll_destroy_request(cy_as_device *dev_p, cy_as_ll_request_response *req_p)
588 {
589 uint32_t state;
590 (void)dev_p;
591 (void)req_p;
592
593 state = cy_as_hal_disable_interrupts();
594 cy_as_hal_c_b_free(req_p);
595 cy_as_hal_enable_interrupts(state);
596
597 }
598
599 void
600 cy_as_ll_init_response(cy_as_ll_request_response *req_p, uint16_t length)
601 {
602 uint16_t totallen = sizeof(cy_as_ll_request_response) +
603 (length - 1) * sizeof(uint16_t);
604
605 cy_as_hal_mem_set(req_p, 0, totallen);
606 req_p->length = length;
607 cy_as_ll_request_response__set_response(req_p);
608 }
609
610 /*
611 * Create a new response
612 */
613 cy_as_ll_request_response *
614 cy_as_ll_create_response(cy_as_device *dev_p, uint16_t length)
615 {
616 cy_as_ll_request_response *req_p;
617 uint32_t state;
618 uint16_t totallen = sizeof(cy_as_ll_request_response) +
619 (length - 1) * sizeof(uint16_t);
620
621 (void)dev_p;
622
623 state = cy_as_hal_disable_interrupts();
624 req_p = cy_as_hal_c_b_alloc(totallen);
625 cy_as_hal_enable_interrupts(state);
626 if (req_p)
627 cy_as_ll_init_response(req_p, length);
628
629 return req_p;
630 }
631
632 /*
633 * Destroy the new response
634 */
635 void
636 cy_as_ll_destroy_response(cy_as_device *dev_p, cy_as_ll_request_response *req_p)
637 {
638 uint32_t state;
639 (void)dev_p;
640 (void)req_p;
641
642 state = cy_as_hal_disable_interrupts();
643 cy_as_hal_c_b_free(req_p);
644 cy_as_hal_enable_interrupts(state);
645 }
646
/*
 * Drain any pending incoming mailbox data, then return the
 * firmware-side "mailbox not read" status bits. A non-zero return
 * means the outgoing mailboxes are still busy.
 */
static uint16_t
cy_as_read_intr_status(
	cy_as_device *dev_p)
{
	uint32_t mask;
	cy_bool bloop = cy_true;
	uint16_t v = 0, last = 0xffff;

	/*
	 * before determining if the mailboxes are ready for more data,
	 * we first check the mailbox interrupt to see if we need to
	 * receive data. this prevents a dead-lock condition that can
	 * occur when both sides are trying to receive data.
	 */
	while (last == last) {	/* deliberate warning-free infinite loop;
				 * exited via the break below */
		/*
		 * disable interrupts to be sure we don't process the mailbox
		 * here and have the interrupt routine try to read this data
		 * as well.
		 */
		mask = cy_as_hal_disable_interrupts();

		/*
		 * see if there is data to be read.
		 */
		v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_INTR_REG);
		if ((v & CY_AS_MEM_P0_INTR_REG_MBINT) == 0) {
			cy_as_hal_enable_interrupts(mask);
			break;
		}

		/*
		 * queue the mailbox data for later processing.
		 * this allows the firmware to move forward and
		 * service the request from the P port.
		 */
		cy_as_ll_queue_mailbox_data(dev_p);

		/*
		 * enable interrupts again to service mailbox
		 * interrupts appropriately
		 */
		cy_as_hal_enable_interrupts(mask);
	}

	/*
	 * now, all data is received. re-read MB_STAT until two
	 * consecutive reads agree, so a value caught mid-update is
	 * never returned.
	 */
	last = cy_as_hal_read_register(dev_p->tag,
		CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD;
	while (bloop) {
		v = cy_as_hal_read_register(dev_p->tag,
			CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD;
		if (v == last)
			break;

		last = v;
	}

	return v;
}
708
/*
 * Send a single request or response using the mail box register.
 * This function does not deal with the internal queues at all,
 * but only sends the request or response across to the firmware.
 * Returns CY_AS_ERROR_SUCCESS, CY_AS_ERROR_TIMEOUT, or
 * CY_AS_ERROR_CANCELED (when ll_abort_curr_rqt is raised mid-send).
 */
static cy_as_return_status_t
cy_as_send_one(
	cy_as_device *dev_p,
	cy_as_ll_request_response *req_p)
{
	int i;
	uint16_t mb0, v;
	int32_t loopcount;
	uint32_t int_stat;

#ifdef _DEBUG
	/* Sanity-check that the request payload fits within the
	 * per-context maximum data size. */
	if (cy_as_ll_request_response__is_request(req_p)) {
		switch (cy_as_ll_request_response__get_context(req_p)) {
		case CY_RQT_GENERAL_RQT_CONTEXT:
			cy_as_hal_assert(req_p->length * 2 + 2 <
				CY_CTX_GEN_MAX_DATA_SIZE);
			break;

		case CY_RQT_RESOURCE_RQT_CONTEXT:
			cy_as_hal_assert(req_p->length * 2 + 2 <
				CY_CTX_RES_MAX_DATA_SIZE);
			break;

		case CY_RQT_STORAGE_RQT_CONTEXT:
			cy_as_hal_assert(req_p->length * 2 + 2 <
				CY_CTX_STR_MAX_DATA_SIZE);
			break;

		case CY_RQT_USB_RQT_CONTEXT:
			cy_as_hal_assert(req_p->length * 2 + 2 <
				CY_CTX_USB_MAX_DATA_SIZE);
			break;
		}
	}
#endif

	/* Write the request to the mail box registers */
	if (req_p->length > 3) {
		/*
		 * multi-cycle form: more than three data words cannot fit
		 * in mailboxes 1-3 alongside the header, so the payload
		 * is streamed over several mailbox cycles. the first
		 * cycle carries the total word count in mailbox 1.
		 */
		uint16_t length = req_p->length;
		int which = 0;
		int st = 1;

		dev_p->ll_sending_rqt = cy_true;
		while (which < length) {
			/* Poll until the firmware has consumed the
			 * previous mailbox contents, or we time out. */
			loopcount = cy_as_low_level_timeout_count;
			do {
				v = cy_as_read_intr_status(dev_p);

			} while (v && loopcount-- > 0);

			if (v) {
				cy_as_hal_print_message(
					">>>>>> LOW LEVEL TIMEOUT "
					"%x %x %x %x\n",
					cy_as_hal_read_register(dev_p->tag,
						CY_AS_MEM_MCU_MAILBOX0),
					cy_as_hal_read_register(dev_p->tag,
						CY_AS_MEM_MCU_MAILBOX1),
					cy_as_hal_read_register(dev_p->tag,
						CY_AS_MEM_MCU_MAILBOX2),
					cy_as_hal_read_register(dev_p->tag,
						CY_AS_MEM_MCU_MAILBOX3));
				return CY_AS_ERROR_TIMEOUT;
			}

			if (dev_p->ll_abort_curr_rqt) {
				dev_p->ll_sending_rqt = cy_false;
				dev_p->ll_abort_curr_rqt = cy_false;
				return CY_AS_ERROR_CANCELED;
			}

			int_stat = cy_as_hal_disable_interrupts();

			/*
			 * check again whether the mailbox is free.
			 * it is possible that an ISR came in and
			 * wrote into the mailboxes since we last
			 * checked the status.
			 */
			v = cy_as_hal_read_register(dev_p->tag,
				CY_AS_MEM_MCU_MB_STAT) &
				CY_AS_MEM_P0_MCU_MBNOTRD;
			if (v) {
				/* Go back to the original check since
				 * the mailbox is not free. */
				cy_as_hal_enable_interrupts(int_stat);
				continue;
			}

			if (which == 0) {
				/* First cycle: mailbox 1 carries the total
				 * word count, leaving 2-3 for data. */
				cy_as_hal_write_register(dev_p->tag,
					CY_AS_MEM_MCU_MAILBOX1, length);
				st = 2;
			} else {
				st = 1;
			}

			/* Fill the remaining mailboxes (up to 3 per
			 * cycle) with payload words. */
			while ((which < length) && (st < 4)) {
				cy_as_hal_write_register(dev_p->tag,
					cy_cast_int2U_int16
					(CY_AS_MEM_MCU_MAILBOX0 + st),
					req_p->data[which++]);
				st++;
			}

			mb0 = req_p->box0;
			if (which == length) {
				dev_p->ll_sending_rqt = cy_false;
				mb0 |= CY_AS_REQUEST_RESPONSE_LAST_MASK;
			}

			if (dev_p->ll_abort_curr_rqt) {
				dev_p->ll_sending_rqt = cy_false;
				dev_p->ll_abort_curr_rqt = cy_false;
				cy_as_hal_enable_interrupts(int_stat);
				return CY_AS_ERROR_CANCELED;
			}

			/* Writing mailbox 0 last raises the interrupt on
			 * the firmware side. */
			cy_as_hal_write_register(dev_p->tag,
				CY_AS_MEM_MCU_MAILBOX0, mb0);

			/* Wait for the MBOX interrupt to be high */
			cy_as_hal_sleep150();
			cy_as_hal_enable_interrupts(int_stat);
		}
	} else {
check_mailbox_availability:
		/*
		 * wait for the mailbox registers to become available. this
		 * should be a very quick wait as the firmware is designed
		 * to accept requests at interrupt time and queue them for
		 * future processing.
		 */
		loopcount = cy_as_low_level_timeout_count;
		do {
			v = cy_as_read_intr_status(dev_p);

		} while (v && loopcount-- > 0);

		if (v) {
			cy_as_hal_print_message(
				">>>>>> LOW LEVEL TIMEOUT %x %x %x %x\n",
				cy_as_hal_read_register(dev_p->tag,
					CY_AS_MEM_MCU_MAILBOX0),
				cy_as_hal_read_register(dev_p->tag,
					CY_AS_MEM_MCU_MAILBOX1),
				cy_as_hal_read_register(dev_p->tag,
					CY_AS_MEM_MCU_MAILBOX2),
				cy_as_hal_read_register(dev_p->tag,
					CY_AS_MEM_MCU_MAILBOX3));
			return CY_AS_ERROR_TIMEOUT;
		}

		int_stat = cy_as_hal_disable_interrupts();

		/*
		 * check again whether the mailbox is free. it is
		 * possible that an ISR came in and wrote into the
		 * mailboxes since we last checked the status.
		 */
		v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MB_STAT) &
			CY_AS_MEM_P0_MCU_MBNOTRD;
		if (v) {
			/* Go back to the original check
			 * since the mailbox is not free. */
			cy_as_hal_enable_interrupts(int_stat);
			goto check_mailbox_availability;
		}

		/* Write the data associated with the request
		 * into the mbox registers 1 - 3 */
		v = 0;
		for (i = req_p->length - 1; i >= 0; i--)
			cy_as_hal_write_register(dev_p->tag,
				cy_cast_int2U_int16(CY_AS_MEM_MCU_MAILBOX1 + i),
				req_p->data[i]);

		/* Write the mbox register 0 to trigger the interrupt */
		cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX0,
			req_p->box0 | CY_AS_REQUEST_RESPONSE_LAST_MASK);

		cy_as_hal_sleep150();
		cy_as_hal_enable_interrupts(int_stat);
	}

	return CY_AS_ERROR_SUCCESS;
}
901
/*
 * This function queues a single request to be sent to the firmware.
 * The callback 'cb' is invoked when the reply arrives (or the request
 * is canceled/fails). Returns CY_AS_ERROR_OUT_OF_MEMORY when the list
 * node cannot be allocated.
 */
extern cy_as_return_status_t
cy_as_ll_send_request(
	cy_as_device *dev_p,
	/* The request to send */
	cy_as_ll_request_response *req,
	/* Storage for a reply, must be sure
	 * it is of sufficient size */
	cy_as_ll_request_response *resp,
	/* If true, this is a synchronous request */
	cy_bool sync,
	/* Callback to call when reply is received */
	cy_as_response_callback cb
	)
{
	cy_as_context *ctxt_p;
	uint16_t box0 = req->box0;
	uint8_t context;
	cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
	cy_as_ll_request_list_node *node_p;
	uint32_t mask, state;

	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);

	context = cy_as_mbox_get_context(box0);
	cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT);
	ctxt_p = dev_p->context[context];

	/* Allocate the list node */
	state = cy_as_hal_disable_interrupts();
	node_p = cy_as_hal_c_b_alloc(sizeof(cy_as_ll_request_list_node));
	cy_as_hal_enable_interrupts(state);

	if (node_p == 0)
		return CY_AS_ERROR_OUT_OF_MEMORY;

	/* Initialize the list node */
	node_p->callback = cb;
	node_p->length = 0;
	node_p->next = 0;
	node_p->resp = resp;
	node_p->rqt = req;
	node_p->state = CY_AS_REQUEST_LIST_STATE_QUEUED;
	if (sync)
		cy_as_request_node_set_sync(node_p);

	/* Put the request into the queue (append at the tail). */
	mask = cy_as_hal_disable_interrupts();
	if (ctxt_p->request_queue_p == 0) {
		/* Empty queue */
		ctxt_p->request_queue_p = node_p;
		ctxt_p->last_node_p = node_p;
	} else {
		ctxt_p->last_node_p->next = node_p;
		ctxt_p->last_node_p = node_p;
	}
	cy_as_hal_enable_interrupts(mask);
	cy_as_ll_send_next_request(dev_p, ctxt_p);

	/* When not already inside a callback, drain any mailbox data
	 * staged while the request was being queued. */
	if (!cy_as_device_is_in_callback(dev_p)) {
		mask = cy_as_hal_disable_interrupts();
		cy_as_mail_box_queued_data_handler(dev_p);
		cy_as_hal_enable_interrupts(mask);
	}

	return ret;
}
971
972 static void
973 cy_as_ll_send_callback(
974 cy_as_device *dev_p,
975 uint8_t context,
976 cy_as_ll_request_response *rqt,
977 cy_as_ll_request_response *resp,
978 cy_as_return_status_t ret)
979 {
980 (void)rqt;
981 (void)resp;
982 (void)ret;
983
984
985 cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
986
987 /*
988 * storage the state to return to the caller
989 */
990 dev_p->ll_error = ret;
991
992 /*
993 * now wake the caller
994 */
995 cy_as_hal_wake(&dev_p->context[context]->channel);
996 }
997
/*
 * Send a request and block until its reply arrives or the 8 second
 * timeout expires. On success returns the status recorded by the
 * completion callback; on timeout the request is force-removed from
 * the queue and CY_AS_ERROR_TIMEOUT is returned.
 */
cy_as_return_status_t
cy_as_ll_send_request_wait_reply(
	cy_as_device *dev_p,
	/* The request to send */
	cy_as_ll_request_response *req,
	/* Storage for a reply, must be
	 * sure it is of sufficient size */
	cy_as_ll_request_response *resp
	)
{
	cy_as_return_status_t ret;
	uint8_t context;
	/* Larger 8 sec time-out to handle the init
	 * delay for slower storage devices in USB FS. */
	uint32_t loopcount = 800;
	cy_as_context *ctxt_p;

	/* Get the context for the request */
	context = cy_as_ll_request_response__get_context(req);
	cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT);
	ctxt_p = dev_p->context[context];

	ret = cy_as_ll_send_request(dev_p, req, resp,
		cy_true, cy_as_ll_send_callback);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	while (loopcount-- > 0) {
		/*
		 * sleep while we wait on the response. receiving the reply
		 * will wake this thread. we will wait, at most, 8 seconds
		 * (10 ms * 800 tries) before we timeout. note if the reply
		 * arrives, we will not sleep the entire 10 ms, just til
		 * the reply arrives.
		 */
		cy_as_hal_sleep_on(&ctxt_p->channel, 10);

		/*
		 * if the request has left the queue, it means the request has
		 * been sent and the reply has been received. this means we can
		 * return to the caller and be sure the reply has been received.
		 */
		if (!cy_as_ll_is_in_queue(ctxt_p, req))
			return dev_p->ll_error;
	}

	/* Remove the QueueListNode for this request. */
	cy_as_ll_remove_request(dev_p, ctxt_p, req, cy_true);

	return CY_AS_ERROR_TIMEOUT;
}
1048
1049 cy_as_return_status_t
1050 cy_as_ll_register_request_callback(
1051 cy_as_device *dev_p,
1052 uint8_t context,
1053 cy_as_response_callback cb)
1054 {
1055 cy_as_context *ctxt_p;
1056 cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT);
1057 ctxt_p = dev_p->context[context];
1058
1059 ctxt_p->request_callback = cb;
1060 return CY_AS_ERROR_SUCCESS;
1061 }
1062
1063 void
1064 cy_as_ll_request_response__pack(
1065 cy_as_ll_request_response *req_p,
1066 uint32_t offset,
1067 uint32_t length,
1068 void *data_p)
1069 {
1070 uint16_t dt;
1071 uint8_t *dp = (uint8_t *)data_p;
1072
1073 while (length > 1) {
1074 dt = ((*dp++) << 8);
1075 dt |= (*dp++);
1076 cy_as_ll_request_response__set_word(req_p, offset, dt);
1077 offset++;
1078 length -= 2;
1079 }
1080
1081 if (length == 1) {
1082 dt = (*dp << 8);
1083 cy_as_ll_request_response__set_word(req_p, offset, dt);
1084 }
1085 }
1086
1087 void
1088 cy_as_ll_request_response__unpack(
1089 cy_as_ll_request_response *req_p,
1090 uint32_t offset,
1091 uint32_t length,
1092 void *data_p)
1093 {
1094 uint8_t *dp = (uint8_t *)data_p;
1095
1096 while (length-- > 0) {
1097 uint16_t val = cy_as_ll_request_response__get_word
1098 (req_p, offset++);
1099 *dp++ = (uint8_t)((val >> 8) & 0xff);
1100
1101 if (length) {
1102 length--;
1103 *dp++ = (uint8_t)(val & 0xff);
1104 }
1105 }
1106 }
1107
1108 extern cy_as_return_status_t
1109 cy_as_ll_send_status_response(
1110 cy_as_device *dev_p,
1111 uint8_t context,
1112 uint16_t code,
1113 uint8_t clear_storage)
1114 {
1115 cy_as_return_status_t ret;
1116 cy_as_ll_request_response resp;
1117 cy_as_ll_request_response *resp_p = &resp;
1118
1119 cy_as_hal_mem_set(resp_p, 0, sizeof(resp));
1120 resp_p->length = 1;
1121 cy_as_ll_request_response__set_response(resp_p);
1122 cy_as_ll_request_response__set_context(resp_p, context);
1123
1124 if (clear_storage)
1125 cy_as_ll_request_response__set_clear_storage_flag(resp_p);
1126
1127 cy_as_ll_request_response__set_code(resp_p, CY_RESP_SUCCESS_FAILURE);
1128 cy_as_ll_request_response__set_word(resp_p, 0, code);
1129
1130 ret = cy_as_send_one(dev_p, resp_p);
1131
1132 return ret;
1133 }
1134
1135 extern cy_as_return_status_t
1136 cy_as_ll_send_data_response(
1137 cy_as_device *dev_p,
1138 uint8_t context,
1139 uint16_t code,
1140 uint16_t length,
1141 void *data)
1142 {
1143 cy_as_ll_request_response *resp_p;
1144 uint16_t wlen;
1145 uint8_t respbuf[256];
1146
1147 if (length > 192)
1148 return CY_AS_ERROR_INVALID_SIZE;
1149
1150 /* Word length for bytes */
1151 wlen = length / 2;
1152
1153 /* If byte length odd, add one more */
1154 if (length % 2)
1155 wlen++;
1156
1157 /* One for the length of field */
1158 wlen++;
1159
1160 resp_p = (cy_as_ll_request_response *)respbuf;
1161 cy_as_hal_mem_set(resp_p, 0, sizeof(respbuf));
1162 resp_p->length = wlen;
1163 cy_as_ll_request_response__set_context(resp_p, context);
1164 cy_as_ll_request_response__set_code(resp_p, code);
1165
1166 cy_as_ll_request_response__set_word(resp_p, 0, length);
1167 cy_as_ll_request_response__pack(resp_p, 1, length, data);
1168
1169 return cy_as_send_one(dev_p, resp_p);
1170 }
1171
1172 static cy_bool
1173 cy_as_ll_is_e_p_transfer_related_request(cy_as_ll_request_response *rqt_p,
1174 cy_as_end_point_number_t ep)
1175 {
1176 uint16_t v;
1177 uint8_t type = cy_as_ll_request_response__get_code(rqt_p);
1178
1179 if (cy_as_ll_request_response__get_context(rqt_p) !=
1180 CY_RQT_USB_RQT_CONTEXT)
1181 return cy_false;
1182
1183 /*
1184 * when cancelling outstanding EP0 data transfers, any pending
1185 * setup ACK requests also need to be cancelled.
1186 */
1187 if ((ep == 0) && (type == CY_RQT_ACK_SETUP_PACKET))
1188 return cy_true;
1189
1190 if (type != CY_RQT_USB_EP_DATA)
1191 return cy_false;
1192
1193 v = cy_as_ll_request_response__get_word(rqt_p, 0);
1194 if ((cy_as_end_point_number_t)((v >> 13) & 1) != ep)
1195 return cy_false;
1196
1197 return cy_true;
1198 }
1199
/*
 * Remove any USB-context requests associated with endpoint 'ep', both
 * those still queued and one that may already be in transit to the
 * West Bridge (antioch) device.  Called when transfers on 'ep' are
 * being cancelled.  Always returns CY_AS_ERROR_SUCCESS.
 */
cy_as_return_status_t
cy_as_ll_remove_ep_data_requests(cy_as_device *dev_p,
	cy_as_end_point_number_t ep)
{
	cy_as_context *ctxt_p;
	cy_as_ll_request_list_node *node_p;
	uint32_t imask;

	/*
	 * first, remove any queued requests
	 */
	ctxt_p = dev_p->context[CY_RQT_USB_RQT_CONTEXT];
	if (ctxt_p) {
		/*
		 * Walk the queue and unlink the first request tied to this
		 * endpoint.  NOTE(review): the 'break' means only one
		 * matching request is removed — presumably at most one can
		 * be queued per endpoint; confirm with the queuing logic.
		 */
		for (node_p = ctxt_p->request_queue_p; node_p;
			node_p = node_p->next) {
			if (cy_as_ll_is_e_p_transfer_related_request
			(node_p->rqt, ep)) {
				cy_as_ll_remove_request(dev_p, ctxt_p,
					node_p->rqt, cy_false);
				break;
			}
		}

		/*
		 * now, deal with any request that may be in transit
		 */
		imask = cy_as_hal_disable_interrupts();

		/*
		 * The queue head is the in-transit candidate: it must match
		 * this endpoint and still be in the WAITING state (sent but
		 * not yet answered) to need special teardown.
		 */
		if (ctxt_p->request_queue_p != 0 &&
			cy_as_ll_is_e_p_transfer_related_request
			(ctxt_p->request_queue_p->rqt, ep) &&
			cy_as_request_get_node_state(ctxt_p->request_queue_p) ==
			CY_AS_REQUEST_LIST_STATE_WAITING) {
			cy_as_hal_print_message("need to remove an in-transit "
				"request to antioch\n");

			/*
			 * if the request has not been fully sent to west bridge
			 * yet, abort sending. otherwise, terminate the request
			 * with a CANCELED status. firmware will already have
			 * terminated this transfer.
			 */
			if (dev_p->ll_sending_rqt)
				dev_p->ll_abort_curr_rqt = cy_true;
			else {
				uint32_t state;

				/*
				 * Notify the owner of the cancellation before
				 * unlinking and freeing the queue node.
				 */
				node_p = ctxt_p->request_queue_p;
				if (node_p->callback)
					node_p->callback(dev_p, ctxt_p->number,
						node_p->rqt, node_p->resp,
						CY_AS_ERROR_CANCELED);

				ctxt_p->request_queue_p = node_p->next;
				/*
				 * Nested interrupt-disable around the free;
				 * cy_as_hal_c_b_free presumably requires it
				 * even though interrupts are already masked
				 * by 'imask' here — TODO confirm.
				 */
				state = cy_as_hal_disable_interrupts();
				cy_as_hal_c_b_free(node_p);
				cy_as_hal_enable_interrupts(state);
			}
		}

		cy_as_hal_enable_interrupts(imask);
	}

	return CY_AS_ERROR_SUCCESS;
}