1 /* ced_ioc.c
2 ioctl part of the 1401 usb device driver for linux.
3 Copyright (C) 2010 Cambridge Electronic Design Ltd
4 Author Greg P Smith (greg@ced.co.uk)
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 */
20 #include <linux/kernel.h>
21 #include <linux/errno.h>
22 #include <linux/slab.h>
23 #include <linux/module.h>
24 #include <linux/kref.h>
25 #include <linux/uaccess.h>
26 #include <linux/usb.h>
27 #include <linux/mutex.h>
28 #include <linux/page-flags.h>
29 #include <linux/pagemap.h>
30 #include <linux/jiffies.h>
31
32 #include "usb1401.h"
33
34 /****************************************************************************
35 ** ced_flush_out_buff
36 **
37 ** Empties the Output buffer and sets int lines. Used from user level only
38 ****************************************************************************/
39 static void ced_flush_out_buff(struct ced_data *ced)
40 {
41 dev_dbg(&ced->interface->dev, "%s: currentState=%d\n",
42 __func__, ced->sCurrentState);
43 if (ced->sCurrentState == U14ERR_TIME) /* Do nothing if hardware in trouble */
44 return;
45 /* Kill off any pending I/O */
46 /* CharSend_Cancel(ced); */
47 spin_lock_irq(&ced->charOutLock);
48 ced->dwNumOutput = 0;
49 ced->dwOutBuffGet = 0;
50 ced->dwOutBuffPut = 0;
51 spin_unlock_irq(&ced->charOutLock);
52 }
53
54 /****************************************************************************
55 **
56 ** ced_flush_in_buff
57 **
58 ** Empties the input buffer and sets int lines
59 ****************************************************************************/
60 static void ced_flush_in_buff(struct ced_data *ced)
61 {
62 dev_dbg(&ced->interface->dev, "%s: currentState=%d\n",
63 __func__, ced->sCurrentState);
64 if (ced->sCurrentState == U14ERR_TIME) /* Do nothing if hardware in trouble */
65 return;
66 /* Kill off any pending I/O */
67 /* CharRead_Cancel(pDevObject); */
68 spin_lock_irq(&ced->charInLock);
69 ced->dwNumInput = 0;
70 ced->dwInBuffGet = 0;
71 ced->dwInBuffPut = 0;
72 spin_unlock_irq(&ced->charInLock);
73 }
74
75 /****************************************************************************
76 ** ced_put_chars
77 **
78 ** Utility routine to copy chars into the output buffer and fire them off.
79 ** called from user mode; takes charOutLock internally.
80 ****************************************************************************/
81 static int ced_put_chars(struct ced_data *ced, const char *pCh,
82 unsigned int uCount)
83 {
84 int iReturn;
85 spin_lock_irq(&ced->charOutLock); /* get the output spin lock */
86 if ((OUTBUF_SZ - ced->dwNumOutput) >= uCount) {
87 unsigned int u;
88 for (u = 0; u < uCount; u++) {
89 ced->outputBuffer[ced->dwOutBuffPut++] = pCh[u];
90 if (ced->dwOutBuffPut >= OUTBUF_SZ)
91 ced->dwOutBuffPut = 0;
92 }
93 ced->dwNumOutput += uCount;
94 spin_unlock_irq(&ced->charOutLock);
95 iReturn = ced_send_chars(ced); /* ...give a chance to transmit data */
96 } else {
97 iReturn = U14ERR_NOOUT; /* no room at the out (ha-ha) */
98 spin_unlock_irq(&ced->charOutLock);
99 }
100 return iReturn;
101 }
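/*
 * Illustrative note: dwOutBuffPut wraps back to 0 when it reaches OUTBUF_SZ,
 * so the output buffer is a simple ring; dwNumOutput counts the occupied
 * bytes and is what ced_send_chars() then works through on the USB side.
 */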
102
103 /*****************************************************************************
104 ** Add the data in pData (user-space pointer) of length n to the output buffer, and
105 ** trigger an output transfer if this is appropriate. User mode.
106 ** Holds the io_mutex
107 *****************************************************************************/
108 int ced_send_string(struct ced_data *ced, const char __user *pData,
109 unsigned int n)
110 {
111 int iReturn = U14ERR_NOERROR; /* assume all will be well */
112 char buffer[OUTBUF_SZ + 1]; /* space in our address space for characters */
113 if (n > OUTBUF_SZ) /* check space in local buffer... */
114 return U14ERR_NOOUT; /* ...too many characters */
115 if (copy_from_user(buffer, pData, n))
116 return -EFAULT;
117 buffer[n] = 0; /* terminate for debug purposes */
118
119 mutex_lock(&ced->io_mutex); /* Protect disconnect from new i/o */
120 if (n > 0) { /* do nothing if nowt to do! */
121 dev_dbg(&ced->interface->dev, "%s: n=%d>%s<\n",
122 __func__, n, buffer);
123 iReturn = ced_put_chars(ced, buffer, n);
124 }
125
126 ced_allowi(ced); /* make sure we have input int */
127 mutex_unlock(&ced->io_mutex);
128
129 return iReturn;
130 }
131
132 /****************************************************************************
133 ** ced_send_char
134 **
135 ** Sends a single character to the 1401. User mode, holds io_mutex.
136 ****************************************************************************/
137 int ced_send_char(struct ced_data *ced, char c)
138 {
139 int iReturn;
140 mutex_lock(&ced->io_mutex); /* Protect disconnect from new i/o */
141 iReturn = ced_put_chars(ced, &c, 1);
142 dev_dbg(&ced->interface->dev, "ced_send_char >%c< (0x%02x)\n", c, c);
143 ced_allowi(ced); /* Make sure char reads are running */
144 mutex_unlock(&ced->io_mutex);
145 return iReturn;
146 }
147
148 /***************************************************************************
149 **
150 ** ced_get_state
151 **
152 ** Retrieves state information from the 1401, adjusts the 1401 state held
153 ** in the device extension to indicate the current 1401 type.
154 **
155 ** *state is updated with information about the 1401 state as returned by the
156 ** 1401. The low byte is a code for what 1401 is doing:
157 **
158 ** 0 normal 1401 operation
159 ** 1 sending chars to host
160 ** 2 sending block data to host
161 ** 3 reading block data from host
162 ** 4 sending an escape sequence to the host
163 ** 0x80 1401 is executing self-test, in which case the upper word
164 ** is the last error code seen (or zero for no new error).
165 **
166 ** *error is updated with error information if a self-test error code
167 ** is returned in the upper word of state.
168 **
169 ** both state and error are set to -1 if there are comms problems, and
170 ** to zero if there is a simple failure.
171 **
172 ** return error code (U14ERR_NOERROR for OK)
173 */
174 int ced_get_state(struct ced_data *ced, __u32 *state, __u32 *error)
175 {
176 int nGot;
177 dev_dbg(&ced->interface->dev, "%s: entry\n", __func__);
178
179 *state = 0xFFFFFFFF; /* Start off with invalid state */
180 nGot = usb_control_msg(ced->udev, usb_rcvctrlpipe(ced->udev, 0),
181 GET_STATUS, (D_TO_H | VENDOR | DEVREQ), 0, 0,
182 ced->statBuf, sizeof(ced->statBuf), HZ);
183 if (nGot != sizeof(ced->statBuf)) {
184 dev_err(&ced->interface->dev,
185 "%s: FAILED, return code %d\n", __func__, nGot);
186 ced->sCurrentState = U14ERR_TIME; /* Indicate that things are very wrong indeed */
187 *state = 0; /* Force status values to a known state */
188 *error = 0;
189 } else {
190 int nDevice;
191 dev_dbg(&ced->interface->dev,
192 "%s: Success, state: 0x%x, 0x%x\n",
193 __func__, ced->statBuf[0], ced->statBuf[1]);
194
195 *state = ced->statBuf[0]; /* Return the state values to the calling code */
196 *error = ced->statBuf[1];
197
198 nDevice = ced->udev->descriptor.bcdDevice >> 8; /* 1401 type code value */
199 switch (nDevice) { /* so we can clean up current state */
200 case 0:
201 ced->sCurrentState = U14ERR_U1401;
202 break;
203
204 default: /* allow lots of device codes for future 1401s */
205 if ((nDevice >= 1) && (nDevice <= 23))
206 ced->sCurrentState = (short)(nDevice + 6);
207 else
208 ced->sCurrentState = U14ERR_ILL;
209 break;
210 }
211 }
212
213 return ced->sCurrentState >= 0 ? U14ERR_NOERROR : ced->sCurrentState;
214 }
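/*
 * Illustrative example of using the state word documented above: a caller
 * that only wants to know whether the 1401 is self-testing could do
 *
 *	__u32 state, error;
 *	if (ced_get_state(ced, &state, &error) == U14ERR_NOERROR &&
 *	    ((state & 0xff) == 0x80))
 *		dev_info(&ced->interface->dev, "self-testing, last error %u\n",
 *			 state >> 16);
 *
 * ced_in_self_test() below performs essentially this check.
 */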
215
216 /****************************************************************************
217 ** ced_read_write_cancel
218 **
219 ** Kills off a staged read/write request from the USB if one is pending.
220 ****************************************************************************/
221 int ced_read_write_cancel(struct ced_data *ced)
222 {
223 dev_dbg(&ced->interface->dev, "%s: entry %d\n",
224 __func__, ced->bStagedUrbPending);
225 #ifdef NOT_WRITTEN_YET
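/* This block is compiled only if NOT_WRITTEN_YET is defined: it is a */
/* Windows-driver sketch (IRPs, KeWaitForSingleObject) kept as a reference */
/* until staged-transfer cancellation is implemented; the normal build */
/* simply takes the #else branch at the end and reports no error. */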
226 int ntStatus = STATUS_SUCCESS;
227 bool bResult = false;
228 unsigned int i;
229 /* We can fill this in when we know how we will implement the staged transfer stuff */
230 spin_lock_irq(&ced->stagedLock);
231
232 if (ced->bStagedUrbPending) { /* anything to be cancelled? May need more... */
233 dev_info(&ced->interface->dev,
234 "ced_read_write_cancel about to cancel Urb\n");
235 /* Clear the staging done flag */
236 /* KeClearEvent(&ced->StagingDoneEvent); */
237 USB_ASSERT(ced->pStagedIrp != NULL);
238
239 /* Release the spinlock first otherwise the completion routine may hang */
240 /* on the spinlock while this function hangs waiting for the event. */
241 spin_unlock_irq(&ced->stagedLock);
242 bResult = IoCancelIrp(ced->pStagedIrp); /* Actually do the cancel */
243 if (bResult) {
244 LARGE_INTEGER timeout;
245 timeout.QuadPart = -10000000; /* Use a timeout of 1 second */
246 dev_info(&ced->interface->dev,
247 "%s: about to wait till done\n", __func__);
248 ntStatus =
249 KeWaitForSingleObject(&ced->StagingDoneEvent,
250 Executive, KernelMode, FALSE,
251 &timeout);
252 } else {
253 dev_info(&ced->interface->dev,
254 "%s: cancellation failed\n", __func__);
255 ntStatus = U14ERR_FAIL;
256 }
257 USB_KdPrint(DBGLVL_DEFAULT,
258 ("ced_read_write_cancel ntStatus = 0x%x decimal %d\n",
259 ntStatus, ntStatus));
260 } else
261 spin_unlock_irq(&ced->stagedLock);
262
263 dev_info(&ced->interface->dev, "%s: done\n", __func__);
264 return ntStatus;
265 #else
266 return U14ERR_NOERROR;
267 #endif
268
269 }
270
271 /***************************************************************************
272 ** ced_in_self_test - utility to check if the 1401 is in self-test. Returns 1 if
273 ** in self-test, 0 if not, or a -ve error code if we failed for some reason.
274 ***************************************************************************/
275 static int ced_in_self_test(struct ced_data *ced, unsigned int *pState)
276 {
277 unsigned int state, error;
278 int iReturn = ced_get_state(ced, &state, &error); /* see if in self-test */
279 if (iReturn == U14ERR_NOERROR) /* if all still OK */
280 iReturn = (state == (unsigned int)-1) || /* TX problem or... */
281 ((state & 0xff) == 0x80); /* ...self test */
282 *pState = state; /* return actual state */
283 return iReturn;
284 }
285
286 /***************************************************************************
287 ** ced_is_1401 - ALWAYS CALLED HOLDING THE io_mutex
288 **
289 ** Tests for the current state of the 1401. Sets sCurrentState:
290 **
291 ** U14ERR_NOIF 1401 i/f card not installed (not done here)
292 ** U14ERR_OFF 1401 apparently not switched on
293 ** U14ERR_NC 1401 appears to be not connected
294 ** U14ERR_ILL 1401 is there but it is not at all well
295 ** U14ERR_TIME 1401 appears OK, but doesn't communicate - very bad
296 ** U14ERR_STD 1401 OK and ready for use
297 ** U14ERR_PLUS 1401+ OK and ready for use
298 ** U14ERR_U1401 Micro1401 OK and ready for use
299 ** U14ERR_POWER Power1401 OK and ready for use
300 ** U14ERR_U14012 Micro1401 mkII OK and ready for use
301 **
302 ** Returns TRUE if a 1401 detected and OK, else FALSE
303 ****************************************************************************/
304 static bool ced_is_1401(struct ced_data *ced)
305 {
306 int iReturn;
307 dev_dbg(&ced->interface->dev, "%s\n", __func__);
308
309 ced_draw_down(ced); /* wait for, then kill outstanding Urbs */
310 ced_flush_in_buff(ced); /* Clear out input buffer & pipe */
311 ced_flush_out_buff(ced); /* Clear output buffer & pipe */
312
313 /* The next call returns 0 if OK, but has returned 1 in the past, meaning that */
314 /* usb_unlock_device() is needed... now it always is */
315 iReturn = usb_lock_device_for_reset(ced->udev, ced->interface);
316
317 /* release the io_mutex because if we don't, we will deadlock due to system */
318 /* calls back into the driver. */
319 mutex_unlock(&ced->io_mutex); /* allow those calls in while we reset */
320 if (iReturn >= 0) { /* if we got the reset lock */
321 iReturn = usb_reset_device(ced->udev); /* try to do the reset */
322 usb_unlock_device(ced->udev); /* undo the lock */
323 }
324
325 mutex_lock(&ced->io_mutex); /* hold stuff off while we wait */
326 ced->dwDMAFlag = MODE_CHAR; /* Clear DMA mode flag regardless! */
327 if (iReturn == 0) { /* if all is OK still */
328 unsigned int state;
329 iReturn = ced_in_self_test(ced, &state); /* see if likely in self test */
330 if (iReturn > 0) { /* do we need to wait for self-test? */
331 unsigned long ulTimeOut = jiffies + 30 * HZ; /* when to give up */
332 while ((iReturn > 0) && time_before(jiffies, ulTimeOut)) {
333 schedule(); /* let other stuff run */
334 iReturn = ced_in_self_test(ced, &state); /* see if done yet */
335 }
336 }
337
338 if (iReturn == 0) /* if all is OK... */
339 iReturn = state == 0; /* then success is that the state is 0 */
340 } else
341 iReturn = 0; /* we failed */
342 ced->bForceReset = false; /* Clear forced reset flag now */
343
344 return iReturn > 0;
345 }
346
347 /****************************************************************************
348 ** ced_quick_check - ALWAYS CALLED HOLDING THE io_mutex
349 ** This is used to test for a 1401. It will try to do a quick check if all is
350 ** OK, that is the 1401 was OK the last time it was asked, and there is no DMA
351 ** in progress, and if the bTestBuff flag is set, the character buffers must be
352 ** empty too. If the quick check shows that the state is still the same, then
353 ** all is OK.
354 **
355 ** If any of the above conditions are not met, or if the state or type of the
356 ** 1401 has changed since the previous test, the full ced_is_1401 test is done, but
357 ** only if bCanReset is also TRUE.
358 **
359 ** The return value is TRUE if a useable 1401 is found, FALSE if not
360 */
361 static bool ced_quick_check(struct ced_data *ced, bool bTestBuff, bool bCanReset)
362 {
363 bool bRet = false; /* assume it will fail and we will reset */
364 bool bShortTest;
365
366 bShortTest = ((ced->dwDMAFlag == MODE_CHAR) && /* no DMA running */
367 (!ced->bForceReset) && /* Not had a real reset forced */
368 (ced->sCurrentState >= U14ERR_STD)); /* No 1401 errors stored */
369
370 dev_dbg(&ced->interface->dev,
371 "%s: DMAFlag:%d, state:%d, force:%d, testBuff:%d, short:%d\n",
372 __func__, ced->dwDMAFlag, ced->sCurrentState, ced->bForceReset,
373 bTestBuff, bShortTest);
374
375 if ((bTestBuff) && /* Buffer check requested, and... */
376 (ced->dwNumInput || ced->dwNumOutput)) { /* ...characters were in the buffer? */
377 bShortTest = false; /* Then do the full test */
378 dev_dbg(&ced->interface->dev,
379 "%s: will reset as buffers not empty\n", __func__);
380 }
381
382 if (bShortTest || !bCanReset) { /* Still OK to try the short test? */
383 /* Always test if no reset - we want state update */
384 unsigned int state, error;
385 dev_dbg(&ced->interface->dev, "%s: ced_get_state\n", __func__);
386 if (ced_get_state(ced, &state, &error) == U14ERR_NOERROR) { /* Check on the 1401 state */
387 if ((state & 0xFF) == 0) /* If call worked, check the status value */
388 bRet = true; /* If that was zero, all is OK, no reset needed */
389 }
390 }
391
392 if (!bRet && bCanReset) { /* If all not OK, then */
393 dev_info(&ced->interface->dev, "%s: ced_is_1401 %d %d %d %d\n",
394 __func__, bShortTest, ced->sCurrentState, bTestBuff,
395 ced->bForceReset);
396 bRet = ced_is_1401(ced); /* do full test */
397 }
398
399 return bRet;
400 }
401
402 /****************************************************************************
403 ** ced_reset
404 **
405 ** Resets the 1401 and empties the i/o buffers
406 *****************************************************************************/
407 int ced_reset(struct ced_data *ced)
408 {
409 mutex_lock(&ced->io_mutex); /* Protect disconnect from new i/o */
410 dev_dbg(&ced->interface->dev, "%s: About to call ced_quick_check\n",
411 __func__);
412 ced_quick_check(ced, true, true); /* Check 1401, reset if not OK */
413 mutex_unlock(&ced->io_mutex);
414 return U14ERR_NOERROR;
415 }
416
417 /****************************************************************************
418 ** ced_get_char
419 **
420 ** Gets a single character from the 1401
421 ****************************************************************************/
422 int ced_get_char(struct ced_data *ced)
423 {
424 int iReturn = U14ERR_NOIN; /* assume we will get nothing */
425 mutex_lock(&ced->io_mutex); /* Protect disconnect from new i/o */
426
427 dev_dbg(&ced->interface->dev, "%s\n", __func__);
428
429 ced_allowi(ced); /* Make sure char reads are running */
430 ced_send_chars(ced); /* and send any buffered chars */
431
432 spin_lock_irq(&ced->charInLock);
433 if (ced->dwNumInput > 0) { /* worth looking */
434 iReturn = ced->inputBuffer[ced->dwInBuffGet++];
435 if (ced->dwInBuffGet >= INBUF_SZ)
436 ced->dwInBuffGet = 0;
437 ced->dwNumInput--;
438 } else
439 iReturn = U14ERR_NOIN; /* no input data to read */
440 spin_unlock_irq(&ced->charInLock);
441
442 ced_allowi(ced); /* Make sure char reads are running */
443
444 mutex_unlock(&ced->io_mutex); /* Protect disconnect from new i/o */
445 return iReturn;
446 }
447
448 /****************************************************************************
449 ** ced_get_string
450 **
451 ** Gets a string from the 1401. Returns chars up to the next CR or when
452 ** there are no more to read or nowhere to put them. CR is translated to
453 ** 0 and counted as a character. If the string does not end in a 0, we will
454 ** add one, if there is room, but it is not counted as a character.
455 **
456 ** returns the count of characters (including the terminator), 0 if none,
457 ** or a negative error code.
458 ****************************************************************************/
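/*
 * Illustrative example: if the input buffer holds "OK\rERR\r" and n is at
 * least 3, the loop below copies 'O', 'K' and the CR translated to 0, then
 * stops, so the caller receives "OK" and the return value is 3.
 */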
459 int ced_get_string(struct ced_data *ced, char __user *pUser, int n)
460 {
461 int nAvailable; /* characters in the buffer */
462 int iReturn = U14ERR_NOIN;
463 if (n <= 0)
464 return -ENOMEM;
465
466 mutex_lock(&ced->io_mutex); /* Protect disconnect from new i/o */
467 ced_allowi(ced); /* Make sure char reads are running */
468 ced_send_chars(ced); /* and send any buffered chars */
469
470 spin_lock_irq(&ced->charInLock);
471 nAvailable = ced->dwNumInput; /* characters available now */
472 if (nAvailable > n) /* read max of space in pUser... */
473 nAvailable = n; /* ...or input characters */
474
475 if (nAvailable > 0) { /* worth looking? */
476 char buffer[INBUF_SZ + 1]; /* space for a linear copy of data */
477 int nGot = 0;
478 int nCopyToUser; /* number to copy to user */
479 char cData;
480 do {
481 cData = ced->inputBuffer[ced->dwInBuffGet++];
482 if (cData == CR_CHAR) /* replace CR with zero */
483 cData = (char)0;
484
485 if (ced->dwInBuffGet >= INBUF_SZ)
486 ced->dwInBuffGet = 0; /* wrap buffer pointer */
487
488 buffer[nGot++] = cData; /* save the output */
489 } while ((nGot < nAvailable) && cData);
490
491 nCopyToUser = nGot; /* what to copy... */
492 if (cData) { /* do we need null */
493 buffer[nGot] = (char)0; /* make it tidy */
494 if (nGot < n) /* if space in user buffer... */
495 ++nCopyToUser; /* ...copy the 0 as well. */
496 }
497
498 ced->dwNumInput -= nGot;
499 spin_unlock_irq(&ced->charInLock);
500
501 dev_dbg(&ced->interface->dev, "%s: read %d characters >%s<\n",
502 __func__, nGot, buffer);
503 if (copy_to_user(pUser, buffer, nCopyToUser))
504 iReturn = -EFAULT;
505 else
506 iReturn = nGot; /* report characters read */
507 } else
508 spin_unlock_irq(&ced->charInLock);
509
510 ced_allowi(ced); /* Make sure char reads are running */
511 mutex_unlock(&ced->io_mutex); /* Protect disconnect from new i/o */
512
513 return iReturn;
514 }
515
516 /*******************************************************************************
517 ** Get count of characters in the input buffer.
518 *******************************************************************************/
519 int ced_stat_1401(struct ced_data *ced)
520 {
521 int iReturn;
522 mutex_lock(&ced->io_mutex); /* Protect disconnect from new i/o */
523 ced_allowi(ced); /* make sure we allow pending chars */
524 ced_send_chars(ced); /* in both directions */
525 iReturn = ced->dwNumInput; /* no lock as single read */
526 mutex_unlock(&ced->io_mutex); /* Protect disconnect from new i/o */
527 return iReturn;
528 }
529
530 /****************************************************************************
531 ** ced_line_count
532 **
533 ** Returns the number of newline chars in the buffer. There is no need for
534 ** any fancy interlocks as we only read the interrupt routine data, and the
535 ** system is arranged so nothing can be destroyed.
536 ****************************************************************************/
537 int ced_line_count(struct ced_data *ced)
538 {
539 int iReturn = 0; /* will be count of line ends */
540
541 mutex_lock(&ced->io_mutex); /* Protect disconnect from new i/o */
542 ced_allowi(ced); /* Make sure char reads are running */
543 ced_send_chars(ced); /* and send any buffered chars */
544 spin_lock_irq(&ced->charInLock); /* Get protection */
545
546 if (ced->dwNumInput > 0) { /* worth looking? */
547 unsigned int dwIndex = ced->dwInBuffGet; /* start at first available */
548 unsigned int dwEnd = ced->dwInBuffPut; /* Position for search end */
549 do {
550 if (ced->inputBuffer[dwIndex++] == CR_CHAR)
551 ++iReturn; /* inc count if CR */
552
553 if (dwIndex >= INBUF_SZ) /* see if we fall off buff */
554 dwIndex = 0;
555 } while (dwIndex != dwEnd); /* go to last available */
556 }
557
558 spin_unlock_irq(&ced->charInLock);
559 dev_dbg(&ced->interface->dev, "%s: returned %d\n", __func__, iReturn);
560 mutex_unlock(&ced->io_mutex); /* Protect disconnect from new i/o */
561 return iReturn;
562 }
563
564 /****************************************************************************
565 ** ced_get_out_buf_space
566 **
567 ** Gets the space in the output buffer. Called from user code.
568 *****************************************************************************/
569 int ced_get_out_buf_space(struct ced_data *ced)
570 {
571 int iReturn;
572 mutex_lock(&ced->io_mutex); /* Protect disconnect from new i/o */
573 ced_send_chars(ced); /* send any buffered chars */
574 iReturn = (int)(OUTBUF_SZ - ced->dwNumOutput); /* no lock needed for single read */
575 dev_dbg(&ced->interface->dev, "%s: %d\n", __func__, iReturn);
576 mutex_unlock(&ced->io_mutex); /* Protect disconnect from new i/o */
577 return iReturn;
578 }
579
580 /****************************************************************************
581 **
582 ** ced_clear_area
583 **
584 ** Clears up a transfer area. This is always called in the context of a user
585 ** request, never from a call-back.
586 ****************************************************************************/
587 int ced_clear_area(struct ced_data *ced, int nArea)
588 {
589 int iReturn = U14ERR_NOERROR;
590
591 if ((nArea < 0) || (nArea >= MAX_TRANSAREAS)) {
592 iReturn = U14ERR_BADAREA;
593 dev_err(&ced->interface->dev, "%s: Attempt to clear area %d\n",
594 __func__, nArea);
595 } else {
596 /* to save typing */
597 struct transarea *pTA = &ced->rTransDef[nArea];
598 if (!pTA->used) /* if not used... */
599 iReturn = U14ERR_NOTSET; /* ...nothing to be done */
600 else {
601 /* We must save the memory we return as we shouldn't mess with memory while */
602 /* holding a spin lock. */
603 struct page **pPages = NULL; /*save page address list*/
604 int nPages = 0; /* and number of pages */
605 int np;
606
607 dev_dbg(&ced->interface->dev, "%s: area %d\n",
608 __func__, nArea);
609 spin_lock_irq(&ced->stagedLock);
610 if ((ced->StagedId == nArea)
611 && (ced->dwDMAFlag > MODE_CHAR)) {
612 iReturn = U14ERR_UNLOCKFAIL; /* cannot delete as in use */
613 dev_err(&ced->interface->dev,
614 "%s: call on area %d while active\n",
615 __func__, nArea);
616 } else {
617 pPages = pTA->pages; /* save page address list */
618 nPages = pTA->n_pages; /* and page count */
619 if (pTA->event_sz) /* if events flagging in use */
620 wake_up_interruptible(&pTA->event); /* release anything that was waiting */
621
622 if (ced->bXFerWaiting
623 && (ced->rDMAInfo.ident == nArea))
624 ced->bXFerWaiting = false; /* Cannot have pending xfer if area cleared */
625
626 /* Clean out the struct transarea except for the wait queue, which is at the end */
627 /* This sets used to false and event_sz to 0 to say area not used and no events. */
628 memset(pTA, 0,
629 sizeof(struct transarea) -
630 sizeof(wait_queue_head_t));
631 }
632 spin_unlock_irq(&ced->stagedLock);
633
634 if (pPages) { /* if we decided to release the memory */
635 /* Now we must undo the pinning down of the pages. We will assume the worst and mark */
636 /* all the pages as dirty. Don't be tempted to move this up above as you must not be */
637 /* holding a spin lock to do this stuff as it is not atomic. */
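/* (Each page released here was pinned by get_user_pages_fast() in */
/* ced_set_area(); page_cache_release() is the older name for put_page().) */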
638 dev_dbg(&ced->interface->dev, "%s: nPages=%d\n",
639 __func__, nPages);
640
641 for (np = 0; np < nPages; ++np) {
642 if (pPages[np]) {
643 SetPageDirty(pPages[np]);
644 page_cache_release(pPages[np]);
645 }
646 }
647
648 kfree(pPages);
649 dev_dbg(&ced->interface->dev,
650 "%s: kfree(pPages) done\n", __func__);
651 }
652 }
653 }
654
655 return iReturn;
656 }
657
658 /****************************************************************************
659 ** ced_set_area
660 **
661 ** Sets up a transfer area - the functional part. Called by both
662 ** ced_set_transfer and ced_set_circular.
663 ****************************************************************************/
664 static int ced_set_area(struct ced_data *ced, int nArea, char __user *puBuf,
665 unsigned int dwLength, bool bCircular, bool bCircToHost)
666 {
667 /* Start by working out the page aligned start of the area and the size */
668 /* of the area in pages, allowing for the start not being aligned and the */
669 /* end needing to be rounded up to a page boundary. */
670 unsigned long ulStart = ((unsigned long)puBuf) & PAGE_MASK;
671 unsigned int ulOffset = ((unsigned long)puBuf) & (PAGE_SIZE - 1);
672 int len = (dwLength + ulOffset + PAGE_SIZE - 1) >> PAGE_SHIFT;
673
674 struct transarea *pTA = &ced->rTransDef[nArea]; /* to save typing */
675 struct page **pPages = NULL; /* space for page tables */
676 int nPages = 0; /* and number of pages */
677
678 int iReturn = ced_clear_area(ced, nArea); /* see if OK to use this area */
679 if ((iReturn != U14ERR_NOTSET) && /* if not area unused and... */
680 (iReturn != U14ERR_NOERROR)) /* ...not all OK, then... */
681 return iReturn; /* ...we cannot use this area */
682
683 if (!access_ok(VERIFY_WRITE, puBuf, dwLength)) /* if we cannot access the memory... */
684 return -EFAULT; /* ...then we are done */
685
686 /* Now allocate space to hold the table of page pointers */
687 pPages = kmalloc(len * sizeof(struct page *), GFP_KERNEL);
688 if (!pPages) {
689 iReturn = U14ERR_NOMEMORY;
690 goto error;
691 }
692 dev_dbg(&ced->interface->dev, "%s: %p, length=%06x, circular %d\n",
693 __func__, puBuf, dwLength, bCircular);
694
695 /* Now pin down the user pages; get_user_pages_fast() deals with the mmap semaphore for us. */
696 nPages = get_user_pages_fast(ulStart, len, 1, pPages);
697 dev_dbg(&ced->interface->dev, "%s: nPages = %d\n", __func__, nPages);
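/*
 * get_user_pages_fast() reports how many pages it actually pinned, which
 * can be fewer than 'len'; every pinned page must eventually be released,
 * which ced_clear_area() does with SetPageDirty()/page_cache_release().
 */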
698
699 if (nPages > 0) { /* if we succeeded */
700 /* If you are tempted to use page_address (from LDD3), forget it. You MUST use */
701 /* kmap() or kmap_atomic() to get a virtual address. page_address will give you */
702 /* (null) or at least it does in this context with an x86 machine. */
703 spin_lock_irq(&ced->stagedLock);
704 pTA->buff = puBuf; /* keep start of region (user address) */
705 pTA->base_offset = ulOffset; /* save offset in first page to start of xfer */
706 pTA->length = dwLength; /* Size of the region in bytes */
707 pTA->pages = pPages; /* list of pages that are used by buffer */
708 pTA->n_pages = nPages; /* number of pages */
709
710 pTA->circular = bCircular;
711 pTA->circ_to_host = bCircToHost;
712
713 pTA->blocks[0].offset = 0;
714 pTA->blocks[0].size = 0;
715 pTA->blocks[1].offset = 0;
716 pTA->blocks[1].size = 0;
717 pTA->used = true; /* This is now a used block */
718
719 spin_unlock_irq(&ced->stagedLock);
720 iReturn = U14ERR_NOERROR; /* say all was well */
721 } else {
722 iReturn = U14ERR_LOCKFAIL;
723 goto error;
724 }
725
726 return iReturn;
727
728 error:
729 kfree(pPages);
730 return iReturn;
731 }
732
733 /****************************************************************************
734 ** ced_set_transfer
735 **
736 ** Sets up a transfer area record. If the area is already set, we attempt to
737 ** unset it. Unsetting will fail if the area is booked, and a transfer to that
738 ** area is in progress. Otherwise, we will release the area and re-assign it.
739 ****************************************************************************/
740 int ced_set_transfer(struct ced_data *ced, struct transfer_area_desc __user *pTD)
741 {
742 int iReturn;
743 struct transfer_area_desc td;
744
745 if (copy_from_user(&td, pTD, sizeof(td)))
746 return -EFAULT;
747
748 mutex_lock(&ced->io_mutex);
749 dev_dbg(&ced->interface->dev, "%s: area:%d, size:%08x\n",
750 __func__, td.wAreaNum, td.dwLength);
751 /* The strange cast is done so that we don't get warnings in 32-bit linux about the size of the */
752 /* pointer. The pointer is always passed as a 64-bit object so that we don't have problems using */
753 /* a 32-bit program on a 64-bit system. unsigned long is 64-bits on a 64-bit system. */
754 iReturn =
755 ced_set_area(ced, td.wAreaNum,
756 (char __user *)((unsigned long)td.lpvBuff), td.dwLength,
757 false, false);
758 mutex_unlock(&ced->io_mutex);
759 return iReturn;
760 }
761
762 /****************************************************************************
763 ** ced_unset_transfer
764 ** Erases a transfer area record
765 ****************************************************************************/
766 int ced_unset_transfer(struct ced_data *ced, int nArea)
767 {
768 int iReturn;
769 mutex_lock(&ced->io_mutex);
770 iReturn = ced_clear_area(ced, nArea);
771 mutex_unlock(&ced->io_mutex);
772 return iReturn;
773 }
774
775 /****************************************************************************
776 ** ced_set_event
777 ** Creates an event that we can test for based on a transfer to/from an area.
778 ** The area must be setup for a transfer. We attempt to simulate the Windows
779 ** driver behavior for events (as we don't actually use them), which is to
780 ** pretend that whatever the user asked for was achieved, so we return 1 if they
781 ** try to create one, and 0 if they ask to remove one (assuming all else was OK).
782 ****************************************************************************/
783 int ced_set_event(struct ced_data *ced, struct transfer_event __user *pTE)
784 {
785 int iReturn = U14ERR_NOERROR;
786 struct transfer_event te;
787
788 /* get a local copy of the data */
789 if (copy_from_user(&te, pTE, sizeof(te)))
790 return -EFAULT;
791
792 if (te.wAreaNum >= MAX_TRANSAREAS) /* the area must exist */
793 return U14ERR_BADAREA;
794 else {
795 struct transarea *pTA = &ced->rTransDef[te.wAreaNum];
796 mutex_lock(&ced->io_mutex); /* make sure we have no competitor */
797 spin_lock_irq(&ced->stagedLock);
798 if (pTA->used) { /* area must be in use */
799 pTA->event_st = te.dwStart; /* set area regions */
800 pTA->event_sz = te.dwLength; /* set size (0 cancels it) */
801 pTA->event_to_host = te.wFlags & 1; /* set the direction */
802 pTA->wake_up = 0; /* zero the wake up count */
803 } else
804 iReturn = U14ERR_NOTSET;
805 spin_unlock_irq(&ced->stagedLock);
806 mutex_unlock(&ced->io_mutex);
807 }
808 return iReturn ==
809 U14ERR_NOERROR ? (te.iSetEvent ? 1 : U14ERR_NOERROR) : iReturn;
810 }
811
812 /****************************************************************************
813 ** ced_wait_event
814 ** Sleep the process with a timeout waiting for an event. Returns the number
815 ** of times that a block met the event condition since we last cleared it or
816 ** 0 if timed out, or -ve error (bad area or not set, or signal).
817 ****************************************************************************/
818 int ced_wait_event(struct ced_data *ced, int nArea, int msTimeOut)
819 {
820 int iReturn;
821 if ((unsigned)nArea >= MAX_TRANSAREAS)
822 return U14ERR_BADAREA;
823 else {
824 int iWait;
825 struct transarea *pTA = &ced->rTransDef[nArea];
826 msTimeOut = (msTimeOut * HZ + 999) / 1000; /* convert timeout to jiffies */
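/* (e.g. with HZ=250, a 1 ms timeout rounds up to 1 jiffy: (1*250+999)/1000; */
/* a zero timeout stays zero and selects the untimed wait below) */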
827
828 /* We cannot wait holding the mutex, but we check the flags while holding */
829 /* it. This may well be pointless as another thread could get in between */
830 /* releasing it and the wait call. However, that thread would also have to */
831 /* clear the wake_up count, and the !pTA->used test may help us in this case. */
832 mutex_lock(&ced->io_mutex); /* make sure we have no competitor */
833 if (!pTA->used || !pTA->event_sz) { /* check something to wait for... */
mutex_unlock(&ced->io_mutex); /* ...else we do nothing, but must not */
834 return U14ERR_NOTSET; /* ...go out still holding the mutex */
}
835 mutex_unlock(&ced->io_mutex);
836
837 if (msTimeOut)
838 iWait =
839 wait_event_interruptible_timeout(pTA->event,
840 pTA->wake_up
841 || !pTA->used,
842 msTimeOut);
843 else
844 iWait =
845 wait_event_interruptible(pTA->event, pTA->wake_up
846 || !pTA->used);
847 if (iWait)
848 iReturn = -ERESTARTSYS; /* oops - we have had a SIGNAL */
849 else
850 iReturn = pTA->wake_up; /* else the wakeup count */
851
852 spin_lock_irq(&ced->stagedLock);
853 pTA->wake_up = 0; /* clear the flag */
854 spin_unlock_irq(&ced->stagedLock);
855 }
856 return iReturn;
857 }
858
859 /****************************************************************************
860 ** ced_test_event
861 ** Test the event to see if a ced_wait_event would return immediately. Returns the
862 ** number of times a block completed since the last call, or 0 if none, or a
863 ** negative error.
864 ****************************************************************************/
865 int ced_test_event(struct ced_data *ced, int nArea)
866 {
867 int iReturn;
868 if ((unsigned)nArea >= MAX_TRANSAREAS)
869 iReturn = U14ERR_BADAREA;
870 else {
871 struct transarea *pTA = &ced->rTransDef[nArea];
872 mutex_lock(&ced->io_mutex); /* make sure we have no competitor */
873 spin_lock_irq(&ced->stagedLock);
874 iReturn = pTA->wake_up; /* get wakeup count since last call */
875 pTA->wake_up = 0; /* clear the count */
876 spin_unlock_irq(&ced->stagedLock);
877 mutex_unlock(&ced->io_mutex);
878 }
879 return iReturn;
880 }
881
882 /****************************************************************************
883 ** ced_get_transfer
884 ** Puts the current state of the 1401 in a TGET_TX_BLOCK.
885 *****************************************************************************/
886 int ced_get_transfer(struct ced_data *ced, TGET_TX_BLOCK __user *pTX)
887 {
888 int iReturn = U14ERR_NOERROR;
889 unsigned int dwIdent;
890
891 mutex_lock(&ced->io_mutex);
892 dwIdent = ced->StagedId; /* area ident for last xfer */
893 if (dwIdent >= MAX_TRANSAREAS)
894 iReturn = U14ERR_BADAREA;
895 else {
896 /* Return the best information we have - we don't have physical addresses */
897 TGET_TX_BLOCK *tx;
898
899 tx = kzalloc(sizeof(*tx), GFP_KERNEL);
900 if (!tx) {
901 mutex_unlock(&ced->io_mutex);
902 return -ENOMEM;
903 }
904 tx->size = ced->rTransDef[dwIdent].length;
905 tx->linear = (long long)((long)ced->rTransDef[dwIdent].buff);
906 tx->avail = GET_TX_MAXENTRIES; /* how many blocks we could return */
907 tx->used = 1; /* number we actually return */
908 tx->entries[0].physical =
909 (long long)(tx->linear + ced->StagedOffset);
910 tx->entries[0].size = tx->size;
911
912 if (copy_to_user(pTX, tx, sizeof(*tx)))
913 iReturn = -EFAULT;
914 kfree(tx);
915 }
916 mutex_unlock(&ced->io_mutex);
917 return iReturn;
918 }
919
920 /****************************************************************************
921 ** ced_kill_io
922 **
923 ** Empties the host i/o buffers
924 ****************************************************************************/
925 int ced_kill_io(struct ced_data *ced)
926 {
927 dev_dbg(&ced->interface->dev, "%s\n", __func__);
928 mutex_lock(&ced->io_mutex);
929 ced_flush_out_buff(ced);
930 ced_flush_in_buff(ced);
931 mutex_unlock(&ced->io_mutex);
932 return U14ERR_NOERROR;
933 }
934
935 /****************************************************************************
936 ** ced_state_of_1401
937 **
938 ** Puts the current state of the 1401 in the Irp return buffer.
939 *****************************************************************************/
940 int ced_state_of_1401(struct ced_data *ced)
941 {
942 int iReturn;
943 mutex_lock(&ced->io_mutex);
944
945 ced_quick_check(ced, false, false); /* get state up to date, no reset */
946 iReturn = ced->sCurrentState;
947
948 mutex_unlock(&ced->io_mutex);
949 dev_dbg(&ced->interface->dev, "%s: %d\n", __func__, iReturn);
950
951 return iReturn;
952 }
953
954 /****************************************************************************
955 ** ced_start_self_test
956 **
957 ** Initiates a self-test cycle. The assumption is that we have no interrupts
958 ** active, so we should make sure that this is the case.
959 *****************************************************************************/
960 int ced_start_self_test(struct ced_data *ced)
961 {
962 int nGot;
963 mutex_lock(&ced->io_mutex);
964 dev_dbg(&ced->interface->dev, "%s\n", __func__);
965
966 ced_draw_down(ced); /* wait for, then kill outstanding Urbs */
967 ced_flush_in_buff(ced); /* Clear out input buffer & pipe */
968 ced_flush_out_buff(ced); /* Clear output buffer & pipe */
969 /* so things stay tidy */
970 /* ced_read_write_cancel(pDeviceObject); */
971 ced->dwDMAFlag = MODE_CHAR; /* Clear DMA mode flags here */
972
973 nGot = usb_control_msg(ced->udev, usb_rcvctrlpipe(ced->udev, 0),
974 DB_SELFTEST, (H_TO_D | VENDOR | DEVREQ),
975 0, 0, NULL, 0, HZ); /* allow 1 second timeout */
976 ced->ulSelfTestTime = jiffies + HZ * 30; /* 30 seconds into the future */
977
978 mutex_unlock(&ced->io_mutex);
979 if (nGot < 0)
980 dev_err(&ced->interface->dev, "%s: err=%d\n", __func__, nGot);
981 return nGot < 0 ? U14ERR_FAIL : U14ERR_NOERROR;
982 }
983
984 /****************************************************************************
985 ** ced_check_self_test
986 **
987 ** Check progress of a self-test cycle
988 ****************************************************************************/
989 int ced_check_self_test(struct ced_data *ced, TGET_SELFTEST __user *pGST)
990 {
991 unsigned int state, error;
992 int iReturn;
993 TGET_SELFTEST gst; /* local work space */
994 memset(&gst, 0, sizeof(gst)); /* clear out the space (sets code 0) */
995
996 mutex_lock(&ced->io_mutex);
997
998 dev_dbg(&ced->interface->dev, "%s\n", __func__);
999 iReturn = ced_get_state(ced, &state, &error);
1000 if (iReturn == U14ERR_NOERROR) /* Only accept zero if it happens twice */
1001 iReturn = ced_get_state(ced, &state, &error);
1002
1003 if (iReturn != U14ERR_NOERROR) { /* Self-test can cause comms errors */
1004 /* so we assume still testing */
1005 dev_err(&ced->interface->dev,
1006 "%s: ced_get_state=%d, assuming still testing\n",
1007 __func__, iReturn);
1008 state = 0x80; /* Force still-testing, no error */
1009 error = 0;
1010 iReturn = U14ERR_NOERROR;
1011 }
1012
1013 if ((state == -1) && (error == -1)) { /* If ced_get_state had problems */
1014 dev_err(&ced->interface->dev,
1015 "%s: ced_get_state failed, assuming still testing\n",
1016 __func__);
1017 state = 0x80; /* Force still-testing, no error */
1018 error = 0;
1019 }
1020
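/*
 * Worked example (illustrative): a state word of 0x00050080 has low byte
 * 0x80 (still self-testing) and bits 16-23 equal to 5, so the code below
 * reports error code 5 with the X/Y data taken from the 'error' word.
 */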
1021 if ((state & 0xFF) == 0x80) { /* If we are still in self-test */
1022 if (state & 0x00FF0000) { /* Have we got an error? */
1023 gst.code = (state & 0x00FF0000) >> 16; /* read the error code */
1024 gst.x = error & 0x0000FFFF; /* Error data X */
1025 gst.y = (error & 0xFFFF0000) >> 16; /* and data Y */
1026 dev_dbg(&ced->interface->dev,
1027 "Self-test error code %d\n", gst.code);
1028 } else { /* No error, check for timeout */
1029 unsigned long ulNow = jiffies; /* get current time */
1030 if (time_after(ulNow, ced->ulSelfTestTime)) {
1031 gst.code = -2; /* Flag the timeout */
1032 dev_dbg(&ced->interface->dev,
1033 "Self-test timed-out\n");
1034 } else
1035 dev_dbg(&ced->interface->dev,
1036 "Self-test on-going\n");
1037 }
1038 } else {
1039 gst.code = -1; /* Flag the test is done */
1040 dev_dbg(&ced->interface->dev, "Self-test done\n");
1041 }
1042
1043 if (gst.code < 0) { /* If we have a problem or finished */
1044 /* If using the 2890 we should reset properly */
1045 if ((ced->nPipes == 4) && (ced->s1401Type <= TYPEPOWER))
1046 ced_is_1401(ced); /* Get 1401 reset and OK */
1047 else
1048 ced_quick_check(ced, true, true); /* Otherwise check without reset unless problems */
1049 }
1050 mutex_unlock(&ced->io_mutex);
1051
1052 if (copy_to_user(pGST, &gst, sizeof(gst)))
1053 return -EFAULT;
1054
1055 return iReturn;
1056 }
1057
1058 /****************************************************************************
1059 ** ced_type_of_1401
1060 **
1061 ** Returns code for standard, plus, micro1401, power1401 or none
1062 ****************************************************************************/
1063 int ced_type_of_1401(struct ced_data *ced)
1064 {
1065 int iReturn = TYPEUNKNOWN;
1066 mutex_lock(&ced->io_mutex);
1067 dev_dbg(&ced->interface->dev, "%s\n", __func__);
1068
1069 switch (ced->s1401Type) {
1070 case TYPE1401:
1071 iReturn = U14ERR_STD;
1072 break; /* Handle these types directly */
1073 case TYPEPLUS:
1074 iReturn = U14ERR_PLUS;
1075 break;
1076 case TYPEU1401:
1077 iReturn = U14ERR_U1401;
1078 break;
1079 default:
1080 if ((ced->s1401Type >= TYPEPOWER) && (ced->s1401Type <= 25))
1081 iReturn = ced->s1401Type + 4; /* We can calculate types */
1082 else /* for up-coming 1401 designs */
1083 iReturn = TYPEUNKNOWN; /* Don't know or not there */
1084 }
1085 dev_dbg(&ced->interface->dev, "%s %d\n", __func__, iReturn);
1086 mutex_unlock(&ced->io_mutex);
1087
1088 return iReturn;
1089 }
1090
1091 /****************************************************************************
1092 ** ced_transfer_flags
1093 **
1094 ** Returns flags on block transfer abilities
1095 ****************************************************************************/
1096 int ced_transfer_flags(struct ced_data *ced)
1097 {
1098 int iReturn = U14TF_MULTIA | U14TF_DIAG | /* we always have multiple DMA areas, */
1099 U14TF_NOTIFY | U14TF_CIRCTH; /* diagnostics, notify and circular */
1100 dev_dbg(&ced->interface->dev, "%s\n", __func__);
1101 mutex_lock(&ced->io_mutex);
1102 if (ced->bIsUSB2) /* Set flag for USB2 if appropriate */
1103 iReturn |= U14TF_USB2;
1104 mutex_unlock(&ced->io_mutex);
1105
1106 return iReturn;
1107 }
1108
1109 /***************************************************************************
1110 ** ced_dbg_cmd
1111 ** Issues a debug/diagnostic command to the 1401 along with a 32-bit datum
1112 ** This is a utility command used for dbg operations.
1113 */
1114 static int ced_dbg_cmd(struct ced_data *ced, unsigned char cmd,
1115 unsigned int data)
1116 {
1117 int iReturn;
1118 dev_dbg(&ced->interface->dev, "%s: entry\n", __func__);
1119 iReturn = usb_control_msg(ced->udev, usb_sndctrlpipe(ced->udev, 0), cmd,
1120 (H_TO_D | VENDOR | DEVREQ),
1121 (unsigned short)data,
1122 (unsigned short)(data >> 16), NULL, 0, HZ);
1123 /* allow 1 second timeout */
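/* (the 32-bit datum travels in the setup packet: low 16 bits in wValue, */
/* high 16 bits in wIndex, with no data stage) */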
1124 if (iReturn < 0)
1125 dev_err(&ced->interface->dev, "%s: fail code=%d\n",
1126 __func__, iReturn);
1127
1128 return iReturn;
1129 }
1130
1131 /****************************************************************************
1132 ** ced_dbg_peek
1133 **
1134 ** Execute the diagnostic peek operation. Uses address, width and repeats.
1135 ****************************************************************************/
1136 int ced_dbg_peek(struct ced_data *ced, TDBGBLOCK __user *pDB)
1137 {
1138 int iReturn;
1139 TDBGBLOCK db;
1140
1141 if (copy_from_user(&db, pDB, sizeof(db)))
1142 return -EFAULT;
1143
1144 mutex_lock(&ced->io_mutex);
1145 dev_dbg(&ced->interface->dev, "%s: @ %08x\n", __func__, db.iAddr);
1146
1147 iReturn = ced_dbg_cmd(ced, DB_SETADD, db.iAddr);
1148 if (iReturn == U14ERR_NOERROR)
1149 iReturn = ced_dbg_cmd(ced, DB_WIDTH, db.iWidth);
1150 if (iReturn == U14ERR_NOERROR)
1151 iReturn = ced_dbg_cmd(ced, DB_REPEATS, db.iRepeats);
1152 if (iReturn == U14ERR_NOERROR)
1153 iReturn = ced_dbg_cmd(ced, DB_PEEK, 0);
1154 mutex_unlock(&ced->io_mutex);
1155
1156 return iReturn;
1157 }
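/*
 * Note that DB_PEEK only starts the read inside the 1401; the peeked value
 * is not returned by this call but is collected afterwards with
 * ced_dbg_get_data() below.
 */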
1158
1159 /****************************************************************************
1160 ** ced_dbg_poke
1161 **
1162 ** Execute the diagnostic poke operation. Parameters are in the CSBLOCK struct
1163 ** in order address, size, repeats and value to poke.
1164 ****************************************************************************/
1165 int ced_dbg_poke(struct ced_data *ced, TDBGBLOCK __user *pDB)
1166 {
1167 int iReturn;
1168 TDBGBLOCK db;
1169
1170 if (copy_from_user(&db, pDB, sizeof(db)))
1171 return -EFAULT;
1172
1173 mutex_lock(&ced->io_mutex);
1174 dev_dbg(&ced->interface->dev, "%s: @ %08x\n", __func__, db.iAddr);
1175
1176 iReturn = ced_dbg_cmd(ced, DB_SETADD, db.iAddr);
1177 if (iReturn == U14ERR_NOERROR)
1178 iReturn = ced_dbg_cmd(ced, DB_WIDTH, db.iWidth);
1179 if (iReturn == U14ERR_NOERROR)
1180 iReturn = ced_dbg_cmd(ced, DB_REPEATS, db.iRepeats);
1181 if (iReturn == U14ERR_NOERROR)
1182 iReturn = ced_dbg_cmd(ced, DB_POKE, db.iData);
1183 mutex_unlock(&ced->io_mutex);
1184
1185 return iReturn;
1186 }
1187
1188 /****************************************************************************
1189 ** ced_dbg_ramp_data
1190 **
1191 ** Execute the diagnostic ramp data operation. Parameters are in the CSBLOCK struct
1192 ** in order address, default, enable mask, size and repeats.
1193 ****************************************************************************/
1194 int ced_dbg_ramp_data(struct ced_data *ced, TDBGBLOCK __user *pDB)
1195 {
1196 int iReturn;
1197 TDBGBLOCK db;
1198
1199 if (copy_from_user(&db, pDB, sizeof(db)))
1200 return -EFAULT;
1201
1202 mutex_lock(&ced->io_mutex);
1203 dev_dbg(&ced->interface->dev, "%s: @ %08x\n", __func__, db.iAddr);
1204
1205 iReturn = ced_dbg_cmd(ced, DB_SETADD, db.iAddr);
1206 if (iReturn == U14ERR_NOERROR)
1207 iReturn = ced_dbg_cmd(ced, DB_SETDEF, db.iDefault);
1208 if (iReturn == U14ERR_NOERROR)
1209 iReturn = ced_dbg_cmd(ced, DB_SETMASK, db.iMask);
1210 if (iReturn == U14ERR_NOERROR)
1211 iReturn = ced_dbg_cmd(ced, DB_WIDTH, db.iWidth);
1212 if (iReturn == U14ERR_NOERROR)
1213 iReturn = ced_dbg_cmd(ced, DB_REPEATS, db.iRepeats);
1214 if (iReturn == U14ERR_NOERROR)
1215 iReturn = ced_dbg_cmd(ced, DB_RAMPD, 0);
1216 mutex_unlock(&ced->io_mutex);
1217
1218 return iReturn;
1219 }
1220
1221 /****************************************************************************
1222 ** ced_dbg_ramp_addr
1223 **
1224 ** Execute the diagnostic ramp address operation
1225 ****************************************************************************/
1226 int ced_dbg_ramp_addr(struct ced_data *ced, TDBGBLOCK __user *pDB)
1227 {
1228 int iReturn;
1229 TDBGBLOCK db;
1230
1231 if (copy_from_user(&db, pDB, sizeof(db)))
1232 return -EFAULT;
1233
1234 mutex_lock(&ced->io_mutex);
1235 dev_dbg(&ced->interface->dev, "%s\n", __func__);
1236
1237 iReturn = ced_dbg_cmd(ced, DB_SETDEF, db.iDefault);
1238 if (iReturn == U14ERR_NOERROR)
1239 iReturn = ced_dbg_cmd(ced, DB_SETMASK, db.iMask);
1240 if (iReturn == U14ERR_NOERROR)
1241 iReturn = ced_dbg_cmd(ced, DB_WIDTH, db.iWidth);
1242 if (iReturn == U14ERR_NOERROR)
1243 iReturn = ced_dbg_cmd(ced, DB_REPEATS, db.iRepeats);
1244 if (iReturn == U14ERR_NOERROR)
1245 iReturn = ced_dbg_cmd(ced, DB_RAMPA, 0);
1246 mutex_unlock(&ced->io_mutex);
1247
1248 return iReturn;
1249 }
1250
1251 /****************************************************************************
1252 ** ced_dbg_get_data
1253 **
1254 ** Retrieve the data resulting from the last debug Peek operation
1255 ****************************************************************************/
1256 int ced_dbg_get_data(struct ced_data *ced, TDBGBLOCK __user *pDB)
1257 {
1258 int iReturn;
1259 TDBGBLOCK db;
1260 memset(&db, 0, sizeof(db)); /* fill returned block with 0s */
1261
1262 mutex_lock(&ced->io_mutex);
1263 dev_dbg(&ced->interface->dev, "%s\n", __func__);
1264
1265 /* Read back the last peeked value from the 1401. */
1266 iReturn = usb_control_msg(ced->udev, usb_rcvctrlpipe(ced->udev, 0),
1267 DB_DATA, (D_TO_H | VENDOR | DEVREQ), 0, 0,
1268 &db.iData, sizeof(db.iData), HZ);
1269 if (iReturn == sizeof(db.iData)) {
1270 if (copy_to_user(pDB, &db, sizeof(db)))
1271 iReturn = -EFAULT;
1272 else
1273 iReturn = U14ERR_NOERROR;
1274 } else
1275 dev_err(&ced->interface->dev, "%s: failed, code %d\n",
1276 __func__, iReturn);
1277
1278 mutex_unlock(&ced->io_mutex);
1279
1280 return iReturn;
1281 }
1282
1283 /****************************************************************************
1284 ** ced_dbg_stop_loop
1285 **
1286 ** Stop any never-ending debug loop; for USB we just call ced_get_state
1287 **
1288 ****************************************************************************/
1289 int ced_dbg_stop_loop(struct ced_data *ced)
1290 {
1291 int iReturn;
1292 unsigned int uState, uErr;
1293
1294 mutex_lock(&ced->io_mutex);
1295 dev_dbg(&ced->interface->dev, "%s\n", __func__);
1296 iReturn = ced_get_state(ced, &uState, &uErr);
1297 mutex_unlock(&ced->io_mutex);
1298
1299 return iReturn;
1300 }
1301
1302 /****************************************************************************
1303 ** ced_set_circular
1304 **
1305 ** Sets up a transfer area record for circular transfers. If the area is
1306 ** already set, we attempt to unset it. Unsetting will fail if the area is
1307 ** booked and a transfer to that area is in progress. Otherwise, we will
1308 ** release the area and re-assign it.
1309 ****************************************************************************/
1310 int ced_set_circular(struct ced_data *ced, struct transfer_area_desc __user *pTD)
1311 {
1312 int iReturn;
1313 bool bToHost;
1314 struct transfer_area_desc td;
1315
1316 if (copy_from_user(&td, pTD, sizeof(td)))
1317 return -EFAULT;
1318
1319 mutex_lock(&ced->io_mutex);
1320 dev_dbg(&ced->interface->dev, "%s: area:%d, size:%08x\n",
1321 __func__, td.wAreaNum, td.dwLength);
1322 bToHost = td.eSize != 0; /* this is used as the tohost flag */
1323
1324 /* The strange cast is done so that we don't get warnings in 32-bit linux about the size of the */
1325 /* pointer. The pointer is always passed as a 64-bit object so that we don't have problems using */
1326 /* a 32-bit program on a 64-bit system. unsigned long is 64-bits on a 64-bit system. */
1327 iReturn =
1328 ced_set_area(ced, td.wAreaNum,
1329 (char __user *)((unsigned long)td.lpvBuff), td.dwLength,
1330 true, bToHost);
1331 mutex_unlock(&ced->io_mutex);
1332 return iReturn;
1333 }
1334
1335 /****************************************************************************
1336 ** ced_get_circ_block
1337 **
1338 ** Return the next available block of circularly-transferred data.
1339 ****************************************************************************/
1340 int ced_get_circ_block(struct ced_data *ced, TCIRCBLOCK __user *pCB)
1341 {
1342 int iReturn = U14ERR_NOERROR;
1343 unsigned int nArea;
1344 TCIRCBLOCK cb;
1345
1346 dev_dbg(&ced->interface->dev, "%s\n", __func__);
1347
1348 if (copy_from_user(&cb, pCB, sizeof(cb)))
1349 return -EFAULT;
1350
1351 mutex_lock(&ced->io_mutex);
1352
1353 nArea = cb.nArea; /* Retrieve parameters first */
1354 cb.dwOffset = 0; /* set default result (nothing) */
1355 cb.dwSize = 0;
1356
1357 if (nArea < MAX_TRANSAREAS) { /* The area number must be OK */
1358 /* Pointer to relevant info */
1359 struct transarea *pArea = &ced->rTransDef[nArea];
1360 spin_lock_irq(&ced->stagedLock); /* Lock others out */
1361
1362 if ((pArea->used) && (pArea->circular) && /* Must be circular area */
1363 (pArea->circ_to_host)) { /* For now at least must be to host */
1364 if (pArea->blocks[0].size > 0) { /* Got anything? */
1365 cb.dwOffset = pArea->blocks[0].offset;
1366 cb.dwSize = pArea->blocks[0].size;
1367 dev_dbg(&ced->interface->dev,
1368 "%s: return block 0: %d bytes at %d\n",
1369 __func__, cb.dwSize, cb.dwOffset);
1370 }
1371 } else
1372 iReturn = U14ERR_NOTSET;
1373
1374 spin_unlock_irq(&ced->stagedLock);
1375 } else
1376 iReturn = U14ERR_BADAREA;
1377
1378 if (copy_to_user(pCB, &cb, sizeof(cb)))
1379 iReturn = -EFAULT;
1380
1381 mutex_unlock(&ced->io_mutex);
1382 return iReturn;
1383 }
1384
1385 /****************************************************************************
1386 ** ced_free_circ_block
1387 **
1388 ** Frees a block of circularly-transferred data and returns the next one.
1389 ****************************************************************************/
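/*
 * Illustrative note: a circular area keeps at most two pending regions,
 * blocks[0] (the oldest data) and blocks[1]. Freeing bytes from the front
 * of blocks[0] shrinks it, and once it empties, blocks[1] (if any) is
 * copied down so blocks[0] always describes the next data to hand out.
 */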
1390 int ced_free_circ_block(struct ced_data *ced, TCIRCBLOCK __user *pCB)
1391 {
1392 int iReturn = U14ERR_NOERROR;
1393 unsigned int nArea, uStart, uSize;
1394 TCIRCBLOCK cb;
1395
1396 dev_dbg(&ced->interface->dev, "%s\n", __func__);
1397
1398 if (copy_from_user(&cb, pCB, sizeof(cb)))
1399 return -EFAULT;
1400
1401 mutex_lock(&ced->io_mutex);
1402
1403 nArea = cb.nArea; /* Retrieve parameters first */
1404 uStart = cb.dwOffset;
1405 uSize = cb.dwSize;
1406 cb.dwOffset = 0; /* then set default result (nothing) */
1407 cb.dwSize = 0;
1408
1409 if (nArea < MAX_TRANSAREAS) { /* The area number must be OK */
1410 /* Pointer to relevant info */
1411 struct transarea *pArea = &ced->rTransDef[nArea];
1412 spin_lock_irq(&ced->stagedLock); /* Lock others out */
1413
1414 if ((pArea->used) && (pArea->circular) && /* Must be circular area */
1415 (pArea->circ_to_host)) { /* For now at least must be to host */
1416 bool bWaiting = false;
1417
1418 if ((pArea->blocks[0].size >= uSize) && /* Got anything? */
1419 (pArea->blocks[0].offset == uStart)) { /* Must be legal data */
1420 pArea->blocks[0].size -= uSize;
1421 pArea->blocks[0].offset += uSize;
1422 if (pArea->blocks[0].size == 0) { /* Have we emptied this block? */
1423 if (pArea->blocks[1].size) { /* Is there a second block? */
1424 pArea->blocks[0] = pArea->blocks[1]; /* Copy down block 2 data */
1425 pArea->blocks[1].size = 0; /* and mark the second block as unused */
1426 pArea->blocks[1].offset = 0;
1427 } else
1428 pArea->blocks[0].offset = 0;
1429 }
1430
1431 dev_dbg(&ced->interface->dev,
1432 "%s: free %d bytes at %d, return %d bytes at %d, wait=%d\n",
1433 __func__, uSize, uStart,
1434 pArea->blocks[0].size,
1435 pArea->blocks[0].offset,
1436 ced->bXFerWaiting);
1437
1438 /* Return the next available block of memory as well */
1439 if (pArea->blocks[0].size > 0) { /* Got anything? */
1440 cb.dwOffset =
1441 pArea->blocks[0].offset;
1442 cb.dwSize = pArea->blocks[0].size;
1443 }
1444
1445 bWaiting = ced->bXFerWaiting;
1446 if (bWaiting && ced->bStagedUrbPending) {
1447 dev_err(&ced->interface->dev,
1448 "%s: ERROR: waiting xfer and staged Urb pending!\n",
1449 __func__);
1450 bWaiting = false;
1451 }
1452 } else {
1453 dev_err(&ced->interface->dev,
1454 "%s: ERROR: freeing %d bytes at %d, block 0 is %d bytes at %d\n",
1455 __func__, uSize, uStart,
1456 pArea->blocks[0].size,
1457 pArea->blocks[0].offset);
1458 iReturn = U14ERR_NOMEMORY;
1459 }
1460
1461 /* If we have one, kick off pending transfer */
1462 if (bWaiting) { /* Got a block xfer waiting? */
1463 int RWMStat =
1464 ced_read_write_mem(ced,
1465 !ced->rDMAInfo.outward,
1466 ced->rDMAInfo.ident,
1467 ced->rDMAInfo.offset,
1468 ced->rDMAInfo.size);
1469 if (RWMStat != U14ERR_NOERROR)
1470 dev_err(&ced->interface->dev,
1471 "%s: rw setup failed %d\n",
1472 __func__, RWMStat);
1473 }
1474 } else
1475 iReturn = U14ERR_NOTSET;
1476
1477 spin_unlock_irq(&ced->stagedLock);
1478 } else
1479 iReturn = U14ERR_BADAREA;
1480
1481 if (copy_to_user(pCB, &cb, sizeof(cb)))
1482 iReturn = -EFAULT;
1483
1484 mutex_unlock(&ced->io_mutex);
1485 return iReturn;
1486 }