Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / staging / ced1401 / usb1401.c
1 /***********************************************************************************
2 CED1401 usb driver. This basic loading is based on the usb-skeleton.c code that is:
3 Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
4 Copyright (C) 2012 Alois Schloegl <alois.schloegl@ist.ac.at>
5 There is not a great deal of the skeleton left.
6
7 All the remainder dealing specifically with the CED1401 is based on drivers written
8 by CED for other systems (mainly Windows) and is:
9 Copyright (C) 2010 Cambridge Electronic Design Ltd
10 Author Greg P Smith (greg@ced.co.uk)
11
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License
14 as published by the Free Software Foundation; either version 2
15 of the License, or (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25
26 Endpoints
27 *********
28 There are 4 endpoints plus the control endpoint in the standard interface
29 provided by most 1401s. The control endpoint is used for standard USB requests,
30 plus various CED-specific transactions such as start self test, debug and get
31 the 1401 status. The other endpoints are:
32
33 1 Characters to the 1401
34 2 Characters from the 1401
35 3 Block data to the 1401
36 4 Block data to the host.
37
38 inside the driver these are indexed as an array from 0 to 3, transactions
39 over the control endpoint are carried out using a separate mechanism. The
40 use of the endpoints is mostly straightforward, with the driver issuing
41 IO request packets (IRPs) as required to transfer data to and from the 1401.
42 The handling of endpoint 2 is different because it is used for characters
43 from the 1401, which can appear spontaneously and without any other driver
44 activity - for example to repeatedly request DMA transfers in Spike2. The
45 desired effect is achieved by using an interrupt endpoint which can be
46 polled to see if it has data available, and writing the driver so that it
47 always maintains a pending read IRP from that endpoint which will read the
48 character data and terminate as soon as the 1401 makes data available. This
49 works very well, some care is taken with when you kick off this character
50 read IRP to avoid it being active when it is not wanted but generally it
51 is running all the time.
52
53 In the 2270, there are only three endpoints plus the control endpoint. In
54 addition to the transactions mentioned above, the control endpoint is used
55 to transfer character data to the 1401. The other endpoints are used as:
56
57 1 Characters from the 1401
58 2 Block data to the 1401
59 3 Block data to the host.
60
61 The type of interface available is specified by the interface subclass field
62 in the interface descriptor provided by the 1401. See the USB_INT_ constants
63 for the values that this field can hold.
64
65 ****************************************************************************
66 Linux implementation
67
68 Although Linux Device Drivers (3rd Edition) was a major source of information,
69 it is very out of date. A lot of information was gleaned from the latest
70 usb_skeleton.c code (you need to download the kernel sources to get this).
71
72 To match the Windows version, everything is done using ioctl calls. All the
73 device state is held in the DEVICE_EXTENSION (named to match Windows use).
74 Block transfers are done by using get_user_pages() to pin down a list of
75 pages that we hold a pointer to in the device driver. We also allocate a
76 coherent transfer buffer of size STAGED_SZ (this must be a multiple of the
77 bulk endpoint size so that the 1401 does not realise that we break large
78 transfers down into smaller pieces). We use kmap_atomic() to get a kernel
79 va for each page, as it is required, for copying; see CopyUserSpace().
80
81 All character and data transfers are done using asynchronous IO. All Urbs are
82 tracked by anchoring them. Status and debug ioctls are implemented with the
83 synchronous non-Urb based transfers.
84 */
85
86 #include <linux/kernel.h>
87 #include <linux/errno.h>
88 #include <linux/usb.h>
89 #include <linux/mutex.h>
90 #include <linux/mm.h>
91 #include <linux/highmem.h>
92 #include <linux/version.h>
93 #if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) )
94 #include <linux/init.h>
95 #include <linux/slab.h>
96 #include <linux/module.h>
97 #include <linux/kref.h>
98 #include <linux/uaccess.h>
99 #endif
100
101 #include "usb1401.h"
102
103 /* Define these values to match your devices */
104 #define USB_CED_VENDOR_ID 0x0525
105 #define USB_CED_PRODUCT_ID 0xa0f0
106
107 /* table of devices that work with this driver */
108 static const struct usb_device_id ced_table[] = {
109 {USB_DEVICE(USB_CED_VENDOR_ID, USB_CED_PRODUCT_ID)},
110 {} /* Terminating entry */
111 };
112
113 MODULE_DEVICE_TABLE(usb, ced_table);
114
115 /* Get a minor range for your devices from the usb maintainer */
116 #define USB_CED_MINOR_BASE 192
117
118 /* our private defines. if this grows any larger, use your own .h file */
119 #define MAX_TRANSFER (PAGE_SIZE - 512)
120 /* MAX_TRANSFER is chosen so that the VM is not stressed by
121 allocations > PAGE_SIZE and the number of packets in a page
122 is an integer 512 is the largest possible packet on EHCI */
123 #define WRITES_IN_FLIGHT 8
124 /* arbitrarily chosen */
125
/*
 * Kernel 2.6.35 renamed the USB coherent-buffer helpers (changelog entry
 * "USB: rename usb_buffer_alloc() and usb_buffer_free() users"):
 *     usb_buffer_alloc()  ->  usb_alloc_coherent()
 *     usb_buffer_free()   ->  usb_free_coherent()
 * The compatibility defines below let this driver build against older
 * kernels, e.g. Debian's 2.6.32-5-amd64.
 */
133 #if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
134 #define usb_alloc_coherent usb_buffer_alloc
135 #define usb_free_coherent usb_buffer_free
136 #define noop_llseek NULL
137 #endif
138
139 static struct usb_driver ced_driver;
140
/* ced_delete
** kref release callback: frees the three coherent transfer buffers and
** their urbs, drops our reference on the usb device and finally frees the
** device extension itself. By the time this runs the interface member of
** pdx will probably be NULL (device already disconnected), so it must not
** be used to reach the device.
*/
static void ced_delete(struct kref *kref)
{
	DEVICE_EXTENSION *pdx = to_DEVICE_EXTENSION(kref);

	/* Free the output buffer, then free the output urb. Note that we
	   read transfer_dma from the urb before freeing the urb itself. */
	usb_free_coherent(pdx->udev, OUTBUF_SZ, pdx->pCoherCharOut,
			  pdx->pUrbCharOut->transfer_dma);
	usb_free_urb(pdx->pUrbCharOut);

	/* Do the same for char input */
	usb_free_coherent(pdx->udev, INBUF_SZ, pdx->pCoherCharIn,
			  pdx->pUrbCharIn->transfer_dma);
	usb_free_urb(pdx->pUrbCharIn);

	/* Do the same for the block (staged) transfers */
	usb_free_coherent(pdx->udev, STAGED_SZ, pdx->pCoherStagedIO,
			  pdx->pStagedUrb->transfer_dma);
	usb_free_urb(pdx->pStagedUrb);

	usb_put_dev(pdx->udev);	/* release our reference on the usb device */
	kfree(pdx);
}
164
165 // This is the driver end of the open() call from user space.
166 static int ced_open(struct inode *inode, struct file *file)
167 {
168 DEVICE_EXTENSION *pdx;
169 int retval = 0;
170 int subminor = iminor(inode);
171 struct usb_interface *interface =
172 usb_find_interface(&ced_driver, subminor);
173 if (!interface) {
174 pr_err("%s - error, can't find device for minor %d", __func__,
175 subminor);
176 retval = -ENODEV;
177 goto exit;
178 }
179
180 pdx = usb_get_intfdata(interface);
181 if (!pdx) {
182 retval = -ENODEV;
183 goto exit;
184 }
185
186 dev_dbg(&interface->dev, "%s got pdx", __func__);
187
188 /* increment our usage count for the device */
189 kref_get(&pdx->kref);
190
191 /* lock the device to allow correctly handling errors
192 * in resumption */
193 mutex_lock(&pdx->io_mutex);
194
195 if (!pdx->open_count++) {
196 retval = usb_autopm_get_interface(interface);
197 if (retval) {
198 pdx->open_count--;
199 mutex_unlock(&pdx->io_mutex);
200 kref_put(&pdx->kref, ced_delete);
201 goto exit;
202 }
203 } else { //uncomment this block if you want exclusive open
204 dev_err(&interface->dev, "%s fail: already open", __func__);
205 retval = -EBUSY;
206 pdx->open_count--;
207 mutex_unlock(&pdx->io_mutex);
208 kref_put(&pdx->kref, ced_delete);
209 goto exit;
210 }
211 /* prevent the device from being autosuspended */
212
213 /* save our object in the file's private structure */
214 file->private_data = pdx;
215 mutex_unlock(&pdx->io_mutex);
216
217 exit:
218 return retval;
219 }
220
221 static int ced_release(struct inode *inode, struct file *file)
222 {
223 DEVICE_EXTENSION *pdx = file->private_data;
224 if (pdx == NULL)
225 return -ENODEV;
226
227 dev_dbg(&pdx->interface->dev, "%s called", __func__);
228 mutex_lock(&pdx->io_mutex);
229 if (!--pdx->open_count && pdx->interface) // Allow autosuspend
230 usb_autopm_put_interface(pdx->interface);
231 mutex_unlock(&pdx->io_mutex);
232
233 kref_put(&pdx->kref, ced_delete); // decrement the count on our device
234 return 0;
235 }
236
237 static int ced_flush(struct file *file, fl_owner_t id)
238 {
239 int res;
240 DEVICE_EXTENSION *pdx = file->private_data;
241 if (pdx == NULL)
242 return -ENODEV;
243
244 dev_dbg(&pdx->interface->dev, "%s char in pend=%d", __func__,
245 pdx->bReadCharsPending);
246
247 /* wait for io to stop */
248 mutex_lock(&pdx->io_mutex);
249 dev_dbg(&pdx->interface->dev, "%s got io_mutex", __func__);
250 ced_draw_down(pdx);
251
252 /* read out errors, leave subsequent opens a clean slate */
253 spin_lock_irq(&pdx->err_lock);
254 res = pdx->errors ? (pdx->errors == -EPIPE ? -EPIPE : -EIO) : 0;
255 pdx->errors = 0;
256 spin_unlock_irq(&pdx->err_lock);
257
258 mutex_unlock(&pdx->io_mutex);
259 dev_dbg(&pdx->interface->dev, "%s exit reached", __func__);
260
261 return res;
262 }
263
264 /***************************************************************************
265 ** CanAcceptIoRequests
266 ** If the device is removed, interface is set NULL. We also clear our pointer
267 ** from the interface, so we should make sure that pdx is not NULL. This will
268 ** not help with a device extension held by a file.
269 ** return true if can accept new io requests, else false
270 */
271 static bool CanAcceptIoRequests(DEVICE_EXTENSION * pdx)
272 {
273 return pdx && pdx->interface; // Can we accept IO requests
274 }
275
/****************************************************************************
** ced_writechar_callback
** Completion routine for character output urbs. On error it records the
** status, resets the circular output buffer and flags the pipe error;
** otherwise it consumes the sent bytes from the buffer and, if more
** characters remain, fires off another urb to continue the transfer.
** Runs in interrupt context, so plain spin_lock() is used throughout.
****************************************************************************/
static void ced_writechar_callback(struct urb *pUrb)
{
	DEVICE_EXTENSION *pdx = pUrb->context;
	int nGot = pUrb->actual_length;	/* what we transferred */

	if (pUrb->status) {	/* sync/async unlink faults aren't errors */
		if (!
		    (pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
		     || pUrb->status == -ESHUTDOWN)) {
			dev_err(&pdx->interface->dev,
				"%s - nonzero write bulk status received: %d",
				__func__, pUrb->status);
		}

		spin_lock(&pdx->err_lock);
		pdx->errors = pUrb->status;	/* saved for ced_flush() to report */
		spin_unlock(&pdx->err_lock);
		nGot = 0;	/* and tidy up again if so */

		spin_lock(&pdx->charOutLock);	/* already at irq level */
		pdx->dwOutBuffGet = 0;	/* Reset the output buffer */
		pdx->dwOutBuffPut = 0;
		pdx->dwNumOutput = 0;	/* Clear the char count */
		pdx->bPipeError[0] = 1;	/* Flag an error for later */
		pdx->bSendCharsPending = false;	/* Allow other threads again */
		spin_unlock(&pdx->charOutLock);	/* already at irq level */
		dev_dbg(&pdx->interface->dev,
			"%s - char out done, 0 chars sent", __func__);
	} else {
		dev_dbg(&pdx->interface->dev,
			"%s - char out done, %d chars sent", __func__, nGot);
		spin_lock(&pdx->charOutLock);	/* already at irq level */
		pdx->dwNumOutput -= nGot;	/* Now adjust the char send buffer */
		pdx->dwOutBuffGet += nGot;	/* to match what we did */
		if (pdx->dwOutBuffGet >= OUTBUF_SZ)	/* wrap the get index; can't do this any earlier as data could be overwritten */
			pdx->dwOutBuffGet = 0;

		if (pdx->dwNumOutput > 0)	/* if more to be done... */
		{
			int nPipe = 0;	/* The pipe number to use */
			int iReturn;
			char *pDat = &pdx->outputBuffer[pdx->dwOutBuffGet];
			unsigned int dwCount = pdx->dwNumOutput;	/* maximum to send */
			if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ)	/* does it cross buffer end? */
				dwCount = OUTBUF_SZ - pdx->dwOutBuffGet;
			spin_unlock(&pdx->charOutLock);	/* we are done with stuff that changes */
			memcpy(pdx->pCoherCharOut, pDat, dwCount);	/* copy output data to the buffer */
			usb_fill_bulk_urb(pdx->pUrbCharOut, pdx->udev,
					  usb_sndbulkpipe(pdx->udev,
							  pdx->epAddr[0]),
					  pdx->pCoherCharOut, dwCount,
					  ced_writechar_callback, pdx);
			pdx->pUrbCharOut->transfer_flags |=
			    URB_NO_TRANSFER_DMA_MAP;
			usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted);	/* in case we need to kill it */
			iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_ATOMIC);
			dev_dbg(&pdx->interface->dev, "%s n=%d>%s<", __func__,
				dwCount, pDat);
			spin_lock(&pdx->charOutLock);	/* grab lock for errors */
			if (iReturn) {
				pdx->bPipeError[nPipe] = 1;	/* Flag an error to be handled later */
				pdx->bSendCharsPending = false;	/* Allow other threads again */
				usb_unanchor_urb(pdx->pUrbCharOut);
				dev_err(&pdx->interface->dev,
					"%s usb_submit_urb() returned %d",
					__func__, iReturn);
			}
		} else
			pdx->bSendCharsPending = false;	/* Allow other threads again */
		spin_unlock(&pdx->charOutLock);	/* already at irq level */
	}
}
352
/****************************************************************************
** SendChars
** Transmit the characters in the output buffer to the 1401. This may need
** breaking down into multiple transfers. 3-endpoint (2270) interfaces send
** characters synchronously through EP0; 4-endpoint interfaces use an
** asynchronous bulk urb completed by ced_writechar_callback().
** Returns U14ERR_NOERROR on success, or a negative error code.
****************************************************************************/
int SendChars(DEVICE_EXTENSION * pdx)
{
	int iReturn = U14ERR_NOERROR;

	spin_lock_irq(&pdx->charOutLock);	/* Protect ourselves */

	if ((!pdx->bSendCharsPending) &&	/* Not currently sending */
	    (pdx->dwNumOutput > 0) &&	/* has characters to output */
	    (CanAcceptIoRequests(pdx)))	/* and current activity is OK */
	{
		unsigned int dwCount = pdx->dwNumOutput;	/* Get a copy of the character count */
		pdx->bSendCharsPending = true;	/* Set flag to lock out other threads */

		dev_dbg(&pdx->interface->dev,
			"Send %d chars to 1401, EP0 flag %d\n", dwCount,
			pdx->nPipes == 3);
		/* If we have only 3 end points we must send the characters to the 1401 using EP0. */
		if (pdx->nPipes == 3) {
			/* For EP0 character transmissions to the 1401, we have to hang about until
			   they are gone, as otherwise without more character IO activity they will
			   never go. */
			unsigned int count = dwCount;	/* Local char counter */
			unsigned int index = 0;	/* The index into the char buffer */

			spin_unlock_irq(&pdx->charOutLock);	/* Free spinlock as we call USBD */

			while ((count > 0) && (iReturn == U14ERR_NOERROR)) {
				/* We have to break the transfer up into 64-byte chunks because of a 2270 problem */
				int n = count > 64 ? 64 : count;	/* Chars for this xfer, max of 64 */
				int nSent = usb_control_msg(pdx->udev,
							    usb_sndctrlpipe(pdx->udev, 0),	/* use end point 0 */
							    DB_CHARS,	/* bRequest */
							    (H_TO_D | VENDOR | DEVREQ),	/* to the device, vendor request to the device */
							    0, 0,	/* value and index are both 0 */
							    &pdx->outputBuffer[index],	/* where to send from */
							    n,	/* how much to send */
							    1000);	/* timeout in milliseconds (not jiffies) */
				if (nSent <= 0) {
					iReturn = nSent ? nSent : -ETIMEDOUT;	/* if 0 chars says we timed out */
					dev_err(&pdx->interface->dev,
						"Send %d chars by EP0 failed: %d",
						n, iReturn);
				} else {
					dev_dbg(&pdx->interface->dev,
						"Sent %d chars by EP0", n);
					count -= nSent;
					index += nSent;
				}
			}

			spin_lock_irq(&pdx->charOutLock);	/* Protect pdx changes, released by general code */
			pdx->dwOutBuffGet = 0;	/* so reset the output buffer */
			pdx->dwOutBuffPut = 0;
			pdx->dwNumOutput = 0;	/* and clear the buffer count */
			pdx->bSendCharsPending = false;	/* Allow other threads again */
		} else {	/* Here for sending chars normally - we hold the spin lock */
			int nPipe = 0;	/* The pipe number to use */
			char *pDat = &pdx->outputBuffer[pdx->dwOutBuffGet];

			if ((pdx->dwOutBuffGet + dwCount) > OUTBUF_SZ)	/* does it cross buffer end? */
				dwCount = OUTBUF_SZ - pdx->dwOutBuffGet;
			spin_unlock_irq(&pdx->charOutLock);	/* we are done with stuff that changes */
			memcpy(pdx->pCoherCharOut, pDat, dwCount);	/* copy output data to the buffer */
			usb_fill_bulk_urb(pdx->pUrbCharOut, pdx->udev,
					  usb_sndbulkpipe(pdx->udev,
							  pdx->epAddr[0]),
					  pdx->pCoherCharOut, dwCount,
					  ced_writechar_callback, pdx);
			pdx->pUrbCharOut->transfer_flags |=
			    URB_NO_TRANSFER_DMA_MAP;
			usb_anchor_urb(pdx->pUrbCharOut, &pdx->submitted);	/* in case we need to kill it */
			iReturn = usb_submit_urb(pdx->pUrbCharOut, GFP_KERNEL);
			spin_lock_irq(&pdx->charOutLock);	/* grab lock for errors */
			if (iReturn) {
				pdx->bPipeError[nPipe] = 1;	/* Flag an error to be handled later */
				pdx->bSendCharsPending = false;	/* Allow other threads again */
				usb_unanchor_urb(pdx->pUrbCharOut);	/* remove from list of active urbs */
			}
		}
	} else if (pdx->bSendCharsPending && (pdx->dwNumOutput > 0))
		dev_dbg(&pdx->interface->dev,
			"SendChars bSendCharsPending:true");

	dev_dbg(&pdx->interface->dev, "SendChars exit code: %d", iReturn);
	spin_unlock_irq(&pdx->charOutLock);	/* Now let go of the spinlock */
	return iReturn;
}
444
/***************************************************************************
** CopyUserSpace
** This moves memory between pinned down user space and the pCoherStagedIO
** memory buffer we use for transfers. Copy n bytes in the direction that
** is defined by pdx->StagedRead (true means a device-to-host read, so we
** copy from the coherent buffer into the user pages; false copies the
** other way). The user space is determined by the area in pdx->StagedId
** and the offset in pdx->StagedDone. The user area may well not start on
** a page boundary, so allow for that.
**
** We have a table of pinned physical pages that describe the area, so we
** use kmap_atomic() to get a kernel virtual address for each page in turn.
**
** pdx Is our device extension which holds all we know about the transfer.
** n The number of bytes to move one way or the other.
***************************************************************************/
static void CopyUserSpace(DEVICE_EXTENSION * pdx, int n)
{
	unsigned int nArea = pdx->StagedId;
	if (nArea < MAX_TRANSAREAS) {
		TRANSAREA *pArea = &pdx->rTransDef[nArea];	/* area to be used */
		/* byte offset of this copy within the pinned user area */
		unsigned int dwOffset =
		    pdx->StagedDone + pdx->StagedOffset + pArea->dwBaseOffset;
		char *pCoherBuf = pdx->pCoherStagedIO;	/* coherent buffer */
		if (!pArea->bUsed) {
			dev_err(&pdx->interface->dev, "%s area %d unused",
				__func__, nArea);
			return;
		}

		while (n) {	/* copy page by page until n bytes moved */
			int nPage = dwOffset >> PAGE_SHIFT;	/* page number in table */
			if (nPage < pArea->nPages) {
				char *pvAddress =
				    (char *)kmap_atomic(pArea->pPages[nPage]);
				if (pvAddress) {
					unsigned int uiPageOff = dwOffset & (PAGE_SIZE - 1);	/* offset into the page */
					size_t uiXfer = PAGE_SIZE - uiPageOff;	/* max to transfer on this page */
					if (uiXfer > n)	/* limit byte count if too much */
						uiXfer = n;	/* for the page */
					if (pdx->StagedRead)
						memcpy(pvAddress + uiPageOff,
						       pCoherBuf, uiXfer);
					else
						memcpy(pCoherBuf,
						       pvAddress + uiPageOff,
						       uiXfer);
					kunmap_atomic(pvAddress);
					dwOffset += uiXfer;
					pCoherBuf += uiXfer;
					n -= uiXfer;
				} else {
					dev_err(&pdx->interface->dev,
						"%s did not map page %d",
						__func__, nPage);
					return;
				}

			} else {
				dev_err(&pdx->interface->dev,
					"%s exceeded pages %d", __func__,
					nPage);
				return;
			}
		}
	} else
		dev_err(&pdx->interface->dev, "%s bad area %d", __func__,
			nArea);
}
512
/* Forward declarations for stuff used circularly */
static int StageChunk(DEVICE_EXTENSION * pdx);

/***************************************************************************
** staged_callback (a.k.a. ReadWrite_Complete)
**
** Completion routine for our staged read/write urbs. Accumulates the bytes
** transferred; when the whole staged transfer is done (or was cancelled)
** it updates the circular-buffer block bookkeeping, wakes any user process
** waiting on the area's event, and kicks off a pending transfer if one was
** queued. Otherwise it submits the next chunk via StageChunk().
** Runs in interrupt context.
*/
static void staged_callback(struct urb *pUrb)
{
	DEVICE_EXTENSION *pdx = pUrb->context;
	unsigned int nGot = pUrb->actual_length;	/* what we transferred */
	bool bCancel = false;
	bool bRestartCharInput;	/* used at the end */

	spin_lock(&pdx->stagedLock);	/* stop ReadWriteMem() action while this routine is running */
	pdx->bStagedUrbPending = false;	/* clear the flag for staged IRP pending */

	if (pUrb->status) {	/* sync/async unlink faults aren't errors */
		if (!
		    (pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
		     || pUrb->status == -ESHUTDOWN)) {
			dev_err(&pdx->interface->dev,
				"%s - nonzero write bulk status received: %d",
				__func__, pUrb->status);
		} else
			dev_info(&pdx->interface->dev,
				 "%s - staged xfer cancelled", __func__);

		spin_lock(&pdx->err_lock);
		pdx->errors = pUrb->status;	/* saved for ced_flush() to report */
		spin_unlock(&pdx->err_lock);
		nGot = 0;	/* and tidy up again if so */
		bCancel = true;
	} else {
		dev_dbg(&pdx->interface->dev, "%s %d chars xferred", __func__,
			nGot);
		if (pdx->StagedRead)	/* if reading, save to user space */
			CopyUserSpace(pdx, nGot);	/* copy from buffer to user */
		if (nGot == 0)
			dev_dbg(&pdx->interface->dev, "%s ZLP", __func__);
	}

	/* Update the transfer length based on the TransferBufferLength value in the URB */
	pdx->StagedDone += nGot;

	dev_dbg(&pdx->interface->dev, "%s, done %d bytes of %d", __func__,
		pdx->StagedDone, pdx->StagedLength);

	if ((pdx->StagedDone == pdx->StagedLength) ||	/* If no more to do */
	    (bCancel))		/* or this IRP was cancelled */
	{
		TRANSAREA *pArea = &pdx->rTransDef[pdx->StagedId];	/* Transfer area info */
		dev_dbg(&pdx->interface->dev,
			"%s transfer done, bytes %d, cancel %d", __func__,
			pdx->StagedDone, bCancel);

		/* Here is where we sort out what to do with this transfer if using a circular
		   buffer. We have a completed transfer that can be assumed to fit into the
		   transfer area. We should be able to add this to the end of a growing block
		   or to use it to start a new block unless the code that calculates the
		   offset to use (in ReadWriteMem) is totally duff. */
		if ((pArea->bCircular) && (pArea->bCircToHost) && (!bCancel) &&	/* Time to sort out circular buffer info? */
		    (pdx->StagedRead))	/* Only for tohost transfers for now */
		{
			if (pArea->aBlocks[1].dwSize > 0)	/* If block 1 is in use we must append to it */
			{
				if (pdx->StagedOffset ==
				    (pArea->aBlocks[1].dwOffset +
				     pArea->aBlocks[1].dwSize)) {
					pArea->aBlocks[1].dwSize +=
					    pdx->StagedLength;
					dev_dbg(&pdx->interface->dev,
						"RWM_Complete, circ block 1 now %d bytes at %d",
						pArea->aBlocks[1].dwSize,
						pArea->aBlocks[1].dwOffset);
				} else {
					/* Here things have gone very, very wrong, but I cannot see
					   how this can actually be achieved */
					pArea->aBlocks[1].dwOffset =
					    pdx->StagedOffset;
					pArea->aBlocks[1].dwSize =
					    pdx->StagedLength;
					dev_err(&pdx->interface->dev,
						"%s ERROR, circ block 1 re-started %d bytes at %d",
						__func__,
						pArea->aBlocks[1].dwSize,
						pArea->aBlocks[1].dwOffset);
				}
			} else	/* If block 1 is not used, we try to add to block 0 */
			{
				if (pArea->aBlocks[0].dwSize > 0)	/* Got stored block 0 information? */
				{	/* Must append onto the existing block 0 */
					if (pdx->StagedOffset ==
					    (pArea->aBlocks[0].dwOffset +
					     pArea->aBlocks[0].dwSize)) {
						pArea->aBlocks[0].dwSize += pdx->StagedLength;	/* Just add this transfer in */
						dev_dbg(&pdx->interface->dev,
							"RWM_Complete, circ block 0 now %d bytes at %d",
							pArea->aBlocks[0].
							dwSize,
							pArea->aBlocks[0].
							dwOffset);
					} else	/* If it doesn't append, put into new block 1 */
					{
						pArea->aBlocks[1].dwOffset =
						    pdx->StagedOffset;
						pArea->aBlocks[1].dwSize =
						    pdx->StagedLength;
						dev_dbg(&pdx->interface->dev,
							"RWM_Complete, circ block 1 started %d bytes at %d",
							pArea->aBlocks[1].
							dwSize,
							pArea->aBlocks[1].
							dwOffset);
					}
				} else	/* No info stored yet, just save in block 0 */
				{
					pArea->aBlocks[0].dwOffset =
					    pdx->StagedOffset;
					pArea->aBlocks[0].dwSize =
					    pdx->StagedLength;
					dev_dbg(&pdx->interface->dev,
						"RWM_Complete, circ block 0 started %d bytes at %d",
						pArea->aBlocks[0].dwSize,
						pArea->aBlocks[0].dwOffset);
				}
			}
		}

		if (!bCancel)	/* Don't generate an event if cancelled */
		{
			dev_dbg(&pdx->interface->dev,
				"RWM_Complete, bCircular %d, bToHost %d, eStart %d, eSize %d",
				pArea->bCircular, pArea->bEventToHost,
				pArea->dwEventSt, pArea->dwEventSz);
			if ((pArea->dwEventSz) &&	/* Set a user-mode event... */
			    (pdx->StagedRead == pArea->bEventToHost))	/* ...on transfers in this direction? */
			{
				int iWakeUp = 0;	/* assume no wakeup needed */
				/* If we have completed the right sort of DMA transfer then set
				   the event to notify the user code to wake up anyone that is
				   waiting. */
				if ((pArea->bCircular) &&	/* Circular areas use a simpler test */
				    (pArea->bCircToHost))	/* only in supported direction */
				{	/* Is total data waiting up to size limit? */
					unsigned int dwTotal =
					    pArea->aBlocks[0].dwSize +
					    pArea->aBlocks[1].dwSize;
					iWakeUp = (dwTotal >= pArea->dwEventSz);
				} else {
					/* Linear areas: wake if this transfer overlaps the event region */
					unsigned int transEnd =
					    pdx->StagedOffset +
					    pdx->StagedLength;
					unsigned int eventEnd =
					    pArea->dwEventSt + pArea->dwEventSz;
					iWakeUp = (pdx->StagedOffset < eventEnd)
					    && (transEnd > pArea->dwEventSt);
				}

				if (iWakeUp) {
					dev_dbg(&pdx->interface->dev,
						"About to set event to notify app");
					wake_up_interruptible(&pArea->wqEvent);	/* wake up waiting processes */
					++pArea->iWakeUp;	/* increment wakeup count */
				}
			}
		}

		pdx->dwDMAFlag = MODE_CHAR;	/* Switch back to char mode before ReadWriteMem call */

		if (!bCancel)	/* Don't look for waiting transfer if cancelled */
		{
			/* If we have a transfer waiting, kick it off */
			if (pdx->bXFerWaiting)	/* Got a block xfer waiting? */
			{
				int iReturn;
				dev_info(&pdx->interface->dev,
					 "*** RWM_Complete *** pending transfer will now be set up!!!");
				iReturn =
				    ReadWriteMem(pdx, !pdx->rDMAInfo.bOutWard,
						 pdx->rDMAInfo.wIdent,
						 pdx->rDMAInfo.dwOffset,
						 pdx->rDMAInfo.dwSize);

				if (iReturn)
					dev_err(&pdx->interface->dev,
						"RWM_Complete rw setup failed %d",
						iReturn);
			}
		}

	} else			/* Here for more to do */
		StageChunk(pdx);	/* fire off the next bit */

	/* While we hold the stagedLock, see if we should reallow character input ints.
	   Don't allow if cancelled, or if a new block has started or if there is a
	   waiting block. This feels wrong as we should ask which spin lock protects
	   dwDMAFlag. */
	bRestartCharInput = !bCancel && (pdx->dwDMAFlag == MODE_CHAR)
	    && !pdx->bXFerWaiting;

	spin_unlock(&pdx->stagedLock);	/* Finally release the lock again */

	/* This is not correct as dwDMAFlag is protected by the staged lock, but it is
	   treated in Allowi as if it were protected by the char lock. In any case, most
	   systems will not be upset by char input during DMA... sigh. Needs sorting out. */
	if (bRestartCharInput)	/* may be out of date, but... */
		Allowi(pdx, true);	/* ...Allowi tests a lock too. */
	dev_dbg(&pdx->interface->dev, "%s done", __func__);
}
719
/****************************************************************************
** StageChunk
**
** Generates the next chunk of data making up a staged transfer: copies the
** next slice of user data into the coherent buffer (for writes only) and
** submits a bulk urb of up to STAGED_SZ bytes, to be completed by
** staged_callback().
**
** The calling code must have acquired the staging spinlock before calling
** this function, and is responsible for releasing it. We are at callback level.
** Returns U14ERR_NOERROR on success, U14ERR_FAIL or the negative
** usb_submit_urb() code on failure.
****************************************************************************/
static int StageChunk(DEVICE_EXTENSION * pdx)
{
	int iReturn = U14ERR_NOERROR;
	unsigned int ChunkSize;
	int nPipe = pdx->StagedRead ? 3 : 2;	/* The pipe number to use for reads or writes */
	if (pdx->nPipes == 3)
		nPipe--;	/* Adjust for the 3-pipe case */
	if (nPipe < 0)		/* and trap case that should never happen */
		return U14ERR_FAIL;

	if (!CanAcceptIoRequests(pdx))	/* got sudden remove? */
	{
		dev_info(&pdx->interface->dev, "%s sudden remove, giving up",
			 __func__);
		return U14ERR_FAIL;	/* could do with a better error */
	}

	ChunkSize = (pdx->StagedLength - pdx->StagedDone);	/* transfer length remaining */
	if (ChunkSize > STAGED_SZ)	/* make sure to keep legal */
		ChunkSize = STAGED_SZ;	/* limit to max allowed */

	if (!pdx->StagedRead)	/* if writing... */
		CopyUserSpace(pdx, ChunkSize);	/* ...copy data into the buffer */

	usb_fill_bulk_urb(pdx->pStagedUrb, pdx->udev,
			  pdx->StagedRead ? usb_rcvbulkpipe(pdx->udev,
							    pdx->
							    epAddr[nPipe]) :
			  usb_sndbulkpipe(pdx->udev, pdx->epAddr[nPipe]),
			  pdx->pCoherStagedIO, ChunkSize, staged_callback, pdx);
	pdx->pStagedUrb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(pdx->pStagedUrb, &pdx->submitted);	/* in case we need to kill it */
	iReturn = usb_submit_urb(pdx->pStagedUrb, GFP_ATOMIC);
	if (iReturn) {
		usb_unanchor_urb(pdx->pStagedUrb);	/* remove from active list again */
		pdx->bPipeError[nPipe] = 1;	/* Flag an error to be handled later */
		dev_err(&pdx->interface->dev, "%s submit urb failed, code %d",
			__func__, iReturn);
	} else
		pdx->bStagedUrbPending = true;	/* Set the flag for staged URB pending */
	dev_dbg(&pdx->interface->dev, "%s done so far:%d, this size:%d",
		__func__, pdx->StagedDone, ChunkSize);

	return iReturn;
}
773
/***************************************************************************
** ReadWriteMem
**
** This routine is used generally for block read and write operations.
** Breaks up a read or write in to specified sized chunks, as specified by pipe
** information on maximum transfer size.
**
** Any code that calls this must be holding the stagedLock
**
** Arguments:
**    DeviceObject - pointer to our FDO (Functional Device Object)
**    Read - TRUE for read, FALSE for write. This is from POV of the driver
**    wIdent - the transfer area number - defines memory area and more.
**    dwOffs - the start offset within the transfer area of the start of this
**             transfer.
**    dwLen - the number of bytes to transfer.
**
** Returns U14ERR_NOERROR on success (including the "deferred" cases where the
** transfer is merely flagged as waiting), U14ERR_FAIL if the device cannot
** accept I/O at all.
*/
int ReadWriteMem(DEVICE_EXTENSION * pdx, bool Read, unsigned short wIdent,
		 unsigned int dwOffs, unsigned int dwLen)
{
	TRANSAREA *pArea = &pdx->rTransDef[wIdent];	// Transfer area info

	if (!CanAcceptIoRequests(pdx))	// Are we in a state to accept new requests?
	{
		dev_err(&pdx->interface->dev, "%s can't accept requests",
			__func__);
		return U14ERR_FAIL;
	}

	dev_dbg(&pdx->interface->dev,
		"%s xfer %d bytes to %s, offset %d, area %d", __func__, dwLen,
		Read ? "host" : "1401", dwOffs, wIdent);

	// Amazingly, we can get an escape sequence back before the current staged Urb is done, so we
	//  have to check for this situation and, if so, wait until all is OK.
	if (pdx->bStagedUrbPending) {
		pdx->bXFerWaiting = true;	// Flag we are waiting
		dev_info(&pdx->interface->dev,
			 "%s xfer is waiting, as previous staged pending",
			 __func__);
		return U14ERR_NOERROR;	// deferred: staged-complete path restarts us
	}

	if (dwLen == 0)		// allow 0-len read or write; just return success
	{
		dev_dbg(&pdx->interface->dev,
			"%s OK; zero-len read/write request", __func__);
		return U14ERR_NOERROR;
	}

	if ((pArea->bCircular) &&	// Circular transfer?
	    (pArea->bCircToHost) && (Read))	// In a supported direction
	{			// If so, we sort out offset ourself
		bool bWait = false;	// Flag for transfer having to wait

		dev_dbg(&pdx->interface->dev,
			"Circular buffers are %d at %d and %d at %d",
			pArea->aBlocks[0].dwSize, pArea->aBlocks[0].dwOffset,
			pArea->aBlocks[1].dwSize, pArea->aBlocks[1].dwOffset);
		// The circular buffer is managed as (at most) two used regions,
		// aBlocks[0] and aBlocks[1]. New data is appended after block 1
		// if it exists, otherwise after block 0, wrapping to offset 0
		// when the end of the area is reached.
		if (pArea->aBlocks[1].dwSize > 0)	// Using the second block already?
		{
			dwOffs = pArea->aBlocks[1].dwOffset + pArea->aBlocks[1].dwSize;	// take offset from that
			bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset;	// Wait if will overwrite block 0?
			bWait |= (dwOffs + dwLen) > pArea->dwLength;	// or if it overflows the buffer
		} else		// Area 1 not in use, try to use area 0
		{
			if (pArea->aBlocks[0].dwSize == 0)	// Reset block 0 if not in use
				pArea->aBlocks[0].dwOffset = 0;
			dwOffs =
			    pArea->aBlocks[0].dwOffset +
			    pArea->aBlocks[0].dwSize;
			if ((dwOffs + dwLen) > pArea->dwLength)	// Off the end of the buffer?
			{
				pArea->aBlocks[1].dwOffset = 0;	// Set up to use second block
				dwOffs = 0;	// wrap this transfer to the start
				bWait = (dwOffs + dwLen) > pArea->aBlocks[0].dwOffset;	// Wait if will overwrite block 0?
				bWait |= (dwOffs + dwLen) > pArea->dwLength;	// or if it overflows the buffer
			}
		}

		if (bWait)	// This transfer will have to wait?
		{
			pdx->bXFerWaiting = true;	// Flag we are waiting
			dev_dbg(&pdx->interface->dev,
				"%s xfer waiting for circular buffer space",
				__func__);
			return U14ERR_NOERROR;	// deferred until space is freed
		}

		dev_dbg(&pdx->interface->dev,
			"%s circular xfer, %d bytes starting at %d", __func__,
			dwLen, dwOffs);
	}
	// Save the parameters for the read/write transfer
	pdx->StagedRead = Read;	// Save the parameters for this read
	pdx->StagedId = wIdent;	// ID allows us to get transfer area info
	pdx->StagedOffset = dwOffs;	// The area within the transfer area
	pdx->StagedLength = dwLen;
	pdx->StagedDone = 0;	// Initialise the byte count
	pdx->dwDMAFlag = MODE_LINEAR;	// Set DMA mode flag at this point
	pdx->bXFerWaiting = false;	// Clearly not a transfer waiting now

//    KeClearEvent(&pdx->StagingDoneEvent);           // Clear the transfer done event
	StageChunk(pdx);	// fire off the first chunk

	return U14ERR_NOERROR;
}
881
/****************************************************************************
**
** ReadChar
**
** Pulls the next unread character out of a buffer. Returns false when the
** buffer (dGot valid bytes) is exhausted. Used as part of decoding a DMA
** request. *pdDone is the running count of consumed bytes and is advanced
** on success only.
**
****************************************************************************/
static bool ReadChar(unsigned char *pChar, char *pBuf, unsigned int *pdDone,
		     unsigned int dGot)
{
	unsigned int dwNext = *pdDone;	/* index of the next unread byte */

	if (dwNext >= dGot)		/* nothing left to read? */
		return false;

	*pChar = (unsigned char)pBuf[dwNext];	/* hand the byte back */
	*pdDone = dwNext + 1;			/* and consume it */
	return true;
}
906
#ifdef NOTUSED
/****************************************************************************
**
** ReadWord
**
** Reads a 16-bit value from the 1401 buffer, low byte first, by calling
** ReadChar once per byte. Fails (returns false) if either byte is missing;
** *pdDone is advanced only for the bytes actually consumed.
**
*****************************************************************************/
static bool ReadWord(unsigned short *pWord, char *pBuf, unsigned int *pdDone,
		     unsigned int dGot)
{
	unsigned char *pBytes = (unsigned char *)pWord;

	if (!ReadChar(pBytes, pBuf, pdDone, dGot))
		return false;

	return ReadChar(pBytes + 1, pBuf, pdDone, dGot);
}
#endif
925
/****************************************************************************
** ReadHuff
**
** Reads a variable-length coded number and returns it. The coding is:
** values 0..127 arrive as 1 byte. Values 128..16383 arrive as two bytes,
** the top bit of the first byte indicating another is on its way. Values
** 16384..4194303 arrive as three bytes, flagged by the top two bits of the
** first byte. Bytes are big-endian within the sequence.
**
** Returns false if the buffer runs out mid-sequence; note that *pDWord is
** still written with whatever was accumulated so far in that case.
**
*****************************************************************************/
static bool ReadHuff(volatile unsigned int *pDWord, char *pBuf,
		     unsigned int *pdDone, unsigned int dGot)
{
	unsigned char ucByte;		/* scratch for each ReadChar call */
	unsigned int dwAccum = 0;	/* value assembled so far */
	bool bOK = ReadChar(&ucByte, pBuf, pdDone, dGot);

	if (bOK) {
		dwAccum = ucByte;			/* first (or only) byte */
		if ((dwAccum & 0x00000080) != 0) {	/* continuation bit set? */
			dwAccum &= 0x0000007F;		/* strip the flag bit */
			bOK = ReadChar(&ucByte, pBuf, pdDone, dGot);
			if (bOK) {
				dwAccum = (dwAccum << 8) | ucByte;
				if ((dwAccum & 0x00004000) != 0) {	/* three-byte sequence? */
					dwAccum &= 0x00003FFF;	/* strip the flag bit */
					bOK = ReadChar(&ucByte, pBuf, pdDone, dGot);
					if (bOK)
						dwAccum = (dwAccum << 8) | ucByte;
				}
			}
		}
	}

	*pDWord = dwAccum;	/* hand back whatever we decoded */
	return bOK;
}
966
/***************************************************************************
**
** ReadDMAInfo
**
** Tries to read info about the dma request from the 1401 and decode it into
** the dma descriptor block. We have at this point had the escape character
** from the 1401 and now we must read in the rest of the information about
** the transfer request. Returns FALSE if 1401 fails to respond or obsolete
** code from 1401 or bad parameters.
**
** The pBuf char pointer does not include the initial escape character, so
** we start handling the data at offset zero.
**
** First byte: low nibble is the transfer-type code, bits 4-6 the transfer
** area identifier. For the extended linear types this is followed by two
** Huffman-coded numbers: the offset, then the size (see ReadHuff).
**
*****************************************************************************/
static bool ReadDMAInfo(volatile DMADESC * pDmaDesc, DEVICE_EXTENSION * pdx,
			char *pBuf, unsigned int dwCount)
{
	bool bResult = false;	// assume we won't succeed
	unsigned char ucData;
	unsigned int dDone = 0;	// We haven't parsed anything so far

	dev_dbg(&pdx->interface->dev, "%s", __func__);

	if (ReadChar(&ucData, pBuf, &dDone, dwCount)) {
		unsigned char ucTransCode = (ucData & 0x0F);	// get code for transfer type
		unsigned short wIdent = ((ucData >> 4) & 0x07);	// and area identifier

		// fill in the structure we were given
		pDmaDesc->wTransType = ucTransCode;	// type of transfer
		pDmaDesc->wIdent = wIdent;	// area to use
		pDmaDesc->dwSize = 0;	// initialise other bits
		pDmaDesc->dwOffset = 0;

		dev_dbg(&pdx->interface->dev, "%s type: %d ident: %d", __func__,
			pDmaDesc->wTransType, pDmaDesc->wIdent);

		pDmaDesc->bOutWard = (ucTransCode != TM_EXTTOHOST);	// set transfer direction

		switch (ucTransCode) {
		case TM_EXTTOHOST:	// Extended linear transfer modes (the only ones!)
		case TM_EXTTO1401:
			{
				// Offset then size follow, each Huffman coded;
				// both reads must succeed for a valid request.
				bResult =
				    ReadHuff(&(pDmaDesc->dwOffset), pBuf,
					     &dDone, dwCount)
				    && ReadHuff(&(pDmaDesc->dwSize), pBuf,
						&dDone, dwCount);
				if (bResult) {
					dev_dbg(&pdx->interface->dev,
						"%s xfer offset & size %d %d",
						__func__, pDmaDesc->dwOffset,
						pDmaDesc->dwSize);

					// Validate against the transfer area table:
					// the area must exist, be set up, and the
					// requested [offset, offset+size) range must
					// lie inside the area's length.
					if ((wIdent >= MAX_TRANSAREAS) ||	// Illegal area number, or...
					    (!pdx->rTransDef[wIdent].bUsed) ||	// area not set up, or...
					    (pDmaDesc->dwOffset > pdx->rTransDef[wIdent].dwLength) ||	// range/size
					    ((pDmaDesc->dwOffset +
					      pDmaDesc->dwSize) >
					     (pdx->rTransDef[wIdent].
					      dwLength))) {
						bResult = false;	// bad parameter(s)
						dev_dbg(&pdx->interface->dev,
							"%s bad param - id %d, bUsed %d, offset %d, size %d, area length %d",
							__func__, wIdent,
							pdx->rTransDef[wIdent].
							bUsed,
							pDmaDesc->dwOffset,
							pDmaDesc->dwSize,
							pdx->rTransDef[wIdent].
							dwLength);
					}
				}
				break;
			}
		default:
			// Any other transfer code is unsupported here; bResult
			// stays false and the caller reports the failure.
			break;
		}
	} else
		bResult = false;

	if (!bResult)		// report any failure to read/validate the sequence
		dev_err(&pdx->interface->dev, "%s error reading Esc sequence",
			__func__);

	return bResult;
}
1053
1054 /****************************************************************************
1055 **
1056 ** Handle1401Esc
1057 **
1058 ** Deals with an escape sequence coming from the 1401. This can either be
1059 ** a DMA transfer request of various types or a response to an escape sequence
1060 ** sent to the 1401. This is called from a callback.
1061 **
1062 ** Parameters are
1063 **
1064 ** dwCount - the number of characters in the device extension char in buffer,
1065 ** this is known to be at least 2 or we will not be called.
1066 **
1067 ****************************************************************************/
1068 static int Handle1401Esc(DEVICE_EXTENSION * pdx, char *pCh,
1069 unsigned int dwCount)
1070 {
1071 int iReturn = U14ERR_FAIL;
1072
1073 // I have no idea what this next test is about. '?' is 0x3f, which is area 3, code
1074 // 15. At the moment, this is not used, so it does no harm, but unless someone can
1075 // tell me what this is for, it should be removed from this and the Windows driver.
1076 if (pCh[0] == '?') // Is this an information response
1077 { // Parse and save the information
1078 } else {
1079 spin_lock(&pdx->stagedLock); // Lock others out
1080
1081 if (ReadDMAInfo(&pdx->rDMAInfo, pdx, pCh, dwCount)) // Get DMA parameters
1082 {
1083 unsigned short wTransType = pdx->rDMAInfo.wTransType; // check transfer type
1084
1085 dev_dbg(&pdx->interface->dev,
1086 "%s xfer to %s, offset %d, length %d", __func__,
1087 pdx->rDMAInfo.bOutWard ? "1401" : "host",
1088 pdx->rDMAInfo.dwOffset, pdx->rDMAInfo.dwSize);
1089
1090 if (pdx->bXFerWaiting) // Check here for badly out of kilter...
1091 { // This can never happen, really
1092 dev_err(&pdx->interface->dev,
1093 "ERROR: DMA setup while transfer still waiting");
1094 spin_unlock(&pdx->stagedLock);
1095 } else {
1096 if ((wTransType == TM_EXTTOHOST)
1097 || (wTransType == TM_EXTTO1401)) {
1098 iReturn =
1099 ReadWriteMem(pdx,
1100 !pdx->rDMAInfo.
1101 bOutWard,
1102 pdx->rDMAInfo.wIdent,
1103 pdx->rDMAInfo.dwOffset,
1104 pdx->rDMAInfo.dwSize);
1105 if (iReturn != U14ERR_NOERROR)
1106 dev_err(&pdx->interface->dev,
1107 "%s ReadWriteMem() failed %d",
1108 __func__, iReturn);
1109 } else // This covers non-linear transfer setup
1110 dev_err(&pdx->interface->dev,
1111 "%s Unknown block xfer type %d",
1112 __func__, wTransType);
1113 }
1114 } else // Failed to read parameters
1115 dev_err(&pdx->interface->dev, "%s ReadDMAInfo() fail",
1116 __func__);
1117
1118 spin_unlock(&pdx->stagedLock); // OK here
1119 }
1120
1121 dev_dbg(&pdx->interface->dev, "%s returns %d", __func__, iReturn);
1122
1123 return iReturn;
1124 }
1125
/****************************************************************************
** Callback for the character read complete or error
**
** URB completion handler for the character-input interrupt pipe. On error,
** records the status and flags the pipe; on success, either hands an escape
** sequence (first char 0x1b after stripping the top bit) to Handle1401Esc,
** or copies the received characters (top bit stripped) into the circular
** input buffer. Note the lock discipline: every branch acquires charInLock
** and the single unlock near the end releases it; Handle1401Esc is
** deliberately called BEFORE taking charInLock (it takes stagedLock itself).
** Finishes by re-arming input via Allowi.
****************************************************************************/
static void ced_readchar_callback(struct urb *pUrb)
{
	DEVICE_EXTENSION *pdx = pUrb->context;
	int nGot = pUrb->actual_length;	// what we transferred

	if (pUrb->status)	// Do we have a problem to handle?
	{
		int nPipe = pdx->nPipes == 4 ? 1 : 0;	// The pipe number to use for error
		// sync/async unlink faults aren't errors... just saying device removed or stopped
		if (!
		    (pUrb->status == -ENOENT || pUrb->status == -ECONNRESET
		     || pUrb->status == -ESHUTDOWN)) {
			dev_err(&pdx->interface->dev,
				"%s - nonzero write bulk status received: %d",
				__func__, pUrb->status);
		} else
			dev_dbg(&pdx->interface->dev,
				"%s - 0 chars pUrb->status=%d (shutdown?)",
				__func__, pUrb->status);

		spin_lock(&pdx->err_lock);
		pdx->errors = pUrb->status;	// save for later reporting
		spin_unlock(&pdx->err_lock);
		nGot = 0;	// and tidy up again if so

		spin_lock(&pdx->charInLock);	// already at irq level
		pdx->bPipeError[nPipe] = 1;	// Flag an error for later
	} else {
		if ((nGot > 1) && ((pdx->pCoherCharIn[0] & 0x7f) == 0x1b))	// Esc sequence?
		{
			Handle1401Esc(pdx, &pdx->pCoherCharIn[1], nGot - 1);	// handle it
			spin_lock(&pdx->charInLock);	// already at irq level
		} else {
			spin_lock(&pdx->charInLock);	// already at irq level
			if (nGot > 0) {
				unsigned int i;
				if (nGot < INBUF_SZ) {
					pdx->pCoherCharIn[nGot] = 0;	// tidy the string
					dev_dbg(&pdx->interface->dev,
						"%s got %d chars >%s<",
						__func__, nGot,
						pdx->pCoherCharIn);
				}
				// We know that whatever we read must fit in the input buffer
				for (i = 0; i < nGot; i++) {
					pdx->inputBuffer[pdx->dwInBuffPut++] =
					    pdx->pCoherCharIn[i] & 0x7F;
					if (pdx->dwInBuffPut >= INBUF_SZ)
						pdx->dwInBuffPut = 0;	// wrap the put index
				}

				if ((pdx->dwNumInput + nGot) <= INBUF_SZ)
					pdx->dwNumInput += nGot;	// Adjust the buffer count accordingly
			} else
				dev_dbg(&pdx->interface->dev, "%s read ZLP",
					__func__);
		}
	}

	pdx->bReadCharsPending = false;	// No longer have a pending read
	spin_unlock(&pdx->charInLock);	// already at irq level

	Allowi(pdx, true);	// see if we can do the next one
}
1193
/****************************************************************************
** Allowi
**
** This is used to make sure that there is always a pending input transfer so
** we can pick up any inward transfers. This can be called in multiple contexts
** so we use the irqsave version of the spinlock.
**
** bInCallback - true when called from URB completion context, so the submit
**               uses GFP_ATOMIC instead of GFP_KERNEL.
** Returns U14ERR_NOERROR, or the usb_submit_urb error code on failure.
****************************************************************************/
int Allowi(DEVICE_EXTENSION * pdx, bool bInCallback)
{
	int iReturn = U14ERR_NOERROR;
	unsigned long flags;
	spin_lock_irqsave(&pdx->charInLock, flags);	// can be called in multiple contexts

	// We don't want char input running while DMA is in progress as we know that this
	// can cause sequencing problems for the 2270. So don't. It will also allow the
	// ERR response to get back to the host code too early on some PCs, even if there
	// is no actual driver failure, so we don't allow this at all.
	if (!pdx->bInDrawDown &&	// stop input if
	    !pdx->bReadCharsPending &&	// If no read request outstanding
	    (pdx->dwNumInput < (INBUF_SZ / 2)) &&	// and there is some space
	    (pdx->dwDMAFlag == MODE_CHAR) &&	// not doing any DMA
	    (!pdx->bXFerWaiting) &&	// no xfer waiting to start
	    (CanAcceptIoRequests(pdx)))	// and activity is generally OK
	{			// then off we go
		unsigned int nMax = INBUF_SZ - pdx->dwNumInput;	// max we could read
		int nPipe = pdx->nPipes == 4 ? 1 : 0;	// The pipe number to use

		dev_dbg(&pdx->interface->dev, "%s %d chars in input buffer",
			__func__, pdx->dwNumInput);

		usb_fill_int_urb(pdx->pUrbCharIn, pdx->udev,
				 usb_rcvintpipe(pdx->udev, pdx->epAddr[nPipe]),
				 pdx->pCoherCharIn, nMax, ced_readchar_callback,
				 pdx, pdx->bInterval);
		pdx->pUrbCharIn->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;	// short xfers are OK by default
		usb_anchor_urb(pdx->pUrbCharIn, &pdx->submitted);	// in case we need to kill it
		iReturn =
		    usb_submit_urb(pdx->pUrbCharIn,
				   bInCallback ? GFP_ATOMIC : GFP_KERNEL);
		if (iReturn) {
			usb_unanchor_urb(pdx->pUrbCharIn);	// remove from list of active Urbs
			pdx->bPipeError[nPipe] = 1;	// Flag an error to be handled later
			dev_err(&pdx->interface->dev,
				"%s submit urb failed: %d", __func__, iReturn);
		} else
			pdx->bReadCharsPending = true;	// Flag that we are active here
	}

	spin_unlock_irqrestore(&pdx->charInLock, flags);

	return iReturn;

}
1247
/*****************************************************************************
** The ioctl entry point to the driver that is used by us to talk to it.
** inode    The device node (no longer in 3.0.0 kernels)
** file     The file that is open, which holds our pdx pointer
** ulArg    The argument passed in. Note that long is 64-bits in 64-bit system, i.e. it is big
**          enough for a 64-bit pointer.
**
** Dispatches on _IOC_NR(cmd) to the per-command handlers; most return a
** U14ERR_* code directly. Returns -ENODEV if the device has gone, -EFAULT
** if the user buffer fails access_ok, U14ERR_NO_SUCH_FN for unknown cmds.
*****************************************************************************/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
static long ced_ioctl(struct file *file, unsigned int cmd, unsigned long ulArg)
#else
static int ced_ioctl(struct inode *node, struct file *file, unsigned int cmd,
		     unsigned long ulArg)
#endif
{
	int err = 0;
	DEVICE_EXTENSION *pdx = file->private_data;
	if (!CanAcceptIoRequests(pdx))	// check we still exist
		return -ENODEV;

	// Check that access is allowed, where it is needed. Anything that would have an indeterminate
	// size will be checked by the specific command.
	if (_IOC_DIR(cmd) & _IOC_READ)	// read from point of view of user...
		err = !access_ok(VERIFY_WRITE, (void __user *)ulArg, _IOC_SIZE(cmd));	// is kernel write
	else if (_IOC_DIR(cmd) & _IOC_WRITE)	// and write from point of view of user...
		err = !access_ok(VERIFY_READ, (void __user *)ulArg, _IOC_SIZE(cmd));	// is kernel read
	if (err)
		return -EFAULT;

	switch (_IOC_NR(cmd)) {
	case _IOC_NR(IOCTL_CED_SENDSTRING(0)):
		return SendString(pdx, (const char __user *)ulArg,
				  _IOC_SIZE(cmd));

	case _IOC_NR(IOCTL_CED_RESET1401):
		return Reset1401(pdx);

	case _IOC_NR(IOCTL_CED_GETCHAR):
		return GetChar(pdx);

	case _IOC_NR(IOCTL_CED_SENDCHAR):
		return SendChar(pdx, (char)ulArg);

	case _IOC_NR(IOCTL_CED_STAT1401):
		return Stat1401(pdx);

	case _IOC_NR(IOCTL_CED_LINECOUNT):
		return LineCount(pdx);

	case _IOC_NR(IOCTL_CED_GETSTRING(0)):
		return GetString(pdx, (char __user *)ulArg, _IOC_SIZE(cmd));

	case _IOC_NR(IOCTL_CED_SETTRANSFER):
		return SetTransfer(pdx, (TRANSFERDESC __user *) ulArg);

	case _IOC_NR(IOCTL_CED_UNSETTRANSFER):
		return UnsetTransfer(pdx, (int)ulArg);

	case _IOC_NR(IOCTL_CED_SETEVENT):
		return SetEvent(pdx, (TRANSFEREVENT __user *) ulArg);

	case _IOC_NR(IOCTL_CED_GETOUTBUFSPACE):
		return GetOutBufSpace(pdx);

	case _IOC_NR(IOCTL_CED_GETBASEADDRESS):
		return -1;	// no base address on USB; legacy ISA concept

	case _IOC_NR(IOCTL_CED_GETDRIVERREVISION):
		return (2 << 24) | (DRIVERMAJREV << 16) | DRIVERMINREV;	// USB | MAJOR | MINOR

	case _IOC_NR(IOCTL_CED_GETTRANSFER):
		return GetTransfer(pdx, (TGET_TX_BLOCK __user *) ulArg);

	case _IOC_NR(IOCTL_CED_KILLIO1401):
		return KillIO1401(pdx);

	case _IOC_NR(IOCTL_CED_STATEOF1401):
		return StateOf1401(pdx);

	case _IOC_NR(IOCTL_CED_GRAB1401):
	case _IOC_NR(IOCTL_CED_FREE1401):
		return U14ERR_NOERROR;	// grab/free are no-ops on USB

	case _IOC_NR(IOCTL_CED_STARTSELFTEST):
		return StartSelfTest(pdx);

	case _IOC_NR(IOCTL_CED_CHECKSELFTEST):
		return CheckSelfTest(pdx, (TGET_SELFTEST __user *) ulArg);

	case _IOC_NR(IOCTL_CED_TYPEOF1401):
		return TypeOf1401(pdx);

	case _IOC_NR(IOCTL_CED_TRANSFERFLAGS):
		return TransferFlags(pdx);

	case _IOC_NR(IOCTL_CED_DBGPEEK):
		return DbgPeek(pdx, (TDBGBLOCK __user *) ulArg);

	case _IOC_NR(IOCTL_CED_DBGPOKE):
		return DbgPoke(pdx, (TDBGBLOCK __user *) ulArg);

	case _IOC_NR(IOCTL_CED_DBGRAMPDATA):
		return DbgRampData(pdx, (TDBGBLOCK __user *) ulArg);

	case _IOC_NR(IOCTL_CED_DBGRAMPADDR):
		return DbgRampAddr(pdx, (TDBGBLOCK __user *) ulArg);

	case _IOC_NR(IOCTL_CED_DBGGETDATA):
		return DbgGetData(pdx, (TDBGBLOCK __user *) ulArg);

	case _IOC_NR(IOCTL_CED_DBGSTOPLOOP):
		return DbgStopLoop(pdx);

	case _IOC_NR(IOCTL_CED_FULLRESET):
		pdx->bForceReset = true;	// Set a flag for a full reset
		break;

	case _IOC_NR(IOCTL_CED_SETCIRCULAR):
		return SetCircular(pdx, (TRANSFERDESC __user *) ulArg);

	case _IOC_NR(IOCTL_CED_GETCIRCBLOCK):
		return GetCircBlock(pdx, (TCIRCBLOCK __user *) ulArg);

	case _IOC_NR(IOCTL_CED_FREECIRCBLOCK):
		return FreeCircBlock(pdx, (TCIRCBLOCK __user *) ulArg);

	case _IOC_NR(IOCTL_CED_WAITEVENT):
		// ulArg packs the area id in the low byte, timeout above it
		return WaitEvent(pdx, (int)(ulArg & 0xff), (int)(ulArg >> 8));

	case _IOC_NR(IOCTL_CED_TESTEVENT):
		return TestEvent(pdx, (int)ulArg);

	default:
		return U14ERR_NO_SUCH_FN;
	}
	return U14ERR_NOERROR;
}
1384
/* File operations for the character device; ioctl entry point differs by
 * kernel version (unlocked_ioctl from 2.6.36 onwards). */
static const struct file_operations ced_fops = {
	.owner = THIS_MODULE,
	.open = ced_open,
	.release = ced_release,
	.flush = ced_flush,
	.llseek = noop_llseek,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)
	.unlocked_ioctl = ced_ioctl,
#else
	.ioctl = ced_ioctl,
#endif
};
1397
/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 * (device nodes appear as /dev/cedusb0, /dev/cedusb1, ...)
 */
static struct usb_class_driver ced_class = {
	.name = "cedusb%d",
	.fops = &ced_fops,
	.minor_base = USB_CED_MINOR_BASE,
};
1407
1408 // Check that the device that matches a 1401 vendor and product ID is OK to use and
1409 // initialise our DEVICE_EXTENSION.
1410 static int ced_probe(struct usb_interface *interface,
1411 const struct usb_device_id *id)
1412 {
1413 DEVICE_EXTENSION *pdx;
1414 struct usb_host_interface *iface_desc;
1415 struct usb_endpoint_descriptor *endpoint;
1416 int i, bcdDevice;
1417 int retval = -ENOMEM;
1418
1419 // allocate memory for our device extension and initialize it
1420 pdx = kzalloc(sizeof(*pdx), GFP_KERNEL);
1421 if (!pdx) {
1422 dev_err(&interface->dev, "Out of memory\n");
1423 goto error;
1424 }
1425
1426 for (i = 0; i < MAX_TRANSAREAS; ++i) // Initialise the wait queues
1427 {
1428 init_waitqueue_head(&pdx->rTransDef[i].wqEvent);
1429 }
1430
1431 // Put initialises for our stuff here. Note that all of *pdx is zero, so
1432 // no need to explicitly zero it.
1433 spin_lock_init(&pdx->charOutLock);
1434 spin_lock_init(&pdx->charInLock);
1435 spin_lock_init(&pdx->stagedLock);
1436
1437 // Initialises from the skeleton stuff
1438 kref_init(&pdx->kref);
1439 mutex_init(&pdx->io_mutex);
1440 spin_lock_init(&pdx->err_lock);
1441 init_usb_anchor(&pdx->submitted);
1442
1443 pdx->udev = usb_get_dev(interface_to_usbdev(interface));
1444 pdx->interface = interface;
1445
1446 // Attempt to identify the device
1447 bcdDevice = pdx->udev->descriptor.bcdDevice;
1448 i = (bcdDevice >> 8);
1449 if (i == 0)
1450 pdx->s1401Type = TYPEU1401;
1451 else if ((i >= 1) && (i <= 23))
1452 pdx->s1401Type = i + 2;
1453 else {
1454 dev_err(&interface->dev, "%s Unknown device. bcdDevice = %d",
1455 __func__, bcdDevice);
1456 goto error;
1457 }
1458 // set up the endpoint information. We only care about the number of EP as
1459 // we know that we are dealing with a 1401 device.
1460 iface_desc = interface->cur_altsetting;
1461 pdx->nPipes = iface_desc->desc.bNumEndpoints;
1462 dev_info(&interface->dev, "1401Type=%d with %d End Points",
1463 pdx->s1401Type, pdx->nPipes);
1464 if ((pdx->nPipes < 3) || (pdx->nPipes > 4))
1465 goto error;
1466
1467 // Allocate the URBs we hold for performing transfers
1468 pdx->pUrbCharOut = usb_alloc_urb(0, GFP_KERNEL); // character output URB
1469 pdx->pUrbCharIn = usb_alloc_urb(0, GFP_KERNEL); // character input URB
1470 pdx->pStagedUrb = usb_alloc_urb(0, GFP_KERNEL); // block transfer URB
1471 if (!pdx->pUrbCharOut || !pdx->pUrbCharIn || !pdx->pStagedUrb) {
1472 dev_err(&interface->dev, "%s URB alloc failed", __func__);
1473 goto error;
1474 }
1475
1476 pdx->pCoherStagedIO =
1477 usb_alloc_coherent(pdx->udev, STAGED_SZ, GFP_KERNEL,
1478 &pdx->pStagedUrb->transfer_dma);
1479 pdx->pCoherCharOut =
1480 usb_alloc_coherent(pdx->udev, OUTBUF_SZ, GFP_KERNEL,
1481 &pdx->pUrbCharOut->transfer_dma);
1482 pdx->pCoherCharIn =
1483 usb_alloc_coherent(pdx->udev, INBUF_SZ, GFP_KERNEL,
1484 &pdx->pUrbCharIn->transfer_dma);
1485 if (!pdx->pCoherCharOut || !pdx->pCoherCharIn || !pdx->pCoherStagedIO) {
1486 dev_err(&interface->dev, "%s Coherent buffer alloc failed",
1487 __func__);
1488 goto error;
1489 }
1490
1491 for (i = 0; i < pdx->nPipes; ++i) {
1492 endpoint = &iface_desc->endpoint[i].desc;
1493 pdx->epAddr[i] = endpoint->bEndpointAddress;
1494 dev_info(&interface->dev, "Pipe %d, ep address %02x", i,
1495 pdx->epAddr[i]);
1496 if (((pdx->nPipes == 3) && (i == 0)) || // if char input end point
1497 ((pdx->nPipes == 4) && (i == 1))) {
1498 pdx->bInterval = endpoint->bInterval; // save the endpoint interrupt interval
1499 dev_info(&interface->dev, "Pipe %d, bInterval = %d", i,
1500 pdx->bInterval);
1501 }
1502 // Detect USB2 by checking last ep size (64 if USB1)
1503 if (i == pdx->nPipes - 1) // if this is the last ep (bulk)
1504 {
1505 pdx->bIsUSB2 =
1506 le16_to_cpu(endpoint->wMaxPacketSize) > 64;
1507 dev_info(&pdx->interface->dev, "USB%d",
1508 pdx->bIsUSB2 + 1);
1509 }
1510 }
1511
1512 /* save our data pointer in this interface device */
1513 usb_set_intfdata(interface, pdx);
1514
1515 /* we can register the device now, as it is ready */
1516 retval = usb_register_dev(interface, &ced_class);
1517 if (retval) {
1518 /* something prevented us from registering this driver */
1519 dev_err(&interface->dev,
1520 "Not able to get a minor for this device.\n");
1521 usb_set_intfdata(interface, NULL);
1522 goto error;
1523 }
1524
1525 /* let the user know what node this device is now attached to */
1526 dev_info(&interface->dev,
1527 "USB CEDUSB device now attached to cedusb #%d",
1528 interface->minor);
1529 return 0;
1530
1531 error:
1532 if (pdx)
1533 kref_put(&pdx->kref, ced_delete); // frees allocated memory
1534 return retval;
1535 }
1536
/* Called by the USB core when the interface goes away. Detaches the pdx
 * from the interface and gives back the minor number first (so no new opens
 * can race in), then under io_mutex drains outstanding I/O, releases the
 * transfer areas and severs the interface link before dropping our kref. */
static void ced_disconnect(struct usb_interface *interface)
{
	DEVICE_EXTENSION *pdx = usb_get_intfdata(interface);
	int minor = interface->minor;	// save for message at the end
	int i;

	usb_set_intfdata(interface, NULL);	// remove the pdx from the interface
	usb_deregister_dev(interface, &ced_class);	// give back our minor device number

	mutex_lock(&pdx->io_mutex);	// stop more I/O starting while...
	ced_draw_down(pdx);	// ...wait for then kill any io
	for (i = 0; i < MAX_TRANSAREAS; ++i) {
		int iErr = ClearArea(pdx, i);	// ...release any used memory
		if (iErr == U14ERR_UNLOCKFAIL)
			dev_err(&pdx->interface->dev, "%s Area %d was in used",
				__func__, i);
	}
	pdx->interface = NULL;	// ...we kill off link to interface
	mutex_unlock(&pdx->io_mutex);

	usb_kill_anchored_urbs(&pdx->submitted);

	kref_put(&pdx->kref, ced_delete);	// decrement our usage count

	dev_info(&interface->dev, "USB cedusb #%d now disconnected", minor);
}
1563
1564 // Wait for all the urbs we know of to be done with, then kill off any that
1565 // are left. NBNB we will need to have a mechanism to stop circular xfers
1566 // from trying to fire off more urbs. We will wait up to 3 seconds for Urbs
1567 // to be done.
1568 void ced_draw_down(DEVICE_EXTENSION * pdx)
1569 {
1570 int time;
1571 dev_dbg(&pdx->interface->dev, "%s called", __func__);
1572
1573 pdx->bInDrawDown = true;
1574 time = usb_wait_anchor_empty_timeout(&pdx->submitted, 3000);
1575 if (!time) // if we timed out we kill the urbs
1576 {
1577 usb_kill_anchored_urbs(&pdx->submitted);
1578 dev_err(&pdx->interface->dev, "%s timed out", __func__);
1579 }
1580 pdx->bInDrawDown = false;
1581 }
1582
1583 static int ced_suspend(struct usb_interface *intf, pm_message_t message)
1584 {
1585 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1586 if (!pdx)
1587 return 0;
1588 ced_draw_down(pdx);
1589
1590 dev_dbg(&pdx->interface->dev, "%s called", __func__);
1591 return 0;
1592 }
1593
1594 static int ced_resume(struct usb_interface *intf)
1595 {
1596 DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
1597 if (!pdx)
1598 return 0;
1599 dev_dbg(&pdx->interface->dev, "%s called", __func__);
1600 return 0;
1601 }
1602
/* Called before the USB core resets the device. Takes io_mutex to block new
 * I/O and drains outstanding urbs; NOTE the mutex is deliberately left held
 * here and released in ced_post_reset once the reset completes. */
static int ced_pre_reset(struct usb_interface *intf)
{
	DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
	dev_dbg(&pdx->interface->dev, "%s", __func__);
	mutex_lock(&pdx->io_mutex);
	ced_draw_down(pdx);
	return 0;
}
1611
/* Called after the USB core has reset the device. Flags -EPIPE so the next
 * I/O path notices the disruption, then releases the io_mutex taken in
 * ced_pre_reset. */
static int ced_post_reset(struct usb_interface *intf)
{
	DEVICE_EXTENSION *pdx = usb_get_intfdata(intf);
	dev_dbg(&pdx->interface->dev, "%s", __func__);

	/* we are sure no URBs are active - no locking needed */
	pdx->errors = -EPIPE;
	mutex_unlock(&pdx->io_mutex);

	return 0;
}
1623
/* USB driver glue: ties our probe/disconnect/PM/reset callbacks and the
 * supported device id table to the USB core. */
static struct usb_driver ced_driver = {
	.name = "cedusb",
	.probe = ced_probe,
	.disconnect = ced_disconnect,
	.suspend = ced_suspend,
	.resume = ced_resume,
	.pre_reset = ced_pre_reset,
	.post_reset = ced_post_reset,
	.id_table = ced_table,
	.supports_autosuspend = 1,
};

/* Registers the driver with the USB core; replaces module_init/module_exit. */
module_usb_driver(ced_driver);
MODULE_LICENSE("GPL");