/* Cypress West Bridge API source file (cyasdma.c)
## ===========================
## Copyright (C) 2010  Cypress Semiconductor
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor
## Boston, MA  02110-1301, USA.
## ===========================
*/

#include "../../include/linux/westbridge/cyashal.h"
#include "../../include/linux/westbridge/cyasdma.h"
#include "../../include/linux/westbridge/cyaslowlevel.h"
#include "../../include/linux/westbridge/cyaserr.h"
#include "../../include/linux/westbridge/cyasregs.h"

/*
 * Add the DMA queue entry to the free list to be re-used later
 */
static void
cy_as_dma_add_request_to_free_queue(cy_as_device *dev_p,
	cy_as_dma_queue_entry *req_p)
{
	uint32_t imask;
	imask = cy_as_hal_disable_interrupts();

	req_p->next_p = dev_p->dma_freelist_p;
	dev_p->dma_freelist_p = req_p;

	cy_as_hal_enable_interrupts(imask);
}

/*
 * Get a DMA queue entry from the free list.
 */
static cy_as_dma_queue_entry *
cy_as_dma_get_dma_queue_entry(cy_as_device *dev_p)
{
	cy_as_dma_queue_entry *req_p;
	uint32_t imask;

	cy_as_hal_assert(dev_p->dma_freelist_p != 0);

	imask = cy_as_hal_disable_interrupts();
	req_p = dev_p->dma_freelist_p;
	dev_p->dma_freelist_p = req_p->next_p;
	cy_as_hal_enable_interrupts(imask);

	return req_p;
}

/*
 * Set the maximum size that the West Bridge hardware
 * can handle in a single DMA operation.  This size
 * may change for the P <-> U endpoints as a function
 * of the endpoint type and whether we are running
 * at full speed or high speed.
 */
cy_as_return_status_t
cy_as_dma_set_max_dma_size(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, uint32_t size)
{
	/* In MTP mode, EP2 is allowed to have all max sizes. */
	if ((!dev_p->is_mtp_firmware) || (ep != 0x02)) {
		if (size < 64 || size > 1024)
			return CY_AS_ERROR_INVALID_SIZE;
	}

	CY_AS_NUM_EP(dev_p, ep)->maxhwdata = (uint16_t)size;
	return CY_AS_ERROR_SUCCESS;
}
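
/*
 * Usage sketch (illustrative only; the endpoint number and the
 * is_high_speed flag are assumptions, not part of this API): a USB
 * bulk endpoint is typically limited to 64-byte packets at full
 * speed and 512-byte packets at high speed, so a caller might do:
 *
 *	cy_as_dma_set_max_dma_size(dev_p, 3, is_high_speed ? 512 : 64);
 */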

/*
 * The callback for requests sent to West Bridge
 * to relay endpoint data.  Endpoint data for EP0
 * and EP1 are sent using mailbox requests.  This
 * is the callback that is called when a response
 * to a mailbox request to send data is received.
 */
static void
cy_as_dma_request_callback(
	cy_as_device *dev_p,
	uint8_t context,
	cy_as_ll_request_response *req_p,
	cy_as_ll_request_response *resp_p,
	cy_as_return_status_t ret)
{
	uint16_t v;
	uint16_t datacnt;
	cy_as_end_point_number_t ep;

	(void)context;

	cy_as_log_debug_message(5, "cy_as_dma_request_callback called");

	/*
	 * extract the return code from the firmware
	 */
	if (ret == CY_AS_ERROR_SUCCESS) {
		if (cy_as_ll_request_response__get_code(resp_p) !=
			CY_RESP_SUCCESS_FAILURE)
			ret = CY_AS_ERROR_INVALID_RESPONSE;
		else
			ret = cy_as_ll_request_response__get_word(resp_p, 0);
	}

	/*
	 * extract the endpoint number and the transferred byte count
	 * from the request.
	 */
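	/*
	 * the layout of this word mirrors the packing done in
	 * cy_as_dma_send_next_dma_request() below: bits 9:0 carry the
	 * byte count and bit 13 carries the endpoint number (only EP0
	 * and EP1 are relayed through the mailbox path).
	 */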
	v = cy_as_ll_request_response__get_word(req_p, 0);
	ep = (cy_as_end_point_number_t)((v >> 13) & 0x01);

	if (ret == CY_AS_ERROR_SUCCESS) {
		/*
		 * if the firmware returns success,
		 * all of the data requested was
		 * transferred.  there are no partial
		 * transfers.
		 */
		datacnt = v & 0x3FF;
	} else {
		/*
		 * if the firmware returned an error, no data was transferred.
		 */
		datacnt = 0;
	}

	/*
	 * queue the request and response data structures for use with the
	 * next EP0 or EP1 request.
	 */
	if (ep == 0) {
		dev_p->usb_ep0_dma_req = req_p;
		dev_p->usb_ep0_dma_resp = resp_p;
	} else {
		dev_p->usb_ep1_dma_req = req_p;
		dev_p->usb_ep1_dma_resp = resp_p;
	}

	/*
	 * call the DMA complete function so we can
	 * signal that this portion of the transfer
	 * has completed.  if the low level request
	 * was canceled, we do not need to signal
	 * the completed function as the only way a
	 * cancel can happen is via the DMA cancel
	 * function.
	 */
	if (ret != CY_AS_ERROR_CANCELED)
		cy_as_dma_completed_callback(dev_p->tag, ep, datacnt, ret);
}

/*
 * Set the DRQ mask register for the given endpoint number.  If state is
 * CyTrue, the DRQ interrupt for the given endpoint is enabled, otherwise
 * it is disabled.
 */
static void
cy_as_dma_set_drq(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, cy_bool state)
{
	uint16_t mask;
	uint16_t v;
	uint32_t intval;

	/*
	 * there are no DRQ register bits for EP0 and EP1
	 */
	if (ep == 0 || ep == 1)
		return;

	/*
	 * disable interrupts while we do this to be sure the state of the
	 * DRQ mask register is always well defined.
	 */
	intval = cy_as_hal_disable_interrupts();

	/*
	 * set the DRQ bit to the given state for the ep given
	 */
	mask = (1 << ep);
	v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK);

	if (state)
		v |= mask;
	else
		v &= ~mask;

	cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_P0_DRQ_MASK, v);
	cy_as_hal_enable_interrupts(intval);
}

/*
 * Send the next DMA request for the endpoint given
 */
static void
cy_as_dma_send_next_dma_request(cy_as_device *dev_p, cy_as_dma_end_point *ep_p)
{
	uint32_t datacnt;
	void *buf_p;
	cy_as_dma_queue_entry *dma_p;

	cy_as_log_debug_message(6, "cy_as_dma_send_next_dma_request called");

	/* If the queue is empty, nothing to do */
	dma_p = ep_p->queue_p;
	if (dma_p == 0) {
		/*
		 * there are no pending DMA requests
		 * for this endpoint.  disable the DRQ
		 * mask bits to ensure no interrupts
		 * will be triggered by this endpoint
		 * until someone is interested in the data.
		 */
		cy_as_dma_set_drq(dev_p, ep_p->ep, cy_false);
		return;
	}

	cy_as_dma_end_point_set_running(ep_p);

	/*
	 * get the number of bytes that still
	 * need to be transferred in this request.
	 */
	datacnt = dma_p->size - dma_p->offset;
	cy_as_hal_assert(datacnt >= 0);

	/*
	 * the HAL layer should never limit the size
	 * of the transfer to something less than
	 * maxhwdata; otherwise, the data would be sent
	 * in packets of the wrong size.
	 */
	cy_as_hal_assert(ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE
		|| ep_p->maxhaldata >= ep_p->maxhwdata);

	/*
	 * clamp the number of bytes still to be transferred
	 * to the limits of the HAL layer.
	 */
	if (ep_p->maxhaldata == CY_AS_DMA_MAX_SIZE_HW_SIZE) {
		if (datacnt > ep_p->maxhwdata)
			datacnt = ep_p->maxhwdata;
	} else {
		if (datacnt > ep_p->maxhaldata)
			datacnt = ep_p->maxhaldata;
	}

	/*
	 * find a pointer to the data that needs to be transferred
	 */
	buf_p = (((char *)dma_p->buf_p) + dma_p->offset);

	/*
	 * mark a request in transit
	 */
	cy_as_dma_end_point_set_in_transit(ep_p);

	if (ep_p->ep == 0 || ep_p->ep == 1) {
		/*
		 * if this is a WRITE request on EP0 or EP1,
		 * we write the data via an EP_DATA request
		 * to west bridge via the mailbox registers.
		 * if this is a READ request, we do nothing
		 * and the data will arrive via an EP_DATA
		 * request from west bridge.  in the request
		 * handler for the USB context we will pass
		 * the data back into the DMA module.
		 */
		if (dma_p->readreq == cy_false) {
			uint16_t v;
			uint16_t len;
			cy_as_ll_request_response *resp_p;
			cy_as_ll_request_response *req_p;
			cy_as_return_status_t ret;

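			/*
			 * the request length is the payload rounded up to
			 * 16-bit words; the extra increment below reserves
			 * one more word for the control word packed into
			 * word 0 of the request (a reading inferred from
			 * the packing code that follows).
			 */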
			len = (uint16_t)(datacnt / 2);
			if (datacnt % 2)
				len++;

			len++;

			if (ep_p->ep == 0) {
				req_p = dev_p->usb_ep0_dma_req;
				resp_p = dev_p->usb_ep0_dma_resp;
				dev_p->usb_ep0_dma_req = 0;
				dev_p->usb_ep0_dma_resp = 0;
			} else {
				req_p = dev_p->usb_ep1_dma_req;
				resp_p = dev_p->usb_ep1_dma_resp;
				dev_p->usb_ep1_dma_req = 0;
				dev_p->usb_ep1_dma_resp = 0;
			}

			cy_as_hal_assert(req_p != 0);
			cy_as_hal_assert(resp_p != 0);
			cy_as_hal_assert(len <= 64);

			cy_as_ll_init_request(req_p, CY_RQT_USB_EP_DATA,
				CY_RQT_USB_RQT_CONTEXT, len);

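			/*
			 * control word layout, as inferred from this packing
			 * code: bits 9:0 = byte count, bit 11 = last packet,
			 * bit 12 = first packet, bit 13 = endpoint number.
			 * bit 14 is always set here; this file does not
			 * document its meaning.
			 */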
			v = (uint16_t)(datacnt | (ep_p->ep << 13) | (1 << 14));
			if (dma_p->offset == 0)
				v |= (1 << 12); /* Set the first packet bit */
			if (dma_p->offset + datacnt == dma_p->size)
				v |= (1 << 11); /* Set the last packet bit */

			cy_as_ll_request_response__set_word(req_p, 0, v);
			cy_as_ll_request_response__pack(req_p,
				1, datacnt, buf_p);

			cy_as_ll_init_response(resp_p, 1);

			ret = cy_as_ll_send_request(dev_p, req_p, resp_p,
				cy_false, cy_as_dma_request_callback);
			if (ret == CY_AS_ERROR_SUCCESS)
				cy_as_log_debug_message(5,
					"+++ send EP 0/1 data via mailbox registers");
			else
				cy_as_log_debug_message(5,
					"+++ error sending EP 0/1 data via mailbox "
					"registers - CY_AS_ERROR_TIMEOUT");

			if (ret != CY_AS_ERROR_SUCCESS)
				cy_as_dma_completed_callback(dev_p->tag,
					ep_p->ep, 0, ret);
		}
	} else {
		/*
		 * this is a DMA request on an endpoint that is accessible
		 * via the P port.  ask the HAL layer's DMA engine to
		 * perform it.  the amount of data sent is limited by the
		 * HAL max size as well as what we need to send.  if
		 * ep_p->maxhaldata is set to a value larger than the
		 * endpoint buffer size, then we will pass more than a
		 * single buffer worth of data to the HAL layer and expect
		 * the HAL layer to divide the data into packets.  the last
		 * parameter here (ep_p->maxhwdata) gives the packet size for
		 * the data so the HAL layer knows what the packet size should
		 * be.
		 */
		if (cy_as_dma_end_point_is_direction_in(ep_p))
			cy_as_hal_dma_setup_write(dev_p->tag,
				ep_p->ep, buf_p, datacnt, ep_p->maxhwdata);
		else
			cy_as_hal_dma_setup_read(dev_p->tag,
				ep_p->ep, buf_p, datacnt, ep_p->maxhwdata);

		/*
		 * the DRQ interrupt for this endpoint should be enabled
		 * so that the data transfer progresses at interrupt time.
		 */
		cy_as_dma_set_drq(dev_p, ep_p->ep, cy_true);
	}
}

/*
 * This function is called when the HAL layer has
 * completed the last requested DMA operation.
 * This function sends/receives the next batch of
 * data associated with the current DMA request,
 * or, if it is complete, moves to the next DMA request.
 */
void
cy_as_dma_completed_callback(cy_as_hal_device_tag tag,
	cy_as_end_point_number_t ep, uint32_t cnt, cy_as_return_status_t status)
{
	uint32_t mask;
	cy_as_dma_queue_entry *req_p;
	cy_as_dma_end_point *ep_p;
	cy_as_device *dev_p = cy_as_device_find_from_tag(tag);

	/* Make sure the HAL layer gave us good parameters */
	cy_as_hal_assert(dev_p != 0);
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);
	cy_as_hal_assert(ep < 16);

	/* Get the endpoint ptr */
	ep_p = CY_AS_NUM_EP(dev_p, ep);
	cy_as_hal_assert(ep_p->queue_p != 0);

	/* Get a pointer to the current entry in the queue */
	mask = cy_as_hal_disable_interrupts();
	req_p = ep_p->queue_p;

	/* Update the offset to reflect the data actually received or sent */
	req_p->offset += cnt;

	/*
	 * if we are still sending/receiving the current packet,
	 * send/receive the next chunk.  basically, we keep going
	 * if we have not sent/received enough data, we are
	 * not doing a packet operation, and the last packet
	 * sent or received was a full sized packet.  in other
	 * words, when we are NOT doing a packet operation, a
	 * less than full size packet (a short packet) will
	 * terminate the operation.
	 *
	 * note: if this is an EP1 request and the request has
	 * timed out, it means the buffer is not free.
	 * we have to resend the data.
	 *
	 * note: for the MTP data transfers, the DMA transfer
	 * for the next packet can only be started asynchronously,
	 * after a firmware event notifies that the device is ready.
	 */
	if (((req_p->offset != req_p->size) && (req_p->packet == cy_false) &&
		((cnt == ep_p->maxhaldata) || ((cnt == ep_p->maxhwdata) &&
		((ep != CY_AS_MTP_READ_ENDPOINT) ||
		(cnt == dev_p->usb_max_tx_size)))))
		|| ((ep == 1) && (status == CY_AS_ERROR_TIMEOUT))) {
		cy_as_hal_enable_interrupts(mask);

		/*
		 * and send the request again to send the next block of
		 * data.  special handling for MTP transfers on EPs 2
		 * and 6.  the send_next_request will be processed based
		 * on the event sent by the firmware.
		 */
		if ((ep == CY_AS_MTP_WRITE_ENDPOINT) || (
			(ep == CY_AS_MTP_READ_ENDPOINT) &&
			(!cy_as_dma_end_point_is_direction_in(ep_p))))
			cy_as_dma_end_point_set_stopped(ep_p);
		else
			cy_as_dma_send_next_dma_request(dev_p, ep_p);
	} else {
		/*
		 * we get here if ...
		 *	we have sent or received all of the data
		 * or
		 *	we are doing a packet operation
		 * or
		 *	we received a short packet
		 */

		/*
		 * remove this entry from the DMA queue for this endpoint.
		 */
		cy_as_dma_end_point_clear_in_transit(ep_p);
		ep_p->queue_p = req_p->next_p;
		if (ep_p->last_p == req_p) {
			/*
			 * we have removed the last packet from the DMA queue;
			 * disable the interrupt associated with this endpoint.
			 */
			ep_p->last_p = 0;
			cy_as_hal_enable_interrupts(mask);
			cy_as_dma_set_drq(dev_p, ep, cy_false);
		} else
			cy_as_hal_enable_interrupts(mask);

		if (req_p->cb) {
			/*
			 * if the request has a callback associated with it,
			 * call the callback to tell the interested party that
			 * this DMA request has completed.
			 *
			 * note, we set the in_callback bit to ensure that we
			 * cannot recursively call an API function that is
			 * synchronous only from a callback.
			 */
			cy_as_device_set_in_callback(dev_p);
			(*req_p->cb)(dev_p, ep, req_p->buf_p,
				req_p->offset, status);
			cy_as_device_clear_in_callback(dev_p);
		}

		/*
		 * we are done with this request, put it on the freelist to be
		 * reused at a later time.
		 */
		cy_as_dma_add_request_to_free_queue(dev_p, req_p);

		if (ep_p->queue_p == 0) {
			/*
			 * if the endpoint is out of DMA entries, set the
			 * endpoint as stopped.
			 */
			cy_as_dma_end_point_set_stopped(ep_p);

			/*
			 * the DMA queue is empty, wake any task waiting on
			 * the queue to drain.
			 */
			if (cy_as_dma_end_point_is_sleeping(ep_p)) {
				cy_as_dma_end_point_set_wake_state(ep_p);
				cy_as_hal_wake(&ep_p->channel);
			}
		} else {
			/*
			 * if the queued operation is an MTP transfer,
			 * wait until a firmware event before sending
			 * down the next DMA request.
			 */
			if ((ep == CY_AS_MTP_WRITE_ENDPOINT) ||
				((ep == CY_AS_MTP_READ_ENDPOINT) &&
				(!cy_as_dma_end_point_is_direction_in(ep_p))) ||
				((ep == dev_p->storage_read_endpoint) &&
				(!cy_as_device_is_p2s_dma_start_recvd(dev_p)))
				|| ((ep == dev_p->storage_write_endpoint) &&
				(!cy_as_device_is_p2s_dma_start_recvd(dev_p))))
				cy_as_dma_end_point_set_stopped(ep_p);
			else
				cy_as_dma_send_next_dma_request(dev_p, ep_p);
		}
	}
}

/*
 * This function is used to kick start DMA on a given
 * channel.  If DMA is already running on the given
 * endpoint, nothing happens.  If DMA is not running,
 * the first entry is pulled from the DMA queue and
 * sent/received to/from the West Bridge device.
 */
cy_as_return_status_t
cy_as_dma_kick_start(cy_as_device *dev_p, cy_as_end_point_number_t ep)
{
	cy_as_dma_end_point *ep_p;
	cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE);

	ep_p = CY_AS_NUM_EP(dev_p, ep);

	/* We are already running */
	if (cy_as_dma_end_point_is_running(ep_p))
		return CY_AS_ERROR_SUCCESS;

	cy_as_dma_send_next_dma_request(dev_p, ep_p);
	return CY_AS_ERROR_SUCCESS;
}

/*
 * This function stops the given endpoint.  Stopping an endpoint cancels
 * any pending DMA operations and frees all resources associated with the
 * given endpoint.
 */
static cy_as_return_status_t
cy_as_dma_stop_end_point(cy_as_device *dev_p, cy_as_end_point_number_t ep)
{
	cy_as_return_status_t ret;
	cy_as_dma_end_point *ep_p = CY_AS_NUM_EP(dev_p, ep);

	/*
	 * cancel any pending DMA requests associated with this endpoint.
	 * this cancels any DMA requests at the HAL layer as well as
	 * dequeues any request that is currently pending.
	 */
	ret = cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_CANCELED);
	if (ret != CY_AS_ERROR_SUCCESS)
		return ret;

	/*
	 * destroy the sleep channel
	 */
	if (!cy_as_hal_destroy_sleep_channel(&ep_p->channel)
		&& ret == CY_AS_ERROR_SUCCESS)
		ret = CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED;

	/*
	 * free the memory associated with this endpoint
	 */
	cy_as_hal_free(ep_p);

	/*
	 * set the data structure ptr to something sane since the
	 * previous pointer is now free.
	 */
	dev_p->endp[ep] = 0;

	return ret;
}

/*
 * This method stops the USB stack.  This is an internal function that does
 * all of the work of destroying the USB stack without the protections that
 * we provide to the API (i.e., stopping a stack that is not running).
 */
static cy_as_return_status_t
cy_as_dma_stop_internal(cy_as_device *dev_p)
{
	cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS;
	cy_as_return_status_t lret;
	cy_as_end_point_number_t i;

	/*
	 * stop all of the endpoints.  this cancels all DMA requests, and
	 * frees all resources associated with each endpoint.
	 */
	for (i = 0; i < sizeof(dev_p->endp)/(sizeof(dev_p->endp[0])); i++) {
		lret = cy_as_dma_stop_end_point(dev_p, i);
		if (lret != CY_AS_ERROR_SUCCESS && ret == CY_AS_ERROR_SUCCESS)
			ret = lret;
	}

	/*
	 * now, free the list of DMA request structures that we use to
	 * manage DMA requests.
	 */
	while (dev_p->dma_freelist_p) {
		cy_as_dma_queue_entry *req_p;
		uint32_t imask = cy_as_hal_disable_interrupts();

		req_p = dev_p->dma_freelist_p;
		dev_p->dma_freelist_p = req_p->next_p;

		cy_as_hal_enable_interrupts(imask);

		cy_as_hal_free(req_p);
	}

	cy_as_ll_destroy_request(dev_p, dev_p->usb_ep0_dma_req);
	cy_as_ll_destroy_request(dev_p, dev_p->usb_ep1_dma_req);
	cy_as_ll_destroy_response(dev_p, dev_p->usb_ep0_dma_resp);
	cy_as_ll_destroy_response(dev_p, dev_p->usb_ep1_dma_resp);

	return ret;
}


/*
 * CyAsDmaStop()
 *
 * This function shuts down the DMA module.  All resources
 * associated with the DMA module will be freed.  This
 * routine is the API stop function.  It ensures that we
 * are stopping a stack that is actually running and then
 * calls the internal function to do the work.
 */
cy_as_return_status_t
cy_as_dma_stop(cy_as_device *dev_p)
{
	cy_as_return_status_t ret;

	ret = cy_as_dma_stop_internal(dev_p);
	cy_as_device_set_dma_stopped(dev_p);

	return ret;
}

/*
 * CyAsDmaStart()
 *
 * This function initializes the DMA module to ensure it is up and running.
 */
cy_as_return_status_t
cy_as_dma_start(cy_as_device *dev_p)
{
	cy_as_end_point_number_t i;
	uint16_t cnt;

	if (cy_as_device_is_dma_running(dev_p))
		return CY_AS_ERROR_ALREADY_RUNNING;

	/*
	 * pre-allocate DMA queue structures to be used in the interrupt
	 * context
	 */
	for (cnt = 0; cnt < 32; cnt++) {
		cy_as_dma_queue_entry *entry_p = (cy_as_dma_queue_entry *)
			cy_as_hal_alloc(sizeof(cy_as_dma_queue_entry));
		if (entry_p == 0) {
			cy_as_dma_stop_internal(dev_p);
			return CY_AS_ERROR_OUT_OF_MEMORY;
		}
		cy_as_dma_add_request_to_free_queue(dev_p, entry_p);
	}

	/*
	 * pre-allocate the DMA requests for sending EP0
	 * and EP1 data to west bridge
	 */
	dev_p->usb_ep0_dma_req = cy_as_ll_create_request(dev_p,
		CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64);
	dev_p->usb_ep1_dma_req = cy_as_ll_create_request(dev_p,
		CY_RQT_USB_EP_DATA, CY_RQT_USB_RQT_CONTEXT, 64);

	if (dev_p->usb_ep0_dma_req == 0 || dev_p->usb_ep1_dma_req == 0) {
		cy_as_dma_stop_internal(dev_p);
		return CY_AS_ERROR_OUT_OF_MEMORY;
	}
	dev_p->usb_ep0_dma_req_save = dev_p->usb_ep0_dma_req;

	dev_p->usb_ep0_dma_resp = cy_as_ll_create_response(dev_p, 1);
	dev_p->usb_ep1_dma_resp = cy_as_ll_create_response(dev_p, 1);
	if (dev_p->usb_ep0_dma_resp == 0 || dev_p->usb_ep1_dma_resp == 0) {
		cy_as_dma_stop_internal(dev_p);
		return CY_AS_ERROR_OUT_OF_MEMORY;
	}
	dev_p->usb_ep0_dma_resp_save = dev_p->usb_ep0_dma_resp;

	/*
	 * set the dev_p->endp to all zeros to ensure cleanup is possible if
	 * an error occurs during initialization.
	 */
	cy_as_hal_mem_set(dev_p->endp, 0, sizeof(dev_p->endp));

	/*
	 * now, iterate through each of the endpoints and initialize each
	 * one.
	 */
	for (i = 0; i < sizeof(dev_p->endp)/sizeof(dev_p->endp[0]); i++) {
		dev_p->endp[i] = (cy_as_dma_end_point *)
			cy_as_hal_alloc(sizeof(cy_as_dma_end_point));
		if (dev_p->endp[i] == 0) {
			cy_as_dma_stop_internal(dev_p);
			return CY_AS_ERROR_OUT_OF_MEMORY;
		}
		cy_as_hal_mem_set(dev_p->endp[i], 0,
			sizeof(cy_as_dma_end_point));

		dev_p->endp[i]->ep = i;
		dev_p->endp[i]->queue_p = 0;
		dev_p->endp[i]->last_p = 0;

		cy_as_dma_set_drq(dev_p, i, cy_false);

		if (!cy_as_hal_create_sleep_channel(&dev_p->endp[i]->channel))
			return CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED;
	}

	/*
	 * tell the HAL layer who to call when the
	 * HAL layer completes a DMA request
	 */
	cy_as_hal_dma_register_callback(dev_p->tag,
		cy_as_dma_completed_callback);

	/*
	 * mark DMA as up and running on this device
	 */
	cy_as_device_set_dma_running(dev_p);

	return CY_AS_ERROR_SUCCESS;
}
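
/*
 * Bring-up sketch (illustrative, not mandated by this file):
 * cy_as_dma_start() must run before any requests are queued, since it
 * allocates the per-endpoint structures and the free list that
 * cy_as_dma_queue_request() consumes; cy_as_dma_stop() releases them.
 *
 *	ret = cy_as_dma_start(dev_p);
 *	if (ret == CY_AS_ERROR_SUCCESS) {
 *		... queue and run DMA transfers ...
 *		cy_as_dma_stop(dev_p);
 *	}
 */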

/*
 * Wait for all entries in the DMA queue associated with
 * the given endpoint to be drained.  This function
 * will not return until all the DMA data has been
 * transferred.
 */
cy_as_return_status_t
cy_as_dma_drain_queue(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, cy_bool kickstart)
{
	cy_as_dma_end_point *ep_p;
	int loopcount = 1000;
	uint32_t mask;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);

	/*
	 * if the endpoint is empty of traffic, we return
	 * with success immediately
	 */
	mask = cy_as_hal_disable_interrupts();
	if (ep_p->queue_p == 0) {
		cy_as_hal_enable_interrupts(mask);
		return CY_AS_ERROR_SUCCESS;
	} else {
		/*
		 * add 10 seconds to the timeout value for each 64 KB
		 * segment of data to be transferred.
		 */
		if (ep_p->queue_p->size > 0x10000)
			loopcount += ((ep_p->queue_p->size / 0x10000) * 1000);
	}
	cy_as_hal_enable_interrupts(mask);

	/* If we are already sleeping on this endpoint, it is an error */
	if (cy_as_dma_end_point_is_sleeping(ep_p))
		return CY_AS_ERROR_NESTED_SLEEP;

	/*
	 * we disable the endpoint while the queue drains to prevent
	 * any additional requests from being queued while we are waiting
	 */
	cy_as_dma_enable_end_point(dev_p, ep,
		cy_false, cy_as_direction_dont_change);

	if (kickstart) {
		/*
		 * now, kick start the DMA if necessary
		 */
		cy_as_dma_kick_start(dev_p, ep);
	}

	/*
	 * check one last time before we begin sleeping to see if the
	 * queue is drained.
	 */
	if (ep_p->queue_p == 0) {
		cy_as_dma_enable_end_point(dev_p, ep, cy_true,
			cy_as_direction_dont_change);
		return CY_AS_ERROR_SUCCESS;
	}

	while (loopcount-- > 0) {
		/*
		 * sleep for 10 ms maximum (per loop) while
		 * waiting for the transfer to complete.
		 */
		cy_as_dma_end_point_set_sleep_state(ep_p);
		cy_as_hal_sleep_on(&ep_p->channel, 10);

		/* If we timed out, the sleep bit will still be set */
		cy_as_dma_end_point_set_wake_state(ep_p);

		/* Check the queue to see if it is drained */
		if (ep_p->queue_p == 0) {
			/*
			 * clear the endpoint running and in transit flags
			 * for the endpoint, now that its DMA queue is empty.
			 */
			cy_as_dma_end_point_clear_in_transit(ep_p);
			cy_as_dma_end_point_set_stopped(ep_p);

			cy_as_dma_enable_end_point(dev_p, ep,
				cy_true, cy_as_direction_dont_change);
			return CY_AS_ERROR_SUCCESS;
		}
	}

	/*
	 * cancel the DMA operation that has timed out so that later
	 * operations on this queue can proceed.
	 */
	cy_as_dma_cancel(dev_p, ep, CY_AS_ERROR_TIMEOUT);
	cy_as_dma_enable_end_point(dev_p, ep,
		cy_true, cy_as_direction_dont_change);
	return CY_AS_ERROR_TIMEOUT;
}

/*
 * This function queues a read or write request in the DMA queue
 * for a given endpoint.  The direction of the
 * entry will be inferred from the endpoint direction.
 */
cy_as_return_status_t
cy_as_dma_queue_request(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, void *mem_p,
	uint32_t size, cy_bool pkt, cy_bool readreq, cy_as_dma_callback cb)
{
	uint32_t mask;
	cy_as_dma_queue_entry *entry_p;
	cy_as_dma_end_point *ep_p;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);

	if (!cy_as_dma_end_point_is_enabled(ep_p))
		return CY_AS_ERROR_ENDPOINT_DISABLED;

	entry_p = cy_as_dma_get_dma_queue_entry(dev_p);

	entry_p->buf_p = mem_p;
	entry_p->cb = cb;
	entry_p->size = size;
	entry_p->offset = 0;
	entry_p->packet = pkt;
	entry_p->readreq = readreq;

	mask = cy_as_hal_disable_interrupts();
	entry_p->next_p = 0;
	if (ep_p->last_p)
		ep_p->last_p->next_p = entry_p;
	ep_p->last_p = entry_p;
	if (ep_p->queue_p == 0)
		ep_p->queue_p = entry_p;
	cy_as_hal_enable_interrupts(mask);

	return CY_AS_ERROR_SUCCESS;
}
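
/*
 * Usage sketch (illustrative; 'buf' and 'my_done_cb' are hypothetical,
 * not part of this API): queue a 512-byte non-packet write request and
 * kick the DMA engine; my_done_cb() runs when the transfer completes
 * or is canceled.
 *
 *	ret = cy_as_dma_queue_request(dev_p, ep, buf, 512,
 *		cy_false, cy_false, my_done_cb);
 *	if (ret == CY_AS_ERROR_SUCCESS)
 *		ret = cy_as_dma_kick_start(dev_p, ep);
 */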

/*
 * This function enables or disables an endpoint for DMA
 * queueing.  If an endpoint is disabled, any queued requests
 * continue to be processed, but no new requests can be queued.
 */
cy_as_return_status_t
cy_as_dma_enable_end_point(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, cy_bool enable, cy_as_dma_direction dir)
{
	cy_as_dma_end_point *ep_p;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);

	if (dir == cy_as_direction_out)
		cy_as_dma_end_point_set_direction_out(ep_p);
	else if (dir == cy_as_direction_in)
		cy_as_dma_end_point_set_direction_in(ep_p);

	/*
	 * get the maximum size of data buffer the HAL
	 * layer can accept.  this is used when the DMA
	 * module is sending DMA requests to the HAL.
	 * the DMA module will never send down a request
	 * that is greater than this value.
	 *
	 * for EP0 and EP1, we can send no more than 64
	 * bytes of data at one time as this is the maximum
	 * size of a packet that can be sent via these
	 * endpoints.
	 */
	if (ep == 0 || ep == 1)
		ep_p->maxhaldata = 64;
	else
		ep_p->maxhaldata = cy_as_hal_dma_max_request_size(
			dev_p->tag, ep);

	if (enable)
		cy_as_dma_end_point_enable(ep_p);
	else
		cy_as_dma_end_point_disable(ep_p);

	return CY_AS_ERROR_SUCCESS;
}

/*
 * This function cancels any DMA operations pending with the HAL layer as
 * well as any DMA operation queued on the endpoint.
 */
cy_as_return_status_t
cy_as_dma_cancel(
	cy_as_device *dev_p,
	cy_as_end_point_number_t ep,
	cy_as_return_status_t err)
{
	uint32_t mask;
	cy_as_dma_end_point *ep_p;
	cy_as_dma_queue_entry *entry_p;
	cy_bool epstate;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep >= sizeof(dev_p->endp)/sizeof(dev_p->endp[0]))
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);

	if (ep_p) {
		/* Remember the state of the endpoint */
		epstate = cy_as_dma_end_point_is_enabled(ep_p);

		/*
		 * disable the endpoint so no more DMA packets can be
		 * queued.
		 */
		cy_as_dma_enable_end_point(dev_p, ep,
			cy_false, cy_as_direction_dont_change);

		/*
		 * don't allow any interrupts from this endpoint
		 * while we get the most current request off of
		 * the queue.
		 */
		cy_as_dma_set_drq(dev_p, ep, cy_false);

		/*
		 * cancel any pending request queued in the HAL layer
		 */
		if (cy_as_dma_end_point_in_transit(ep_p))
			cy_as_hal_dma_cancel_request(dev_p->tag, ep_p->ep);

		/*
		 * shut down the DMA for this endpoint so no
		 * more data is transferred
		 */
		cy_as_dma_end_point_set_stopped(ep_p);

		/*
		 * mark the endpoint as not in transit, because we are
		 * going to consume any queued requests
		 */
		cy_as_dma_end_point_clear_in_transit(ep_p);

		/*
		 * now, remove each entry in the queue and call the
		 * associated callback stating that the request was
		 * canceled.
		 */
		ep_p->last_p = 0;
		while (ep_p->queue_p != 0) {
			/* Disable interrupts to manipulate the queue */
			mask = cy_as_hal_disable_interrupts();

			/* Remove an entry from the queue */
			entry_p = ep_p->queue_p;
			ep_p->queue_p = entry_p->next_p;

			/* Ok, the queue has been updated, we can
			 * turn interrupts back on */
			cy_as_hal_enable_interrupts(mask);

			/* Call the callback indicating we have
			 * canceled the DMA */
			if (entry_p->cb)
				entry_p->cb(dev_p, ep,
					entry_p->buf_p, entry_p->size, err);

			cy_as_dma_add_request_to_free_queue(dev_p, entry_p);
		}

		if (ep == 0 || ep == 1) {
			/*
			 * if this endpoint is zero or one, we need to
			 * clear the queue of any pending CY_RQT_USB_EP_DATA
			 * requests as these are pending requests to send
			 * data to the west bridge device.
			 */
			cy_as_ll_remove_ep_data_requests(dev_p, ep);
		}

		if (epstate) {
			/*
			 * the endpoint started out enabled, so we
			 * re-enable the endpoint here.
			 */
			cy_as_dma_enable_end_point(dev_p, ep,
				cy_true, cy_as_direction_dont_change);
		}
	}

	return CY_AS_ERROR_SUCCESS;
}

cy_as_return_status_t
cy_as_dma_received_data(cy_as_device *dev_p,
	cy_as_end_point_number_t ep, uint32_t dsize, void *data)
{
	cy_as_dma_queue_entry *dma_p;
	uint8_t *src_p, *dest_p;
	cy_as_dma_end_point *ep_p;
	uint32_t xfersize;

	/*
	 * make sure the endpoint is valid
	 */
	if (ep != 0 && ep != 1)
		return CY_AS_ERROR_INVALID_ENDPOINT;

	/* Get the endpoint pointer based on the endpoint number */
	ep_p = CY_AS_NUM_EP(dev_p, ep);
	dma_p = ep_p->queue_p;
	if (dma_p == 0)
		return CY_AS_ERROR_SUCCESS;

	/*
	 * if the data received exceeds the size of the DMA buffer,
	 * clip the data to the size of the buffer.  this can lead
	 * to losing some data, but is no different from doing
	 * non-packet reads on the other endpoints.
	 */
	if (dsize > dma_p->size - dma_p->offset)
		dsize = dma_p->size - dma_p->offset;

	/*
	 * copy the data from the request packet to the DMA buffer
	 * for the endpoint
	 */
	src_p = (uint8_t *)data;
	dest_p = ((uint8_t *)(dma_p->buf_p)) + dma_p->offset;
	xfersize = dsize;
	while (xfersize-- > 0)
		*dest_p++ = *src_p++;

	/* Signal the DMA module that we have
	 * received data for this EP request */
	cy_as_dma_completed_callback(dev_p->tag,
		ep, dsize, CY_AS_ERROR_SUCCESS);

	return CY_AS_ERROR_SUCCESS;
}