/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

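/**
 * qset_alloc - allocate a qset from the host controller's DMA pool.
 *
 * The qset is zeroed and its DMA address recorded so it can later be
 * returned to the pool with qset_free().
 */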
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
	struct whc_qset *qset;
	dma_addr_t dma;

	qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
	if (qset == NULL)
		return NULL;
	memset(qset, 0, sizeof(struct whc_qset));

	qset->qset_dma = dma;
	qset->whc = whc;

	INIT_LIST_HEAD(&qset->list_node);
	INIT_LIST_HEAD(&qset->stds);

	return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	bool is_out;

	is_out = usb_pipeout(urb->pipe);

	qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
	if (epcd) {
		qset->max_seq = epcd->bMaxSequence;
		qset->max_burst = epcd->bMaxBurst;
	} else {
		qset->max_seq = 2;
		qset->max_burst = 1;
	}

	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	/* FIXME: where can we obtain these Tx parameters from?  Why
	 * doesn't the chip know what Tx power to use?  It knows the Rx
	 * strength and can presumably guess the Tx power required
	 * from that? */
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE_53_3
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);
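
	/* Open the initial transfer window for a full burst.  This is
	 * presumably a bitmap with one bit per packet, so
	 * (1 << max_burst) - 1 marks max_burst packets as available. */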
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	qset->td_start = qset->td_end = qset->ntds = 0;

	qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
	qset->qh.err_count = 0;
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
	qset->reset = 0;

	qset->qh.status &= ~QH_STATUS_SEQ_MASK;
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
			  gfp_t mem_flags)
{
	struct whc_qset *qset;

	qset = urb->ep->hcpriv;
	if (qset == NULL) {
		qset = qset_alloc(whc, mem_flags);
		if (qset == NULL)
			return NULL;

		qset->ep = urb->ep;
		urb->ep->hcpriv = qset;
		qset_fill_qh(qset, urb);
	}
	return qset;
}

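/**
 * qset_remove_complete - finish removing a qset.
 *
 * Take the qset off the list it is on and wake anyone waiting in
 * qset_delete().
 */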
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 0;
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
	struct whc_std *std;
	enum whc_update update = 0;

	list_for_each_entry(std, &qset->stds, list_node) {
		struct whc_qtd *qtd;
		uint32_t status;

		if (qset->ntds >= WHCI_QSET_TD_MAX
		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
			break;

		if (std->qtd)
			continue; /* already has a qTD */

		qtd = std->qtd = &qset->qtd[qset->td_end];

		/* Fill in setup bytes for control transfers. */
		if (usb_pipecontrol(std->urb->pipe))
			memcpy(qtd->setup, std->urb->setup_packet, 8);

		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
			status |= QTD_STS_LAST_PKT;

		/*
		 * For an IN transfer the iAlt field should be set so
		 * the h/w will automatically advance to the next
		 * transfer.  However, if there are 8 or more TDs
		 * remaining in this transfer then iAlt cannot be set
		 * as it could point to somewhere in this transfer.
		 */
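		/* E.g. (a sketch, assuming WHCI_QSET_TD_MAX == 8): with
		 * td_end == 6 and 3 qTDs remaining, the transfer uses
		 * slots 6, 7 and 0, so iAlt is set to the slot that
		 * follows it: (6 + 3) % 8 == 1. */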
		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
			int ialt;
			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
			status |= QTD_STS_IALT(ialt);
		} else if (usb_pipein(std->urb->pipe))
			qset->pause_after_urb = std->urb;

		if (std->num_pointers)
			qtd->options = cpu_to_le32(QTD_OPT_IOC);
		else
			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

		qtd->status = cpu_to_le32(status);

		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
			update = WHC_UPDATE_UPDATED;

		if (++qset->td_end >= WHCI_QSET_TD_MAX)
			qset->td_end = 0;
		qset->ntds++;
	}

	return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might be still active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
	qset->qtd[qset->td_start].status = 0;

	if (++qset->td_start >= WHCI_QSET_TD_MAX)
		qset->td_start = 0;
	qset->ntds--;
}

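/**
 * qset_copy_bounce_to_sg - copy data from a bounce buffer back to an
 * URB's scatterlist.
 *
 * Walks the scatterlist from the element and offset recorded when the
 * bounce buffer was set up, copying std->len bytes back out.  Only
 * used for IN transfers that were linearized into bounce buffers.
 */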
static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
	struct scatterlist *sg;
	void *bounce;
	size_t remaining, offset;

	bounce = std->bounce_buf;
	remaining = std->len;

	sg = std->bounce_sg;
	offset = std->bounce_offset;

	while (remaining) {
		size_t len;

		len = min(sg->length - offset, remaining);
		memcpy(sg_virt(sg) + offset, bounce, len);

		bounce += len;
		remaining -= len;

		offset += len;
		if (offset >= sg->length) {
			sg = sg_next(sg);
			offset = 0;
		}
	}
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->bounce_buf) {
		bool is_out = usb_pipeout(std->urb->pipe);
		dma_addr_t dma_addr;

		if (std->num_pointers)
			dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
		else
			dma_addr = std->dma_addr;

		dma_unmap_single(whc->wusbhc.dev, dma_addr,
				 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (!is_out)
			qset_copy_bounce_to_sg(whc, std);
		kfree(std->bounce_buf);
	}
	if (std->pl_virt) {
		if (std->dma_addr)
			dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
					 std->num_pointers * sizeof(struct whc_page_list_entry),
					 DMA_TO_DEVICE);
		kfree(std->pl_virt);
		std->pl_virt = NULL;
	}
	kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
			     struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}

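/**
 * qset_fill_page_list - create a page list for an sTD's buffer.
 *
 * Buffers of up to WHCI_PAGE_SIZE bytes are pointed to directly by
 * the qTD; anything larger needs one page pointer per page the buffer
 * touches.  For example (a sketch, assuming a 4 KiB WHCI_PAGE_SIZE):
 * a 10 KiB buffer starting 0x100 bytes into a page ends 0x2900 bytes
 * after the start of that page, so it needs
 * DIV_ROUND_UP(0x2900, 0x1000) == 3 page pointers.
 */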
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
	dma_addr_t dma_addr = std->dma_addr;
	dma_addr_t sp, ep;
	size_t pl_len;
	int p;

	/* Short buffers don't need a page list. */
	if (std->len <= WHCI_PAGE_SIZE) {
		std->num_pointers = 0;
		return 0;
	}

	sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
	ep = dma_addr + std->len;
	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
	std->pl_virt = kmalloc(pl_len, mem_flags);
	if (std->pl_virt == NULL)
		return -ENOMEM;
	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);

	for (p = 0; p < std->num_pointers; p++) {
		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
	}

	return 0;
}

/**
 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	if (wurb->is_async == true)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			   | WUSBCMD_ASYNC_SYNCED_DB
			   | WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			   | WUSBCMD_PERIODIC_SYNCED_DB
			   | WUSBCMD_PERIODIC_QSET_RM);

	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}

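/**
 * qset_new_std - allocate a new sTD and add it to a qset.
 *
 * The new sTD is appended to the qset's list of sTDs; the caller is
 * expected to fill in its length, DMA address and page list.
 */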
static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
				    struct urb *urb, gfp_t mem_flags)
{
	struct whc_std *std;

	std = kzalloc(sizeof(struct whc_std), mem_flags);
	if (std == NULL)
		return NULL;

	std->urb = urb;
	std->qtd = NULL;

	INIT_LIST_HEAD(&std->list_node);
	list_add_tail(&std->list_node, &qset->stds);

	return std;
}

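/**
 * qset_add_urb_sg - map an URB's scatter-gather list directly onto sTDs.
 *
 * Contiguous runs of sg elements are packed into sTDs, building each
 * sTD's page list as it grows.  Returns -EINVAL if an sg element
 * boundary would force a new sTD that does not start on a
 * wMaxPacketSize boundary; the caller then falls back to
 * qset_add_urb_sg_linearize().
 */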
static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
			   gfp_t mem_flags)
{
	size_t remaining;
	struct scatterlist *sg;
	int i;
	int ntds = 0;
	struct whc_std *std = NULL;
	dma_addr_t prev_end = 0;
	size_t pl_len;
	int p = 0;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
		dma_addr_t dma_addr;
		size_t dma_remaining;
		dma_addr_t sp, ep;
		int num_pointers;

		if (remaining == 0)
			break;

		dma_addr = sg_dma_address(sg);
		dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

		while (dma_remaining) {
			size_t dma_len;

			/*
			 * We can use the previous std (if it exists) provided that:
			 * - the previous one ended on a page boundary.
			 * - the current one begins on a page boundary.
			 * - the previous one isn't full.
			 *
			 * If a new std is needed but the previous one
			 * did not end on a wMaxPacketSize boundary
			 * then this sg list cannot be mapped onto
			 * multiple qTDs.  Return an error and let the
			 * caller sort it out.
			 */
			if (!std
			    || (prev_end & (WHCI_PAGE_SIZE-1))
			    || (dma_addr & (WHCI_PAGE_SIZE-1))
			    || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
				if (prev_end % qset->max_packet != 0)
					return -EINVAL;
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;
				ntds++;
				p = 0;
			}

			dma_len = dma_remaining;

			/*
			 * If the remainder in this element doesn't
			 * fit in a single qTD, end the qTD on a
			 * wMaxPacketSize boundary.
			 */
			if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
				dma_len = QTD_MAX_XFER_SIZE - std->len;
				ep = ((dma_addr + dma_len) / qset->max_packet) * qset->max_packet;
				dma_len = ep - dma_addr;
			}

			std->len += dma_len;
			std->ntds_remaining = -1; /* filled in later */

			sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
			ep = dma_addr + dma_len;
			num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
			std->num_pointers += num_pointers;

			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

			std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
			if (std->pl_virt == NULL)
				return -ENOMEM;

			for (; p < std->num_pointers; p++) {
				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
				dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
			}

			prev_end = dma_addr = ep;
			dma_remaining -= dma_len;
			remaining -= dma_len;
		}
	}

	/* Now the number of stds is known, go back and fill in
	   std->ntds_remaining. */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining == -1) {
			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
			std->ntds_remaining = ntds--;
			std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
						       pl_len, DMA_TO_DEVICE);
		}
	}
	return 0;
}

/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
				     struct urb *urb, gfp_t mem_flags)
{
	bool is_out = usb_pipeout(urb->pipe);
	size_t max_std_len;
	size_t remaining;
	int ntds = 0;
	struct whc_std *std = NULL;
	void *bounce = NULL;
	struct scatterlist *sg;
	int i;

	/* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
	max_std_len = qset->max_burst * qset->max_packet;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) {
		size_t len;
		size_t sg_remaining;
		void *orig;

		if (remaining == 0)
			break;

		sg_remaining = min_t(size_t, remaining, sg->length);
		orig = sg_virt(sg);

		while (sg_remaining) {
			if (!std || std->len == max_std_len) {
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;
				std->bounce_buf = kmalloc(max_std_len, mem_flags);
				if (std->bounce_buf == NULL)
					return -ENOMEM;
				std->bounce_sg = sg;
				std->bounce_offset = orig - sg_virt(sg);
				bounce = std->bounce_buf;
				ntds++;
			}

			len = min(sg_remaining, max_std_len - std->len);

			if (is_out)
				memcpy(bounce, orig, len);

			std->len += len;
			std->ntds_remaining = -1; /* filled in later */

			bounce += len;
			orig += len;
			sg_remaining -= len;
			remaining -= len;
		}
	}

	/*
	 * For each of the new sTDs, map the bounce buffers, create
	 * page lists (if necessary), and fill in std->ntds_remaining.
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining != -1)
			continue;

		std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
					       is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			return -ENOMEM;

		std->ntds_remaining = ntds--;
	}

	return 0;
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
		 gfp_t mem_flags)
{
	struct whc_urb *wurb;
	int remaining = urb->transfer_buffer_length;
	u64 transfer_dma = urb->transfer_dma;
	int ntds_remaining;
	int ret;

	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
	if (wurb == NULL)
		goto err_no_mem;
	urb->hcpriv = wurb;
	wurb->qset = qset;
	wurb->urb = urb;
	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

	if (urb->sg) {
		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
		if (ret == -EINVAL) {
			qset_free_stds(qset, urb);
			ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
		}
		if (ret < 0)
			goto err_no_mem;
		return 0;
	}

	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
	if (ntds_remaining == 0)
		ntds_remaining = 1;

	while (ntds_remaining) {
		struct whc_std *std;
		size_t std_len;

		std_len = remaining;
		if (std_len > QTD_MAX_XFER_SIZE)
			std_len = QTD_MAX_XFER_SIZE;

		std = qset_new_std(whc, qset, urb, mem_flags);
		if (std == NULL)
			goto err_no_mem;

		std->dma_addr = transfer_dma;
		std->len = std_len;
		std->ntds_remaining = ntds_remaining;

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			goto err_no_mem;

		ntds_remaining--;
		remaining -= std_len;
		transfer_dma += std_len;
	}

	return 0;

err_no_mem:
	qset_free_stds(qset, urb);
	return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
		     struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
	if (status & QTD_STS_HALTED) {
		if (status & QTD_STS_DBE)
			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
		else if (status & QTD_STS_BABBLE)
			return -EOVERFLOW;
		else if (status & QTD_STS_RCE)
			return -ETIME;
		return -EPIPE;
	}
	if (usb_pipein(urb->pipe)
	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
	    && urb->actual_length < urb->transfer_buffer_length)
		return -EREMOTEIO;
	return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the transferred bytes from the qTD; if the urb
 * is completely transferred or (in the case of an IN only) the LPF is
 * set, then the transfer is complete and the urb should be returned
 * to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
			  struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);

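	/* The length field of a completed qTD holds the bytes left
	 * untransferred, so std->len minus that remainder is what was
	 * actually transferred. */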
	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);

	/*
	 * Transfers for this URB are complete?  Then return it to the
	 * USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur.  Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
			struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);

	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	qset->remove = 1;
}

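/**
 * qset_free - return an unused qset to the DMA pool.
 *
 * The qset must no longer be in use; see qset_delete(), which waits
 * for the qset's removal to complete before freeing it.
 */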
void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}