/*
 * Copyright (C) 2001-2004 by David Brownell
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/* this file is part of ehci-hcd.c */

/*-------------------------------------------------------------------------*/

/*
 * EHCI hardware queue manipulation ... the core.  QH/QTD manipulation.
 *
 * Control, bulk, and interrupt traffic all use "qh" lists.  They list "qtd"
 * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
 * buffers needed for the larger number).  We use one QH per endpoint, queue
 * multiple urbs (all three types) per endpoint.  URBs may need several qtds.
 *
 * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with
 * interrupts) needs careful scheduling.  Performance improvements can be
 * an ongoing challenge.  That's in "ehci-sched.c".
 *
 * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
 * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
 * (b) special fields in qh entries or (c) split iso entries.  TTs will
 * buffer low/full speed data so the host collects it at high speed.
 */

/*-------------------------------------------------------------------------*/
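
/* A rough picture of the async structures handled below (illustrative
 * sketch only, not a spec figure; field names abbreviated):
 *
 *	ehci->async --> QH (ep A) --> QH (ep B) --> ... (circular list)
 *	                 |
 *	                 +-- qtd_list:  qtd --> qtd --> ... --> dummy qtd
 *
 * Each qtd carries up to five 4 KB buffer pointers, so one qtd moves at
 * most 20 KB (closer to 16 KB when the buffer isn't page-aligned).
 */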

/* fill a qtd, returning how much of the buffer we were able to queue up */

static int
qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
		size_t len, int token, int maxpacket)
{
	int	i, count;
	u64	addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely (len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
					(u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
	qtd->length = count;

	return count;
}
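
/* Capacity example (the numbers follow from the loop above): with a
 * page-aligned buffer the five entries cover 5 * 4096 = 20480 bytes; in
 * the worst case the first entry maps only the tail of its page, leaving
 * roughly 16 KB.  A trailing partial packet is trimmed (count % maxpacket),
 * so only the last qtd of a transfer may end on a short packet.
 */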

/*-------------------------------------------------------------------------*/

static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	struct ehci_qh_hw *hw = qh->hw;

	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
	hw->hw_alt_next = EHCI_LIST_END(ehci);

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev.  Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
		unsigned	is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
		epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
		if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
			hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
			usb_settoggle (qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb ();
	hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}

/* if it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void
qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty (&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry (qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update (ehci, qh, qtd);
}

/*-------------------------------------------------------------------------*/

static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh);

static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct ehci_hcd		*ehci = hcd_to_ehci(hcd);
	struct ehci_qh		*qh = ep->hcpriv;
	unsigned long		flags;

	spin_lock_irqsave(&ehci->lock, flags);
	qh->clearing_tt = 0;
	if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
			&& HC_IS_RUNNING(hcd->state))
		qh_link_async(ehci, qh);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
		struct urb *urb, u32 token)
{

	/* If an async split transaction gets an error or is unlinked,
	 * the TT buffer may be left in an indeterminate state.  We
	 * have to clear the TT buffer.
	 *
	 * Note: this routine is never called for Isochronous transfers.
	 */
	if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
#ifdef DEBUG
		struct usb_device *tt = urb->dev->tt->hub;
		dev_dbg(&tt->dev,
			"clear tt buffer port %d, a%d ep%d t%08x\n",
			urb->dev->ttport, urb->dev->devnum,
			usb_pipeendpoint(urb->pipe), token);
#endif /* DEBUG */
		if (!ehci_is_TDI(ehci)
				|| urb->dev->tt->hub !=
				   ehci_to_hcd(ehci)->self.root_hub) {
			if (usb_hub_clear_tt_buffer(urb) == 0)
				qh->clearing_tt = 1;
		} else {

			/* REVISIT ARC-derived cores don't clear the root
			 * hub TT buffer in this way...
			 */
		}
	}
}

static int qtd_copy_status (
	struct ehci_hcd *ehci,
	struct urb *urb,
	size_t length,
	u32 token
)
{
	int	status = -EINPROGRESS;

	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely (QTD_PID (token) != 2))
		urb->actual_length += length - QTD_LENGTH (token);

	/* don't modify error codes */
	if (unlikely(urb->unlinked))
		return status;

	/* force cleanup after short read; not always an error */
	if (unlikely (IS_SHORT_READ (token)))
		status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			status = -EOVERFLOW;
		/* CERR nonzero + halt --> stall */
		} else if (QTD_CERR(token)) {
			status = -EPIPE;

		/* In theory, more than one of the following bits can be set
		 * since they are sticky and the transaction is retried.
		 * Which to test first is rather arbitrary.
		 */
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			status = (QTD_PID (token) == 1) /* IN ? */
				? -ENOSR	/* hc couldn't read data */
				: -ECOMM;	/* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad CRC, wrong PID, etc */
			ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n",
				urb->dev->devpath,
				usb_pipeendpoint(urb->pipe),
				usb_pipein(urb->pipe) ? "in" : "out");
			status = -EPROTO;
		} else {	/* unknown */
			status = -EPROTO;
		}

		ehci_vdbg (ehci,
			"dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice (urb->pipe),
			usb_pipeendpoint (urb->pipe),
			usb_pipein (urb->pipe) ? "in" : "out",
			token, status);
	}

	return status;
}
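
/* Summary of the mapping above (recap of this function, not an exhaustive
 * spec reference):
 *	babble			-> -EOVERFLOW
 *	halt with CERR > 0	-> -EPIPE   (stall)
 *	missed microframe	-> -EPROTO
 *	data buffer error	-> -ENOSR (IN) or -ECOMM (OUT)
 *	XactErr / unknown	-> -EPROTO
 *	short read		-> -EREMOTEIO (forces cleanup; not always an error)
 */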

static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
	if (likely (urb->hcpriv != NULL)) {
		struct ehci_qh	*qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
		}
		qh_put (qh);
	}

	if (unlikely(urb->unlinked)) {
		COUNT(ehci->stats.unlink);
	} else {
		/* report non-error and short read status as zero */
		if (status == -EINPROGRESS || status == -EREMOTEIO)
			status = 0;
		COUNT(ehci->stats.complete);
	}

#ifdef EHCI_URB_TRACE
	ehci_dbg (ehci,
		"%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint (urb->pipe),
		usb_pipein (urb->pipe) ? "in" : "out",
		status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
	spin_unlock (&ehci->lock);
	usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
	spin_lock (&ehci->lock);
}

static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);

static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);

/*
 * Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current.  Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	struct ehci_qtd		*last, *end = qh->dummy;
	struct list_head	*entry, *tmp;
	int			last_status;
	int			stopped;
	unsigned		count = 0;
	u8			state;
	struct ehci_qh_hw	*hw = qh->hw;

	if (unlikely (list_empty (&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE:  unlinking expects to be done in queue order.
	 *
	 * It's a bug for qh->qh_state to be anything other than
	 * QH_STATE_IDLE, unless our caller is scan_async() or
	 * scan_periodic().
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

 rescan:
	last = NULL;
	last_status = -EINPROGRESS;
	qh->needs_rescan = 0;

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe (entry, tmp, &qh->qtd_list) {
		struct ehci_qtd	*qtd;
		struct urb	*urb;
		u32		token = 0;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* clean up any state from previous QTD ...*/
		if (last) {
			if (likely (last->urb != urb)) {
				ehci_urb_done(ehci, last->urb, last_status);
				count++;
				last_status = -EINPROGRESS;
			}
			ehci_qtd_free (ehci, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb ();
		token = hc32_to_cpu(ehci, qtd->hw_token);

		/* always clean up qtds the hc de-activated */
 retry_xacterr:
		if ((token & QTD_STS_ACTIVE) == 0) {

			/* on STALL, error, and short reads this urb must
			 * complete and all its qtds must be recycled.
			 */
			if ((token & QTD_STS_HALT) != 0) {

				/* retry transaction errors until we
				 * reach the software xacterr limit
				 */
				if ((token & QTD_STS_XACT) &&
						QTD_CERR(token) == 0 &&
						++qh->xacterrs < QH_XACTERR_MAX &&
						!urb->unlinked) {
					ehci_dbg(ehci,
	"detected XactErr len %zu/%zu retry %d\n",
	qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);

					/* reset the token in the qtd and the
					 * qh overlay (which still contains
					 * the qtd) so that we pick up from
					 * where we left off
					 */
					token &= ~QTD_STS_HALT;
					token |= QTD_STS_ACTIVE |
							(EHCI_TUNE_CERR << 10);
					qtd->hw_token = cpu_to_hc32(ehci,
							token);
					wmb();
					hw->hw_token = cpu_to_hc32(ehci,
							token);
					goto retry_xacterr;
				}
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 *
			 * other short reads won't stop the queue, including
			 * control transfers (status stage handles that) or
			 * most other single-qtd reads ... the queue stops if
			 * URB_SHORT_NOT_OK was set so the driver submitting
			 * the urbs could clean it up.
			 */
			} else if (IS_SHORT_READ (token)
					&& !(qtd->hw_alt_next
						& EHCI_LIST_END(ehci))) {
				stopped = 1;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely (!stopped
				&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
			break;

		/* scan the whole queue for unlinks whenever it stops */
		} else {
			stopped = 1;

			/* cancel everything if we halt, suspend, etc */
			if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
				last_status = -ESHUTDOWN;

			/* this qtd is active; skip it unless a previous qtd
			 * for its urb faulted, or its urb was canceled.
			 */
			else if (last_status == -EINPROGRESS && !urb->unlinked)
				continue;

			/* qh unlinked; token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_hc32(ehci, qtd->qtd_dma)
						== hw->hw_current) {
				token = hc32_to_cpu(ehci, hw->hw_token);

				/* An unlink may leave an incomplete
				 * async transaction in the TT buffer.
				 * We have to clear it.
				 */
				ehci_clear_tt_buffer(ehci, qh, urb, token);
			}
		}

		/* unless we already know the urb's status, collect qtd status
		 * and update count of bytes transferred.  in common short read
		 * cases with only one data qtd (including control transfers),
		 * queue processing won't halt.  but with two or more qtds (for
		 * example, with a 32 KB transfer), when the first qtd gets a
		 * short read the second must be removed by hand.
		 */
		if (last_status == -EINPROGRESS) {
			last_status = qtd_copy_status(ehci, urb,
					qtd->length, token);
			if (last_status == -EREMOTEIO
					&& (qtd->hw_alt_next
						& EHCI_LIST_END(ehci)))
				last_status = -EINPROGRESS;

			/* As part of low/full-speed endpoint-halt processing
			 * we must clear the TT buffer (11.17.5).
			 */
			if (unlikely(last_status != -EINPROGRESS &&
					last_status != -EREMOTEIO)) {
				/* The TT's in some hubs malfunction when they
				 * receive this request following a STALL (they
				 * stop sending isochronous packets).  Since a
				 * STALL can't leave the TT buffer in a busy
				 * state (if you believe Figures 11-48 - 11-51
				 * in the USB 2.0 spec), we won't clear the TT
				 * buffer in this case.  Strictly speaking this
				 * is a violation of the spec.
				 */
				if (last_status != -EPIPE)
					ehci_clear_tt_buffer(ehci, qh, urb,
							token);
			}
		}

		/* if we're removing something not at the queue head,
		 * patch the hardware queue pointer.
		 */
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry (qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}

		/* remove qtd; it's recycled after possible urb completion */
		list_del (&qtd->qtd_list);
		last = qtd;

		/* reinit the xacterr counter for the next qtd */
		qh->xacterrs = 0;
	}

	/* last urb's completion might still need calling */
	if (likely (last != NULL)) {
		ehci_urb_done(ehci, last->urb, last_status);
		count++;
		ehci_qtd_free (ehci, last);
	}

	/* Do we need to rescan for URBs dequeued during a giveback? */
	if (unlikely(qh->needs_rescan)) {
		/* If the QH is already unlinked, do the rescan now. */
		if (state == QH_STATE_IDLE)
			goto rescan;

		/* Otherwise we have to wait until the QH is fully unlinked.
		 * Our caller will start an unlink if qh->needs_rescan is
		 * set.  But if an unlink has already started, nothing needs
		 * to be done.
		 */
		if (state != QH_STATE_LINKED)
			qh->needs_rescan = 0;
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(ehci, qh);
			break;
		case QH_STATE_LINKED:
			/* We won't refresh a QH that's linked (after the HC
			 * stopped the queue).  That avoids a race:
			 *  - HC reads first part of QH;
			 *  - CPU updates that first part and the token;
			 *  - HC reads rest of that QH, including token
			 * Result:  HC gets an inconsistent image, and then
			 * DMAs to/from the wrong memory (corrupting it).
			 *
			 * That should be rare for interrupt transfers,
			 * except maybe high bandwidth ...
			 */

			/* Tell the caller to start an unlink */
			qh->needs_rescan = 1;
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}

/*-------------------------------------------------------------------------*/

// high bandwidth multiplier, as encoded in highspeed endpoint descriptors
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
// ... and packet size, for any kind of endpoint descriptor
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
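
// Worked example (illustrative): a high-speed high-bandwidth interrupt
// endpoint with wMaxPacketSize 0x1400 gives max_packet() = 0x400 (1024
// bytes) and hb_mult() = 3, i.e. up to three 1024-byte packets per uframe.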

/*
 * reverse of qh_urb_transaction:  free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*qtd_list
) {
	struct list_head	*entry, *temp;

	list_for_each_safe (entry, temp, qtd_list) {
		struct ehci_qtd	*qtd;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		list_del (&qtd->qtd_list);
		ehci_qtd_free (ehci, qtd);
	}
}

/*
 * create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *
qh_urb_transaction (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	struct list_head	*head,
	gfp_t			flags
) {
	struct ehci_qtd		*qtd, *qtd_prev;
	dma_addr_t		buf;
	int			len, this_sg_len, maxpacket;
	int			is_input;
	u32			token;
	int			i;
	struct scatterlist	*sg;

	/*
	 * URBs map to sequences of QTDs:  one logical transaction
	 */
	qtd = ehci_qtd_alloc (ehci, flags);
	if (unlikely (!qtd))
		return NULL;
	list_add_tail (&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein (urb->pipe);
	if (usb_pipecontrol (urb->pipe)) {
		/* SETUP pid */
		qtd_fill(ehci, qtd, urb->setup_dma,
				sizeof (struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * data transfer stage:  buffer setup
	 */
	i = urb->num_sgs;
	if (len > 0 && i > 0) {
		sg = urb->sg;
		buf = sg_dma_address(sg);

		/* urb->transfer_buffer_length may be smaller than the
		 * size of the scatterlist (or vice versa)
		 */
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	} else {
		sg = NULL;
		buf = urb->transfer_dma;
		this_sg_len = len;
	}

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
				maxpacket);
		this_sg_len -= this_qtd_len;
		len -= this_qtd_len;
		buf += this_qtd_len;

		/*
		 * short reads advance to a "magic" dummy instead of the next
		 * qtd ... that forces the queue to stop, for manual cleanup.
		 * (this will usually be overridden later.)
		 */
		if (is_input)
			qtd->hw_alt_next = ehci->async->hw->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(this_sg_len <= 0)) {
			if (--i <= 0 || len <= 0)
				break;
			sg = sg_next(sg);
			buf = sg_dma_address(sg);
			this_sg_len = min_t(int, sg_dma_len(sg), len);
		}

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc (ehci, flags);
		if (unlikely (!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
		list_add_tail (&qtd->qtd_list, head);
	}

	/*
	 * unless the caller requires manual cleanup after short reads,
	 * have the alt_next mechanism keep the queue running after the
	 * last data qtd (the only one, for control and most other cases).
	 */
	if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol (urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END(ehci);

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely (urb->transfer_buffer_length != 0)) {
		int	one_more = 0;

		if (usb_pipecontrol (urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out"  */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk (urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc (ehci, flags);
			if (unlikely (!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
			list_add_tail (&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(ehci, qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
		qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
	return head;

cleanup:
	qtd_list_free (ehci, urb, head);
	return NULL;
}
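
/* Example shape of the resulting list (illustrative sketch): a control
 * transfer with a 12-byte IN data stage becomes three qtds --
 *
 *	SETUP (8 bytes) --> DATA1 IN (12 bytes) --> STATUS OUT (0 bytes)
 *
 * with IOC set on the last qtd unless URB_NO_INTERRUPT was requested.
 */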

/*-------------------------------------------------------------------------*/

// Would be best to create all qh's from config descriptors,
// when each interface/altsetting is established.  Unlink
// any previous qh and cancel its urbs first; endpoints are
// implicitly reset then (data toggle too).
// That'd mean updating how usbcore talks to HCDs. (2.7?)


/*
 * Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled.  For highspeed, that's
 * just one microframe in the s-mask.  For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
static struct ehci_qh *
qh_make (
	struct ehci_hcd		*ehci,
	struct urb		*urb,
	gfp_t			flags
) {
	struct ehci_qh		*qh = ehci_qh_alloc (ehci, flags);
	u32			info1 = 0, info2 = 0;
	int			is_input, type;
	int			maxp = 0;
	struct usb_tt		*tt = urb->dev->tt;
	struct ehci_qh_hw	*hw;

	if (!qh)
		return qh;

	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint (urb->pipe) << 8;
	info1 |= usb_pipedevice (urb->pipe) << 0;

	is_input = usb_pipein (urb->pipe);
	type = usb_pipetype (urb->pipe);
	maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);

	/* 1024 byte maxpacket is a hardware ceiling.  High bandwidth
	 * acts like up to 3KB, but is built from smaller packets.
	 */
	if (max_packet(maxp) > 1024) {
		ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
		goto done;
	}

	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;
		qh->stamp = ehci->periodic_stamp;

		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;

			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				urb->interval = 1;
			} else if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period << 3;
			}
		} else {
			int		think_time;

			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);

			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		// SPLIT, gap, CSPLIT+DATA
				qh->c_usecs = qh->usecs + HS_USECS (0);
				qh->usecs = HS_USECS (1);
			} else {		// SPLIT+DATA, gap, CSPLIT
				qh->usecs += HS_USECS (1);
				qh->c_usecs = HS_USECS (0);
			}

			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US (think_time +
					usb_calc_bus_time (urb->dev->speed,
						is_input, 0, max_packet (maxp)));
			qh->period = urb->interval;
			if (qh->period > ehci->periodic_size) {
				qh->period = ehci->periodic_size;
				urb->interval = qh->period;
			}
		}
	}

	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;

	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */

	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;

		info2 |= (EHCI_TUNE_MULT_TT << 30);

		/* Some Freescale processors have an erratum in which the
		 * port number in the queue head was 0..N-1 instead of 1..N.
		 */
		if (ehci_has_fsl_portno_bug(ehci))
			info2 |= (urb->dev->ttport-1) << 23;
		else
			info2 |= urb->dev->ttport << 23;

		/* set the address of the TT; for TDI's integrated
		 * root hub tt, leave it zeroed.
		 */
		if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
			info2 |= tt->hub->devnum << 16;

		/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets c-mask } */

		break;

	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			/* The USB spec says that high speed bulk endpoints
			 * always use 512 byte maxpacket.  But some device
			 * vendors decided to ignore that, and MSFT is happy
			 * to help them do so.  So now people expect to use
			 * such nonconformant devices with Linux too; sigh.
			 */
			info1 |= max_packet(maxp) << 16;
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet (maxp) << 16;
			info2 |= hb_mult (maxp) << 30;
		}
		break;
	default:
		dbg ("bogus dev %p speed %d", urb->dev, urb->dev->speed);
done:
		qh_put (qh);
		return NULL;
	}

	/* NOTE:  if (PIPE_INTERRUPT) { scheduler sets s-mask } */

	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	hw = qh->hw;
	hw->hw_info1 = cpu_to_hc32(ehci, info1);
	hw->hw_info2 = cpu_to_hc32(ehci, info2);
	usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
	qh_refresh (ehci, qh);
	return qh;
}
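
/* Recap of the fields assembled above, as implied by the shifts used in
 * qh_make(); the EHCI spec (section 3.6) is the authoritative layout:
 *	info1:	device address [6:0], endpoint [11:8], EPS [13:12],
 *		toggle-from-qtd bit 14, maxpacket [26:16],
 *		control-endpoint flag bit 27, NAK reload [31:28]
 *	info2:	TT hub address [22:16], TT port [29:23], mult [31:30]
 *		(plus s-mask/c-mask, filled in later by the scheduler)
 */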
| 953 | |
| 954 | /*-------------------------------------------------------------------------*/ |
| 955 | |
| 956 | /* move qh (and its qtds) onto async queue; maybe enable queue. */ |
| 957 | |
| 958 | static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) |
| 959 | { |
Stefan Roese | 6dbd682 | 2007-05-01 09:29:37 -0700 | [diff] [blame] | 960 | __hc32 dma = QH_NEXT(ehci, qh->qh_dma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | struct ehci_qh *head; |
| 962 | |
Alan Stern | 914b701 | 2009-06-29 10:47:30 -0400 | [diff] [blame] | 963 | /* Don't link a QH if there's a Clear-TT-Buffer pending */ |
| 964 | if (unlikely(qh->clearing_tt)) |
| 965 | return; |
| 966 | |
Alan Stern | 3a44494 | 2009-08-19 12:22:06 -0400 | [diff] [blame] | 967 | WARN_ON(qh->qh_state != QH_STATE_IDLE); |
| 968 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 | /* (re)start the async schedule? */ |
| 970 | head = ehci->async; |
| 971 | timer_action_done (ehci, TIMER_ASYNC_OFF); |
| 972 | if (!head->qh_next.qh) { |
Benjamin Herrenschmidt | 083522d | 2006-12-15 06:54:08 +1100 | [diff] [blame] | 973 | u32 cmd = ehci_readl(ehci, &ehci->regs->command); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 974 | |
| 975 | if (!(cmd & CMD_ASE)) { |
| 976 | /* in case a clear of CMD_ASE didn't take yet */ |
Benjamin Herrenschmidt | 083522d | 2006-12-15 06:54:08 +1100 | [diff] [blame] | 977 | (void)handshake(ehci, &ehci->regs->status, |
| 978 | STS_ASS, 0, 150); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 979 | cmd |= CMD_ASE | CMD_RUN; |
Benjamin Herrenschmidt | 083522d | 2006-12-15 06:54:08 +1100 | [diff] [blame] | 980 | ehci_writel(ehci, cmd, &ehci->regs->command); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 | ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; |
| 982 | /* posted write need not be known to HC yet ... */ |
| 983 | } |
| 984 | } |
| 985 | |
Alan Stern | a455212 | 2009-06-11 14:56:22 -0400 | [diff] [blame] | 986 | /* clear halt and/or toggle; and maybe recover from silicon quirk */ |
Alan Stern | 3a44494 | 2009-08-19 12:22:06 -0400 | [diff] [blame] | 987 | qh_refresh(ehci, qh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 988 | |
| 989 | /* splice right after start */ |
| 990 | qh->qh_next = head->qh_next; |
Alek Du | 3807e26 | 2009-07-14 07:23:29 +0800 | [diff] [blame] | 991 | qh->hw->hw_next = head->hw->hw_next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 992 | wmb (); |
| 993 | |
| 994 | head->qh_next.qh = qh; |
Alek Du | 3807e26 | 2009-07-14 07:23:29 +0800 | [diff] [blame] | 995 | head->hw->hw_next = dma; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 996 | |
Alan Stern | 7a0f0d9 | 2009-07-31 10:40:22 -0400 | [diff] [blame] | 997 | qh_get(qh); |
Alan Stern | ef4638f | 2009-07-31 10:41:40 -0400 | [diff] [blame] | 998 | qh->xacterrs = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 999 | qh->qh_state = QH_STATE_LINKED; |
| 1000 | /* qtd completions reported later by interrupt */ |
| 1001 | } |
| 1002 | |
| 1003 | /*-------------------------------------------------------------------------*/ |
| 1004 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1005 | /* |
| 1006 | * For control/bulk/interrupt, return QH with these TDs appended. |
| 1007 | * Allocates and initializes the QH if necessary. |
| 1008 | * Returns null if it can't allocate a QH it needs to. |
| 1009 | * If the QH has TDs (urbs) already, that's great. |
| 1010 | */ |
| 1011 | static struct ehci_qh *qh_append_tds ( |
| 1012 | struct ehci_hcd *ehci, |
| 1013 | struct urb *urb, |
| 1014 | struct list_head *qtd_list, |
| 1015 | int epnum, |
| 1016 | void **ptr |
| 1017 | ) |
| 1018 | { |
| 1019 | struct ehci_qh *qh = NULL; |
Al Viro | fd05e72 | 2008-04-28 07:00:16 +0100 | [diff] [blame] | 1020 | __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1021 | |
| 1022 | qh = (struct ehci_qh *) *ptr; |
| 1023 | if (unlikely (qh == NULL)) { |
| 1024 | /* can't sleep here, we have ehci->lock... */ |
| 1025 | qh = qh_make (ehci, urb, GFP_ATOMIC); |
| 1026 | *ptr = qh; |
| 1027 | } |
| 1028 | if (likely (qh != NULL)) { |
| 1029 | struct ehci_qtd *qtd; |
| 1030 | |
| 1031 | if (unlikely (list_empty (qtd_list))) |
| 1032 | qtd = NULL; |
| 1033 | else |
| 1034 | qtd = list_entry (qtd_list->next, struct ehci_qtd, |
| 1035 | qtd_list); |
| 1036 | |
| 1037 | /* control qh may need patching ... */ |
| 1038 | if (unlikely (epnum == 0)) { |
| 1039 | |
| 1040 | /* usb_reset_device() briefly reverts to address 0 */ |
| 1041 | if (usb_pipedevice (urb->pipe) == 0) |
Alek Du | 3807e26 | 2009-07-14 07:23:29 +0800 | [diff] [blame] | 1042 | qh->hw->hw_info1 &= ~qh_addr_mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | } |
| 1044 | |
| 1045 | /* just one way to queue requests: swap with the dummy qtd. |
| 1046 | * only hc or qh_refresh() ever modify the overlay. |
| 1047 | */ |
| 1048 | if (likely (qtd != NULL)) { |
| 1049 | struct ehci_qtd *dummy; |
| 1050 | dma_addr_t dma; |
Stefan Roese | 6dbd682 | 2007-05-01 09:29:37 -0700 | [diff] [blame] | 1051 | __hc32 token; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1052 | |
| 1053 | /* to avoid racing the HC, use the dummy td instead of |
| 1054 | * the first td of our list (becomes new dummy). both |
| 1055 | * tds stay deactivated until we're done, when the |
| 1056 | * HC is allowed to fetch the old dummy (4.10.2). |
| 1057 | */ |
| 1058 | token = qtd->hw_token; |
Stefan Roese | 6dbd682 | 2007-05-01 09:29:37 -0700 | [diff] [blame] | 1059 | qtd->hw_token = HALT_BIT(ehci); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1060 | wmb (); |
| 1061 | dummy = qh->dummy; |
| 1062 | |
| 1063 | dma = dummy->qtd_dma; |
| 1064 | *dummy = *qtd; |
| 1065 | dummy->qtd_dma = dma; |
| 1066 | |
| 1067 | list_del (&qtd->qtd_list); |
| 1068 | list_add (&dummy->qtd_list, qtd_list); |
Luis R. Rodriguez | 7d283ae | 2008-08-06 15:21:26 -0700 | [diff] [blame] | 1069 | list_splice_tail(qtd_list, &qh->qtd_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1070 | |
Stefan Roese | 6dbd682 | 2007-05-01 09:29:37 -0700 | [diff] [blame] | 1071 | ehci_qtd_init(ehci, qtd, qtd->qtd_dma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | qh->dummy = qtd; |
| 1073 | |
| 1074 | /* hc must see the new dummy at list end */ |
| 1075 | dma = qtd->qtd_dma; |
| 1076 | qtd = list_entry (qh->qtd_list.prev, |
| 1077 | struct ehci_qtd, qtd_list); |
Stefan Roese | 6dbd682 | 2007-05-01 09:29:37 -0700 | [diff] [blame] | 1078 | qtd->hw_next = QTD_NEXT(ehci, dma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1079 | |
| 1080 | /* let the hc process these next qtds */ |
| 1081 | wmb (); |
| 1082 | dummy->hw_token = token; |
| 1083 | |
| 1084 | urb->hcpriv = qh_get (qh); |
| 1085 | } |
| 1086 | } |
| 1087 | return qh; |
| 1088 | } |
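/*
 * Editorial sketch (not driver code): a stand-alone model of the
 * dummy-qtd swap performed above.  Everything here -- struct model_qtd,
 * model_append(), MODEL_HALT, and the sample values -- is invented for
 * illustration; real qtds live in DMA-coherent memory, use
 * hardware-endian fields, and carry five buffer pointers each.
 */
#include <stdio.h>

#define MODEL_HALT	0x40	/* stands in for HALT_BIT(ehci) */

struct model_qtd {
	unsigned int	hw_token;	/* HC skips a td whose HALT bit is set */
	unsigned int	dma;		/* stands in for qtd_dma */
};

/* The HC already points at the queue's halted dummy, so giving the dummy
 * the new td's contents (while keeping its own bus address) and re-arming
 * its token last publishes new work without exposing a half-linked td.
 */
static void model_append(struct model_qtd *dummy, struct model_qtd *first)
{
	unsigned int token = first->hw_token;	/* save the live token */
	unsigned int dma = dummy->dma;

	first->hw_token = MODEL_HALT;	/* neither td is live during the swap */
	*dummy = *first;		/* old dummy takes over first's work */
	dummy->dma = dma;		/* ...but keeps its own bus address */
	first->hw_token = MODEL_HALT;	/* old first td becomes the new dummy */
	/* the real code also splices the qtd lists, links the new dummy at
	 * the list tail, and issues wmb() before the final token write
	 */
	dummy->hw_token = token;	/* this single write hands work to the HC */
}

int main(void)
{
	struct model_qtd dummy = { MODEL_HALT, 0x1000 };
	struct model_qtd first = { 0x80008d80, 0x2000 };	/* an active token */

	model_append(&dummy, &first);
	printf("dummy: token %#x dma %#x\n", dummy.hw_token, dummy.dma);
	printf("first: token %#x dma %#x\n", first.hw_token, first.dma);
	return 0;
}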
| 1089 | |
| 1090 | /*-------------------------------------------------------------------------*/ |
| 1091 | |
| 1092 | static int |
| 1093 | submit_async ( |
| 1094 | struct ehci_hcd *ehci, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | struct urb *urb, |
| 1096 | struct list_head *qtd_list, |
Al Viro | 55016f1 | 2005-10-21 03:21:58 -0400 | [diff] [blame] | 1097 | gfp_t mem_flags |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1098 | ) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | int epnum; |
| 1100 | unsigned long flags; |
| 1101 | struct ehci_qh *qh = NULL; |
Alan Stern | e9df41c | 2007-08-08 11:48:02 -0400 | [diff] [blame] | 1102 | int rc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | |
Alan Stern | e9df41c | 2007-08-08 11:48:02 -0400 | [diff] [blame] | 1104 | epnum = urb->ep->desc.bEndpointAddress; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1105 | |
| 1106 | #ifdef EHCI_URB_TRACE |
David Daney | eb34a90 | 2011-01-25 09:59:36 -0800 | [diff] [blame] | 1107 | { |
| 1108 | struct ehci_qtd *qtd; |
| 1109 | qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list); |
| 1110 | ehci_dbg(ehci, |
| 1111 | "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", |
| 1112 | __func__, urb->dev->devpath, urb, |
| 1113 | epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", |
| 1114 | urb->transfer_buffer_length, |
| 1115 | qtd, urb->ep->hcpriv); |
| 1116 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1117 | #endif |
| 1118 | |
| 1119 | spin_lock_irqsave (&ehci->lock, flags); |
Alan Stern | 541c7d4 | 2010-06-22 16:39:10 -0400 | [diff] [blame] | 1120 | if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { |
Benjamin Herrenschmidt | 8de9840 | 2005-11-25 09:59:46 +1100 | [diff] [blame] | 1121 | rc = -ESHUTDOWN; |
| 1122 | goto done; |
| 1123 | } |
Alan Stern | e9df41c | 2007-08-08 11:48:02 -0400 | [diff] [blame] | 1124 | rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); |
| 1125 | if (unlikely(rc)) |
| 1126 | goto done; |
Benjamin Herrenschmidt | 8de9840 | 2005-11-25 09:59:46 +1100 | [diff] [blame] | 1127 | |
Alan Stern | e9df41c | 2007-08-08 11:48:02 -0400 | [diff] [blame] | 1128 | qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); |
Benjamin Herrenschmidt | 8de9840 | 2005-11-25 09:59:46 +1100 | [diff] [blame] | 1129 | if (unlikely(qh == NULL)) { |
Alan Stern | e9df41c | 2007-08-08 11:48:02 -0400 | [diff] [blame] | 1130 | usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); |
Benjamin Herrenschmidt | 8de9840 | 2005-11-25 09:59:46 +1100 | [diff] [blame] | 1131 | rc = -ENOMEM; |
| 1132 | goto done; |
| 1133 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1134 | |
| 1135 | /* Control/bulk operations through TTs don't need scheduling; |
| 1136 | * the HC and TT handle it when the TT has a buffer ready. |
| 1137 | */ |
Benjamin Herrenschmidt | 8de9840 | 2005-11-25 09:59:46 +1100 | [diff] [blame] | 1138 | if (likely (qh->qh_state == QH_STATE_IDLE)) |
Alan Stern | 7a0f0d9 | 2009-07-31 10:40:22 -0400 | [diff] [blame] | 1139 | qh_link_async(ehci, qh); |
Benjamin Herrenschmidt | 8de9840 | 2005-11-25 09:59:46 +1100 | [diff] [blame] | 1140 | done: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1141 | spin_unlock_irqrestore (&ehci->lock, flags); |
Benjamin Herrenschmidt | 8de9840 | 2005-11-25 09:59:46 +1100 | [diff] [blame] | 1142 | if (unlikely (qh == NULL)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1143 | qtd_list_free (ehci, urb, qtd_list); |
Benjamin Herrenschmidt | 8de9840 | 2005-11-25 09:59:46 +1100 | [diff] [blame] | 1144 | return rc; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | } |
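/* If anything above fails, rc travels back up the USB core's submit
 * path and the caller's qtd_list is freed here before returning; on
 * success the urb already holds a qh reference (urb->hcpriv, taken in
 * qh_append_tds) that lasts until completion.
 */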
| 1146 | |
| 1147 | /*-------------------------------------------------------------------------*/ |
| 1148 | |
| 1149 | /* the async qh for the qtds being reclaimed is now unlinked from the HC */ |
| 1150 | |
David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 1151 | static void end_unlink_async (struct ehci_hcd *ehci) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1152 | { |
| 1153 | struct ehci_qh *qh = ehci->reclaim; |
| 1154 | struct ehci_qh *next; |
| 1155 | |
Alan Stern | 07d29b6 | 2007-12-11 16:05:30 -0500 | [diff] [blame] | 1156 | iaa_watchdog_done(ehci); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1157 | |
Stefan Roese | 6dbd682 | 2007-05-01 09:29:37 -0700 | [diff] [blame] | 1158 | // qh->hw->hw_next = cpu_to_hc32(ehci, qh->qh_dma); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | qh->qh_state = QH_STATE_IDLE; |
| 1160 | qh->qh_next.qh = NULL; |
David Brownell | 53bd6a6 | 2006-08-30 14:50:06 -0700 | [diff] [blame] | 1161 | qh_put (qh); // refcount from reclaim |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1162 | |
| 1163 | /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */ |
| 1164 | next = qh->reclaim; |
| 1165 | ehci->reclaim = next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1166 | qh->reclaim = NULL; |
| 1167 | |
David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 1168 | qh_completions (ehci, qh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 | |
| 1170 | if (!list_empty (&qh->qtd_list) |
| 1171 | && HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) |
| 1172 | qh_link_async (ehci, qh); |
| 1173 | else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1174 | /* it's not free to turn the async schedule on/off; leave it |
| 1175 | * active but idle for a while once it empties. |
| 1176 | */ |
| 1177 | if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state) |
| 1178 | && ehci->async->qh_next.qh == NULL) |
| 1179 | timer_action (ehci, TIMER_ASYNC_OFF); |
| 1180 | } |
Alan Stern | 7a0f0d9 | 2009-07-31 10:40:22 -0400 | [diff] [blame] | 1181 | qh_put(qh); /* refcount from async list */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 | |
| 1183 | if (next) { |
| 1184 | ehci->reclaim = NULL; |
| 1185 | start_unlink_async (ehci, next); |
| 1186 | } |
Gabor Juhos | 2f7ac6c | 2011-04-13 10:54:23 +0200 | [diff] [blame] | 1187 | |
| 1188 | if (ehci->has_synopsys_hc_bug) |
| 1189 | ehci_writel(ehci, (u32) ehci->async->qh_dma, |
| 1190 | &ehci->regs->async_next); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 | } |
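/* The closing register write is a workaround: controllers flagged with
 * has_synopsys_hc_bug can be left with ASYNCLISTADDR pointing at the
 * qh that was just unlinked, so it is re-aimed at the schedule head.
 */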
| 1192 | |
| 1193 | /* makes sure the async qh will become idle */ |
| 1194 | /* caller must own ehci->lock */ |
| 1195 | |
| 1196 | static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh) |
| 1197 | { |
Benjamin Herrenschmidt | 083522d | 2006-12-15 06:54:08 +1100 | [diff] [blame] | 1198 | int cmd = ehci_readl(ehci, &ehci->regs->command); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 | struct ehci_qh *prev; |
| 1200 | |
| 1201 | #ifdef DEBUG |
| 1202 | assert_spin_locked(&ehci->lock); |
| 1203 | if (ehci->reclaim |
| 1204 | || (qh->qh_state != QH_STATE_LINKED |
| 1205 | && qh->qh_state != QH_STATE_UNLINK_WAIT) |
| 1206 | ) |
| 1207 | BUG (); |
| 1208 | #endif |
| 1209 | |
| 1210 | /* stop async schedule right now? */ |
| 1211 | if (unlikely (qh == ehci->async)) { |
| 1212 | /* can't get here without STS_ASS set */ |
David Brownell | d085229 | 2006-01-20 14:35:55 -0800 | [diff] [blame] | 1213 | if (ehci_to_hcd(ehci)->state != HC_STATE_HALT |
| 1214 | && !ehci->reclaim) { |
| 1215 | /* ... and CMD_IAAD clear */ |
Benjamin Herrenschmidt | 083522d | 2006-12-15 06:54:08 +1100 | [diff] [blame] | 1216 | ehci_writel(ehci, cmd & ~CMD_ASE, |
| 1217 | &ehci->regs->command); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | wmb (); |
| 1219 | // handshake later, if we need to |
David Brownell | d085229 | 2006-01-20 14:35:55 -0800 | [diff] [blame] | 1220 | timer_action_done (ehci, TIMER_ASYNC_OFF); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1222 | return; |
David Brownell | 53bd6a6 | 2006-08-30 14:50:06 -0700 | [diff] [blame] | 1223 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1224 | |
| 1225 | qh->qh_state = QH_STATE_UNLINK; |
| 1226 | ehci->reclaim = qh = qh_get (qh); |
| 1227 | |
| 1228 | prev = ehci->async; |
| 1229 | while (prev->qh_next.qh != qh) |
| 1230 | prev = prev->qh_next.qh; |
| 1231 | |
Alek Du | 3807e26 | 2009-07-14 07:23:29 +0800 | [diff] [blame] | 1232 | prev->hw->hw_next = qh->hw->hw_next; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1233 | prev->qh_next = qh->qh_next; |
| 1234 | wmb (); |
| 1235 | |
Alan Stern | 391016f | 2009-03-16 14:21:56 -0400 | [diff] [blame] | 1236 | /* If the controller isn't running, we don't have to wait for it */ |
| 1237 | if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1238 | /* if (unlikely (qh->reclaim != 0)) |
David Brownell | 53bd6a6 | 2006-08-30 14:50:06 -0700 | [diff] [blame] | 1239 | * this will recurse, probably not much |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1240 | */ |
David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 1241 | end_unlink_async (ehci); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1242 | return; |
| 1243 | } |
| 1244 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1245 | cmd |= CMD_IAAD; |
Benjamin Herrenschmidt | 083522d | 2006-12-15 06:54:08 +1100 | [diff] [blame] | 1246 | ehci_writel(ehci, cmd, &ehci->regs->command); |
| 1247 | (void)ehci_readl(ehci, &ehci->regs->command); |
Alan Stern | 07d29b6 | 2007-12-11 16:05:30 -0500 | [diff] [blame] | 1248 | iaa_watchdog_start(ehci); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | } |
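/* The CMD_IAAD write above rings the "Interrupt on Async Advance"
 * doorbell (EHCI spec 4.8.2): once the controller has advanced past
 * the unlinked qh and holds no cached pointers to it, it raises
 * STS_IAA and the interrupt path calls end_unlink_async().  The iaa
 * watchdog started on the last line covers controllers that never
 * deliver that interrupt, so the unlink completes either way.
 */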
| 1250 | |
| 1251 | /*-------------------------------------------------------------------------*/ |
| 1252 | |
David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 1253 | static void scan_async (struct ehci_hcd *ehci) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1254 | { |
Alan Stern | 94ae497 | 2011-04-05 13:36:15 -0400 | [diff] [blame] | 1255 | bool stopped; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1256 | struct ehci_qh *qh; |
| 1257 | enum ehci_timer_action action = TIMER_IO_WATCHDOG; |
| 1258 | |
David Brownell | b963801 | 2008-06-03 22:21:55 -0700 | [diff] [blame] | 1259 | ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1260 | timer_action_done (ehci, TIMER_ASYNC_SHRINK); |
| 1261 | rescan: |
Alan Stern | 94ae497 | 2011-04-05 13:36:15 -0400 | [diff] [blame] | 1262 | stopped = !HC_IS_RUNNING(ehci_to_hcd(ehci)->state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | qh = ehci->async->qh_next.qh; |
| 1264 | if (likely (qh != NULL)) { |
| 1265 | do { |
| 1266 | /* clean any finished work for this qh */ |
Alan Stern | 94ae497 | 2011-04-05 13:36:15 -0400 | [diff] [blame] | 1267 | if (!list_empty(&qh->qtd_list) && (stopped || |
| 1268 | qh->stamp != ehci->stamp)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | int temp; |
| 1270 | |
| 1271 | /* unlinks could happen here; completion |
| 1272 | * reporting drops the lock. rescan using |
| 1273 | * the latest schedule, but don't rescan |
Alan Stern | 94ae497 | 2011-04-05 13:36:15 -0400 | [diff] [blame] | 1274 | * qhs we already finished (no looping) |
| 1275 | * unless the controller is stopped. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | */ |
| 1277 | qh = qh_get (qh); |
| 1278 | qh->stamp = ehci->stamp; |
David Howells | 7d12e78 | 2006-10-05 14:55:46 +0100 | [diff] [blame] | 1279 | temp = qh_completions (ehci, qh); |
Alan Stern | 3a44494 | 2009-08-19 12:22:06 -0400 | [diff] [blame] | 1280 | if (qh->needs_rescan) |
| 1281 | unlink_async(ehci, qh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1282 | qh_put (qh); |
| 1283 | if (temp != 0) { |
| 1284 | goto rescan; |
| 1285 | } |
| 1286 | } |
| 1287 | |
David Brownell | b963801 | 2008-06-03 22:21:55 -0700 | [diff] [blame] | 1288 | /* unlink idle entries, reducing DMA usage as well |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1289 | * as HCD schedule-scanning costs. Delay unlinking any qh |
| 1290 | * we just scanned, since it's common for a qh not to |
| 1291 | * stay idle for long. |
| 1292 | * (This also avoids a re-activation race.) |
| 1293 | */ |
David Brownell | b963801 | 2008-06-03 22:21:55 -0700 | [diff] [blame] | 1294 | if (list_empty(&qh->qtd_list) |
| 1295 | && qh->qh_state == QH_STATE_LINKED) { |
Alan Stern | 94ae497 | 2011-04-05 13:36:15 -0400 | [diff] [blame] | 1296 | if (!ehci->reclaim && (stopped || |
| 1297 | ((ehci->stamp - qh->stamp) & 0x1fff) |
| 1298 | >= EHCI_SHRINK_FRAMES * 8)) |
David Brownell | b963801 | 2008-06-03 22:21:55 -0700 | [diff] [blame] | 1299 | start_unlink_async(ehci, qh); |
| 1300 | else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1301 | action = TIMER_ASYNC_SHRINK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | } |
| 1303 | |
| 1304 | qh = qh->qh_next.qh; |
| 1305 | } while (qh); |
| 1306 | } |
| 1307 | if (action == TIMER_ASYNC_SHRINK) |
| 1308 | timer_action (ehci, TIMER_ASYNC_SHRINK); |
| 1309 | } |
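/* The shrink test above works in microframes: stamps are FRINDEX
 * snapshots, the subtraction is masked so the comparison survives
 * counter wraparound, and EHCI_SHRINK_FRAMES is scaled by 8 because
 * each 1 ms frame holds 8 microframes.  For example, if
 * EHCI_SHRINK_FRAMES is 5, a linked qh must sit empty for 40
 * microframes (5 ms) before start_unlink_async() is called on it.
 */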