/*
2 * ISP1362 HCD (Host Controller Driver) for USB.
3 *
4 * Copyright (C) 2005 Lothar Wassmann <LW@KARO-electronics.de>
5 *
6 * Derived from the SL811 HCD, rewritten for ISP116x.
7 * Copyright (C) 2005 Olav Kongas <ok@artecdesign.ee>
8 *
9 * Portions:
10 * Copyright (C) 2004 Psion Teklogix (for NetBook PRO)
11 * Copyright (C) 2004 David Brownell
12 */
13
14/*
15 * The ISP1362 chip requires a large delay (300ns and 462ns) between
16 * accesses to the address and data register.
17 * The following timing options exist:
18 *
19 * 1. Configure your memory controller to add such delays if it can (the best)
20 * 2. Implement platform-specific delay function possibly
21 * combined with configuring the memory controller; see
22 * include/linux/usb_isp1362.h for more info.
23 * 3. Use ndelay (easiest, poorest).
24 *
25 * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the
26 * platform specific section of isp1362.h to select the appropriate variant.
27 *
28 * Also note that according to the Philips "ISP1362 Errata" document
29 * Rev 1.00 from 27 May data corruption may occur when the #WR signal
30 * is reasserted (even with #CS deasserted) within 132ns after a
31 * write cycle to any controller register. If the hardware doesn't
32 * implement the recommended fix (gating the #WR with #CS) software
33 * must ensure that no further write cycle (not necessarily to the chip!)
34 * is issued by the CPU within this interval.
35
36 * For PXA25x this can be ensured by using VLIO with the maximum
37 * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
38 */
39
40#ifdef CONFIG_USB_DEBUG
41# define ISP1362_DEBUG
42#else
43# undef ISP1362_DEBUG
44#endif
45
46/*
47 * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
48 * GET_INTERFACE requests correctly when the SETUP and DATA stages of the
49 * requests are carried out in separate frames. This will delay any SETUP
50 * packets until the start of the next frame so that this situation is
51 * unlikely to occur (and makes usbtest happy running with a PXA255 target
52 * device).
53 */
54#undef BUGGY_PXA2XX_UDC_USBTEST
55
56#undef PTD_TRACE
57#undef URB_TRACE
58#undef VERBOSE
59#undef REGISTERS
60
61/* This enables a memory test on the ISP1362 chip memory to make sure the
62 * chip access timing is correct.
63 */
64#undef CHIP_BUFFER_TEST
65
66#include <linux/module.h>
67#include <linux/moduleparam.h>
68#include <linux/kernel.h>
69#include <linux/delay.h>
70#include <linux/ioport.h>
71#include <linux/sched.h>
72#include <linux/slab.h>
73#include <linux/smp_lock.h>
74#include <linux/errno.h>
75#include <linux/init.h>
76#include <linux/list.h>
77#include <linux/interrupt.h>
78#include <linux/usb.h>
79#include <linux/usb/isp1362.h>
80#include <linux/platform_device.h>
81#include <linux/pm.h>
82#include <linux/io.h>
83#include <linux/bitops.h>
84
85#include <asm/irq.h>
86#include <asm/system.h>
87#include <asm/byteorder.h>
88#include <asm/unaligned.h>
89
90static int dbg_level;
91#ifdef ISP1362_DEBUG
92module_param(dbg_level, int, 0644);
93#else
94module_param(dbg_level, int, 0);
95#define STUB_DEBUG_FILE
96#endif
97
98#include "../core/hcd.h"
99#include "../core/usb.h"
100#include "isp1362.h"
101
102
103#define DRIVER_VERSION "2005-04-04"
104#define DRIVER_DESC "ISP1362 USB Host Controller Driver"
105
106MODULE_DESCRIPTION(DRIVER_DESC);
107MODULE_LICENSE("GPL");
108
109static const char hcd_name[] = "isp1362-hcd";
110
111static void isp1362_hc_stop(struct usb_hcd *hcd);
112static int isp1362_hc_start(struct usb_hcd *hcd);
113
114/*-------------------------------------------------------------------------*/
115
116/*
117 * When called from the interrupthandler only isp1362_hcd->irqenb is modified,
118 * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon
119 * completion.
120 * We don't need a 'disable' counterpart, since interrupts will be disabled
121 * only by the interrupt handler.
122 */
static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask)
{
	/* All requested bits already enabled: nothing to do. */
	if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb)
		return;
	/* Ack any stale pending status for bits that are being newly
	 * enabled, so an old latched event doesn't fire immediately.
	 */
	if (mask & ~isp1362_hcd->irqenb)
		isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb);
	isp1362_hcd->irqenb |= mask;
	/* Inside the IRQ handler only update the soft copy; the handler
	 * writes irqenb to HCuPINTENB itself on completion (see comment
	 * above).
	 */
	if (isp1362_hcd->irq_active)
		return;
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
}
134
135/*-------------------------------------------------------------------------*/
136
137static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd,
138 u16 offset)
139{
140 struct isp1362_ep_queue *epq = NULL;
141
142 if (offset < isp1362_hcd->istl_queue[1].buf_start)
143 epq = &isp1362_hcd->istl_queue[0];
144 else if (offset < isp1362_hcd->intl_queue.buf_start)
145 epq = &isp1362_hcd->istl_queue[1];
146 else if (offset < isp1362_hcd->atl_queue.buf_start)
147 epq = &isp1362_hcd->intl_queue;
148 else if (offset < isp1362_hcd->atl_queue.buf_start +
149 isp1362_hcd->atl_queue.buf_size)
150 epq = &isp1362_hcd->atl_queue;
151
152 if (epq)
153 DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name);
154 else
155 pr_warning("%s: invalid PTD $%04x\n", __func__, offset);
156
157 return epq;
158}
159
160static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index)
161{
162 int offset;
163
164 if (index * epq->blk_size > epq->buf_size) {
165 pr_warning("%s: Bad %s index %d(%d)\n", __func__, epq->name, index,
166 epq->buf_size / epq->blk_size);
167 return -EINVAL;
168 }
169 offset = epq->buf_start + index * epq->blk_size;
170 DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset);
171
172 return offset;
173}
174
175/*-------------------------------------------------------------------------*/
176
177static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size,
178 int mps)
179{
180 u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size);
181
182 xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE);
183 if (xfer_size < size && xfer_size % mps)
184 xfer_size -= xfer_size % mps;
185
186 return xfer_size;
187}
188
/*
 * First-fit allocation of a contiguous run of PTD blocks in @epq for a
 * transfer of @len payload bytes (plus the PTD header) on endpoint @ep.
 * On success the run is marked in epq->buf_map, ep->ptd_index/ptd_offset/
 * num_ptds are set, and the first block index is returned.
 * Returns -ENOMEM when no blocks are free at all, -EOVERFLOW when no
 * contiguous run of the required length exists.
 */
static int claim_ptd_buffers(struct isp1362_ep_queue *epq,
			     struct isp1362_ep *ep, u16 len)
{
	int ptd_offset = -EINVAL;
	int index;
	/* number of blk_size blocks needed for header + payload (ceiling) */
	int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1;
	int found = -1;
	int last = -1;

	BUG_ON(len > epq->buf_size);

	if (!epq->buf_avail)
		return -ENOMEM;

	/* ep must not already own blocks; loud diagnostic before the BUG_ON */
	if (ep->num_ptds)
		pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__,
		       epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map);
	BUG_ON(ep->num_ptds != 0);

	/* first-fit scan for num_ptds consecutive clear bits in buf_map */
	for (index = 0; index <= epq->buf_count - num_ptds; index++) {
		if (test_bit(index, &epq->buf_map))
			continue;
		found = index;
		for (last = index + 1; last < index + num_ptds; last++) {
			if (test_bit(last, &epq->buf_map)) {
				found = -1;
				break;
			}
		}
		if (found >= 0)
			break;
	}
	if (found < 0)
		return -EOVERFLOW;

	DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__,
	    num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE));
	ptd_offset = get_ptd_offset(epq, found);
	WARN_ON(ptd_offset < 0);
	ep->ptd_offset = ptd_offset;
	ep->num_ptds += num_ptds;
	epq->buf_avail -= num_ptds;
	BUG_ON(epq->buf_avail > epq->buf_count);
	ep->ptd_index = found;
	/* mark the claimed run; __set_bit is fine: caller holds the HCD lock */
	for (index = found; index < last; index++)
		__set_bit(index, &epq->buf_map);
	DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n",
	    __func__, epq->name, ep->ptd_index, ep->ptd_offset,
	    epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map);

	return found;
}
241
/*
 * Return the PTD blocks owned by @ep to @epq: clear them in buf_map,
 * set them in skip_map (so the HC skips them until reused), and reset
 * the ep's PTD bookkeeping.  Counterpart of claim_ptd_buffers().
 */
static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
{
	int index = ep->ptd_index;
	int last = ep->ptd_index + ep->num_ptds;

	/* diagnostic dump before the BUG_ON so the state is visible in the log */
	if (last > epq->buf_count)
		pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n",
		       __func__, ep, ep->num_req, ep->length, epq->name, ep->ptd_index,
		       ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail,
		       epq->buf_map, epq->skip_map);
	BUG_ON(last > epq->buf_count);

	for (; index < last; index++) {
		__clear_bit(index, &epq->buf_map);
		__set_bit(index, &epq->skip_map);
	}
	epq->buf_avail += ep->num_ptds;
	epq->ptd_count--;

	BUG_ON(epq->buf_avail > epq->buf_count);
	BUG_ON(epq->ptd_count > epq->buf_count);

	DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n",
	    __func__, epq->name,
	    ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count);
	DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__,
	    epq->buf_map, epq->skip_map);

	/* mark ep as owning nothing; -EINVAL flags "no PTD assigned" */
	ep->num_ptds = 0;
	ep->ptd_offset = -EINVAL;
	ep->ptd_index = -EINVAL;
}
274
275/*-------------------------------------------------------------------------*/
276
277/*
278 Set up PTD's.
279*/
/*
 * Fill in ep->ptd (and ep->data/ep->length) for the next transaction of
 * @urb according to ep->nextpid.  @fno is the frame number used for ISO
 * PTDs only.  The PTD is written to the chip later by isp1362_write_ptd().
 */
static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
			struct isp1362_ep *ep, struct isp1362_ep_queue *epq,
			u16 fno)
{
	struct ptd *ptd;
	int toggle;
	int dir;
	u16 len;
	/* bytes of the URB still to be transferred */
	size_t buf_len = urb->transfer_buffer_length - urb->actual_length;

	DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep);

	ptd = &ep->ptd;

	/* default data pointer: continue where the URB left off */
	ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length;

	switch (ep->nextpid) {
	case USB_PID_IN:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 0);
		dir = PTD_DIR_IN;
		if (usb_pipecontrol(urb->pipe)) {
			/* control DATA stage: one packet at a time */
			len = min_t(size_t, ep->maxpacket, buf_len);
		} else if (usb_pipeisoc(urb->pipe)) {
			/* ISO: length and buffer come from the frame descriptor */
			len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE);
			ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset;
		} else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_OUT:
		toggle = usb_gettoggle(urb->dev, ep->epnum, 1);
		dir = PTD_DIR_OUT;
		if (usb_pipecontrol(urb->pipe))
			len = min_t(size_t, ep->maxpacket, buf_len);
		else if (usb_pipeisoc(urb->pipe))
			len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE);
		else
			len = max_transfer_size(epq, buf_len, ep->maxpacket);
		if (len == 0)
			pr_info("%s: Sending ZERO packet: %d\n", __func__,
				urb->transfer_flags & URB_ZERO_PACKET);
		DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket,
		    (int)buf_len);
		break;
	case USB_PID_SETUP:
		/* SETUP stage: fixed 8-byte request, toggle always DATA0 */
		toggle = 0;
		dir = PTD_DIR_SETUP;
		len = sizeof(struct usb_ctrlrequest);
		DBG(1, "%s: SETUP len %d\n", __func__, len);
		ep->data = urb->setup_packet;
		break;
	case USB_PID_ACK:
		/* control STATUS stage: zero-length, opposite direction of
		 * the data stage, toggle always DATA1
		 */
		toggle = 1;
		len = 0;
		dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ?
			PTD_DIR_OUT : PTD_DIR_IN;
		DBG(1, "%s: ACK len %d\n", __func__, len);
		break;
	default:
		toggle = dir = len = 0;
		pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid);
		BUG_ON(1);
	}

	ep->length = len;
	if (!len)
		ep->data = NULL;

	/* assemble the PTD header fields */
	ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle);
	ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) |
		PTD_EP(ep->epnum);
	ptd->len = PTD_LEN(len) | PTD_DIR(dir);
	ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe));

	if (usb_pipeint(urb->pipe)) {
		/* INT: start frame (branch slot) and polling rate */
		ptd->faddr |= PTD_SF_INT(ep->branch);
		ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0);
	}
	if (usb_pipeisoc(urb->pipe))
		ptd->faddr |= PTD_SF_ISO(fno);

	DBG(1, "%s: Finished\n", __func__);
}
364
/*
 * Copy the prepared PTD header (and, for OUT-direction transfers, the
 * payload) from ep->ptd / ep->data into the chip's buffer memory at
 * ep->ptd_offset.  For IN transfers only the header is written.
 */
static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			      struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	/* no payload to copy for IN PTDs */
	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;

	_BUG_ON(ep->ptd_offset < 0);

	prefetch(ptd);
	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	if (len)
		isp1362_write_buffer(isp1362_hcd, ep->data,
				     ep->ptd_offset + PTD_HEADER_SIZE, len);

	dump_ptd(ptd);
	dump_ptd_out_data(ptd, ep->data);
}
382
/*
 * Read back the completed PTD header from the chip into ep->ptd, remove
 * @ep from the queue's active list, and for IN transfers copy the number
 * of bytes the chip actually received into ep->data.
 */
static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			     struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int act_len;

	WARN_ON(list_empty(&ep->active));
	BUG_ON(ep->ptd_offset < 0);

	list_del_init(&ep->active);
	DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active);

	prefetchw(ptd);
	isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	dump_ptd(ptd);
	act_len = PTD_GET_COUNT(ptd);
	/* nothing more to do unless an IN transfer produced data */
	if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0)
		return;
	if (act_len > ep->length)
		pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep,
		       ep->ptd_offset, act_len, ep->length);
	BUG_ON(act_len > ep->length);
	/* Only transfer the amount of data that has actually been overwritten
	 * in the chip buffer. We don't want any data that doesn't belong to the
	 * transfer to leak out of the chip to the callers transfer buffer!
	 */
	prefetchw(ep->data);
	isp1362_read_buffer(isp1362_hcd, ep->data,
			    ep->ptd_offset + PTD_HEADER_SIZE, act_len);
	dump_ptd_in_data(ptd, ep->data);
}
414
415/*
416 * INT PTDs will stay in the chip until data is available.
417 * This function will remove a PTD from the chip when the URB is dequeued.
418 * Must be called with the spinlock held and IRQs disabled
419 */
/*
 * INT PTDs will stay in the chip until data is available.
 * This function will remove a PTD from the chip when the URB is dequeued.
 * Must be called with the spinlock held and IRQs disabled
 */
static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	int index;
	struct isp1362_ep_queue *epq;

	DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset);
	BUG_ON(ep->ptd_offset < 0);

	epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset);
	BUG_ON(!epq);

	/* put ep in remove_list for cleanup */
	WARN_ON(!list_empty(&ep->remove_list));
	list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list);
	/* let SOF interrupt handle the cleanup */
	isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);

	index = ep->ptd_index;
	if (index < 0)
		/* ISO queues don't have SKIP registers */
		return;

	DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__,
	    index, ep->ptd_offset, epq->skip_map, 1 << index);

	/* prevent further processing of PTD (will be effective after next SOF) */
	epq->skip_map |= 1 << index;
	if (epq == &isp1362_hcd->atl_queue) {
		DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map);
		/* all PTDs skipped: deactivate the whole ATL buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else if (epq == &isp1362_hcd->intl_queue) {
		DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map);
		/* all PTDs skipped: deactivate the whole INTL buffer */
		if (~epq->skip_map == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	}
}
462
463/*
464 Take done or failed requests out of schedule. Give back
465 processed urbs.
466*/
/*
 * Take done or failed requests out of schedule. Give back
 * processed urbs.  Temporarily drops isp1362_hcd->lock around
 * usb_hcd_giveback_urb() (the URB completion may resubmit).
 */
static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			   struct urb *urb, int status)
__releases(isp1362_hcd->lock)
__acquires(isp1362_hcd->lock)
{
	urb->hcpriv = NULL;
	ep->error_count = 0;

	/* control endpoints restart at the SETUP stage for the next URB */
	if (usb_pipecontrol(urb->pipe))
		ep->nextpid = USB_PID_SETUP;

	URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__,
		ep->num_req, usb_pipedevice(urb->pipe),
		usb_pipeendpoint(urb->pipe),
		!usb_pipein(urb->pipe) ? "out" : "in",
		usb_pipecontrol(urb->pipe) ? "ctrl" :
		usb_pipeint(urb->pipe) ? "int" :
		usb_pipebulk(urb->pipe) ? "bulk" :
		"iso",
		urb->actual_length, urb->transfer_buffer_length,
		!(urb->transfer_flags & URB_SHORT_NOT_OK) ?
		"short_ok" : "", urb->status);


	usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb);
	/* giveback must run unlocked; completion handlers may re-enter the HCD */
	spin_unlock(&isp1362_hcd->lock);
	usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status);
	spin_lock(&isp1362_hcd->lock);

	/* take idle endpoints out of the schedule right away */
	if (!list_empty(&ep->hep->urb_list))
		return;

	/* async deschedule */
	if (!list_empty(&ep->schedule)) {
		list_del_init(&ep->schedule);
		return;
	}


	if (ep->interval) {
		/* periodic deschedule: release the reserved bandwidth */
		DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval,
		    ep, ep->branch, ep->load,
		    isp1362_hcd->load[ep->branch],
		    isp1362_hcd->load[ep->branch] - ep->load);
		isp1362_hcd->load[ep->branch] -= ep->load;
		ep->branch = PERIODIC_SIZE;
	}
}
517
518/*
519 * Analyze transfer results, handle partial transfers and errors
520*/
/*
 * Analyze transfer results, handle partial transfers and errors.
 * Advances ep->nextpid through the control-transfer state machine
 * (SETUP -> IN/OUT -> ACK) and finishes the URB once urbstat leaves
 * -EINPROGRESS.
 */
static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep)
{
	struct urb *urb = get_urb(ep);
	struct usb_device *udev;
	struct ptd *ptd;
	int short_ok;
	u16 len;
	int urbstat = -EINPROGRESS;
	u8 cc;

	DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req);

	udev = urb->dev;
	ptd = &ep->ptd;
	cc = PTD_GET_CC(ptd);
	/* the HC never touched this PTD: treat it as a non-responding device */
	if (cc == PTD_NOTACCESSED) {
		pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__,
		       ep->num_req, ptd);
		cc = PTD_DEVNOTRESP;
	}

	short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK);
	len = urb->transfer_buffer_length - urb->actual_length;

	/* Data underrun is special. For allowed underrun
	   we clear the error and continue as normal. For
	   forbidden underrun we finish the DATA stage
	   immediately while for control transfer,
	   we do a STATUS stage.
	 */
	if (cc == PTD_DATAUNDERRUN) {
		if (short_ok) {
			DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req, short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			cc = PTD_CC_NOERROR;
			urbstat = 0;
		} else {
			DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n",
			    __func__, ep->num_req,
			    usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid,
			    short_ok ? "" : "not_",
			    PTD_GET_COUNT(ptd), ep->maxpacket, len);
			if (usb_pipecontrol(urb->pipe)) {
				ep->nextpid = USB_PID_ACK;
				/* save the data underrun error code for later and
				 * proceed with the status stage
				 */
				urb->actual_length += PTD_GET_COUNT(ptd);
				BUG_ON(urb->actual_length > urb->transfer_buffer_length);

				if (urb->status == -EINPROGRESS)
					urb->status = cc_to_error[PTD_DATAUNDERRUN];
			} else {
				usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT,
					      PTD_GET_TOGGLE(ptd));
				urbstat = cc_to_error[PTD_DATAUNDERRUN];
			}
			goto out;
		}
	}

	if (cc != PTD_CC_NOERROR) {
		/* give up after 3 retries, or immediately on STALL/overrun */
		if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) {
			urbstat = cc_to_error[cc];
			DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n",
			    __func__, ep->num_req, ep->nextpid, urbstat, cc,
			    ep->error_count);
		}
		goto out;
	}

	switch (ep->nextpid) {
	case USB_PID_OUT:
		if (PTD_GET_COUNT(ptd) != ep->length)
			pr_err("%s: count=%d len=%d\n", __func__,
			       PTD_GET_COUNT(ptd), ep->length);
		BUG_ON(PTD_GET_COUNT(ptd) != ep->length);
		urb->actual_length += ep->length;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd));
		if (urb->actual_length == urb->transfer_buffer_length) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				/* done, unless URB_ZERO_PACKET requires a final
				 * zero-length packet after a full last packet
				 */
				if (len % ep->maxpacket ||
				    !(urb->transfer_flags & URB_ZERO_PACKET)) {
					urbstat = 0;
					DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
					    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
					    urbstat, len, ep->maxpacket, urb->actual_length);
				}
			}
		}
		break;
	case USB_PID_IN:
		len = PTD_GET_COUNT(ptd);
		BUG_ON(len > ep->length);
		urb->actual_length += len;
		BUG_ON(urb->actual_length > urb->transfer_buffer_length);
		usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd));
		/* if transfer completed or (allowed) data underrun */
		if ((urb->transfer_buffer_length == urb->actual_length) ||
		    len % ep->maxpacket) {
			DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__,
			    ep->num_req, len, ep->maxpacket, urbstat);
			if (usb_pipecontrol(urb->pipe)) {
				DBG(3, "%s: req %d %s Wait for ACK\n", __func__,
				    ep->num_req,
				    usb_pipein(urb->pipe) ? "IN" : "OUT");
				ep->nextpid = USB_PID_ACK;
			} else {
				urbstat = 0;
				DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n",
				    __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT",
				    urbstat, len, ep->maxpacket, urb->actual_length);
			}
		}
		break;
	case USB_PID_SETUP:
		/* choose the next stage: straight to STATUS for zero-length
		 * data, otherwise DATA in the requested direction (toggle
		 * starts at DATA1 per the USB spec)
		 */
		if (urb->transfer_buffer_length == urb->actual_length) {
			ep->nextpid = USB_PID_ACK;
		} else if (usb_pipeout(urb->pipe)) {
			usb_settoggle(udev, 0, 1, 1);
			ep->nextpid = USB_PID_OUT;
		} else {
			usb_settoggle(udev, 0, 0, 1);
			ep->nextpid = USB_PID_IN;
		}
		break;
	case USB_PID_ACK:
		DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req,
		    urbstat);
		WARN_ON(urbstat != -EINPROGRESS);
		urbstat = 0;
		ep->nextpid = 0;
		break;
	default:
		BUG_ON(1);
	}

 out:
	if (urbstat != -EINPROGRESS) {
		DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__,
		    ep, ep->num_req, urb, urbstat);
		finish_request(isp1362_hcd, ep, urb, urbstat);
	}
}
674
/*
 * Process the remove_list populated by remove_ptd(): release each ep's
 * PTD buffers, give back any still-queued URB with -ESHUTDOWN, and take
 * the ep off the active and remove lists.  Runs from the SOF interrupt.
 */
static void finish_unlinks(struct isp1362_hcd *isp1362_hcd)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) {
		struct isp1362_ep_queue *epq =
			get_ptd_queue(isp1362_hcd, ep->ptd_offset);
		int index = ep->ptd_index;

		BUG_ON(epq == NULL);
		/* index < 0 means an ISO ep with no claimed PTD blocks */
		if (index >= 0) {
			DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset);
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);
		}
		if (!list_empty(&ep->hep->urb_list)) {
			struct urb *urb = get_urb(ep);

			DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__,
			    ep->num_req, ep);
			finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN);
		}
		WARN_ON(list_empty(&ep->active));
		if (!list_empty(&ep->active)) {
			list_del_init(&ep->active);
			DBG(1, "%s: ep %p removed from active list\n", __func__, ep);
		}
		list_del_init(&ep->remove_list);
		DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep);
	}
	DBG(1, "%s: Done\n", __func__);
}
708
/*
 * Hand @count newly submitted ATL PTDs to the HC: program the done-
 * transfer counter, enable the ATL-done interrupt, update the skip map
 * and activate the ATL buffer.  With count == 0 (deferred submission)
 * just arm the SOF interrupt so submission is retried next frame.
 */
static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count)
{
	if (count > 0) {
		/* interrupt after 'count' PTDs done, if fewer than queued */
		if (count < isp1362_hcd->atl_queue.ptd_count)
			isp1362_write_reg16(isp1362_hcd, HCATLDTC, count);
		isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map);
		isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
	} else
		isp1362_enable_int(isp1362_hcd, HCuPINT_SOF);
}
720
/*
 * Activate the INTL buffer: enable the INTL-done interrupt, mark the
 * buffer active and write the current skip map to the chip.
 */
static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	isp1362_enable_int(isp1362_hcd, HCuPINT_INTL);
	isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);
	isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map);
}
727
728static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip)
729{
730 isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0);
731 isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ?
732 HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL);
733}
734
/*
 * Prepare and submit one PTD for @ep on queue @epq: build the PTD,
 * claim buffer blocks, write the PTD to the chip and clear its skip
 * bit.  Returns 0 on success, -ENOMEM / -EOVERFLOW as reported by
 * claim_ptd_buffers().
 */
static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb,
		      struct isp1362_ep *ep, struct isp1362_ep_queue *epq)
{
	int index = epq->free_ptd;

	/* prepare_ptd() sets ep->length, needed by claim_ptd_buffers() below */
	prepare_ptd(isp1362_hcd, urb, ep, epq, 0);
	index = claim_ptd_buffers(epq, ep, ep->length);
	if (index == -ENOMEM) {
		DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__,
		    ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map);
		return index;
	} else if (index == -EOVERFLOW) {
		DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n",
		    __func__, ep->num_req, ep->length, epq->name, ep->num_ptds,
		    epq->buf_map, epq->skip_map);
		return index;
	} else
		BUG_ON(index < 0);
	list_add_tail(&ep->active, &epq->active);
	DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__,
	    ep, ep->num_req, ep->length, &epq->active);
	DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name,
	    ep->ptd_offset, ep, ep->num_req);
	isp1362_write_ptd(isp1362_hcd, ep, epq);
	/* unskip the PTD so the HC processes it */
	__clear_bit(ep->ptd_index, &epq->skip_map);

	return 0;
}
763
/*
 * Walk the async (control/bulk) schedule and submit a PTD for every
 * endpoint that is not already active.  On -ENOMEM/-EOVERFLOW the
 * submission is deferred to the next SOF.  The schedule head is rotated
 * each pass to avoid starving endpoints.
 */
static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue;
	struct isp1362_ep *ep;
	int defer = 0;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name,
		    ep, ep->num_req);

		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM) {
			/* no blocks free at all: stop, retry on SOF */
			defer = 1;
			break;
		} else if (ret == -EOVERFLOW) {
			/* no contiguous run for this ep: try the others */
			defer = 1;
			continue;
		}
#ifdef BUGGY_PXA2XX_UDC_USBTEST
		defer = ep->nextpid == USB_PID_SETUP;
#endif
		ptd_count++;
	}

	/* Avoid starving of endpoints */
	if (isp1362_hcd->async.next != isp1362_hcd->async.prev) {
		DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count);
		list_move(&isp1362_hcd->async, isp1362_hcd->async.next);
	}
	if (ptd_count || defer)
		enable_atl_transfers(isp1362_hcd, defer ? 0 : ptd_count);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds) {
		epq->stat_maxptds = epq->ptd_count;
		DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds);
	}
}
816
/*
 * Walk the periodic (interrupt) schedule and submit a PTD for every
 * endpoint that is not already active; activate the INTL buffer when
 * anything was queued.
 */
static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue;
	struct isp1362_ep *ep;

	/* don't race with finish_transfers() on the same queue */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		struct urb *urb = get_urb(ep);
		int ret;

		if (!list_empty(&ep->active)) {
			DBG(1, "%s: Skipping active %s ep %p\n", __func__,
			    epq->name, ep);
			continue;
		}

		DBG(1, "%s: Processing %s ep %p req %d\n", __func__,
		    epq->name, ep, ep->num_req);
		ret = submit_req(isp1362_hcd, urb, ep, epq);
		if (ret == -ENOMEM)
			break;
		else if (ret == -EOVERFLOW)
			continue;
		ptd_count++;
	}

	if (ptd_count) {
		static int last_count;

		/* log only when the submitted count changes */
		if (ptd_count != last_count) {
			DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count);
			last_count = ptd_count;
		}
		enable_intl_transfers(isp1362_hcd);
	}

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;
}
862
863static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep)
864{
865 u16 ptd_offset = ep->ptd_offset;
866 int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size;
867
868 DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset,
869 ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size);
870
871 ptd_offset += num_ptds * epq->blk_size;
872 if (ptd_offset < epq->buf_start + epq->buf_size)
873 return ptd_offset;
874 else
875 return -ENOMEM;
876}
877
/*
 * Fill the current ISO (ISTL) buffer with PTDs for URBs due in the next
 * frame, then — thanks to the chip's double buffering — try to fill the
 * other ISTL buffer for the frame after that as well.
 */
static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd)
{
	int ptd_count = 0;
	int flip = isp1362_hcd->istl_flip;
	struct isp1362_ep_queue *epq;
	int ptd_offset;
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;
	u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM);

 fill2:
	epq = &isp1362_hcd->istl_queue[flip];
	/* don't race with finish_iso_transfers() on the same buffer */
	if (atomic_read(&epq->finishing)) {
		DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name);
		return;
	}

	/* buffer still owned by the HC */
	if (!list_empty(&epq->active))
		return;

	/* ISO PTDs are packed sequentially from the start of the buffer */
	ptd_offset = epq->buf_start;
	list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) {
		struct urb *urb = get_urb(ep);
		s16 diff = fno - (u16)urb->start_frame;

		DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep);

		if (diff > urb->number_of_packets) {
			/* time frame for this URB has elapsed */
			finish_request(isp1362_hcd, ep, urb, -EOVERFLOW);
			continue;
		} else if (diff < -1) {
			/* URB is not due in this frame or the next one.
			 * Comparing with '-1' instead of '0' accounts for double
			 * buffering in the ISP1362 which enables us to queue the PTD
			 * one frame ahead of time
			 */
		} else if (diff == -1) {
			/* submit PTD's that are due in the next frame */
			prepare_ptd(isp1362_hcd, urb, ep, epq, fno);
			if (ptd_offset + PTD_HEADER_SIZE + ep->length >
			    epq->buf_start + epq->buf_size) {
				pr_err("%s: Not enough ISO buffer space for %d byte PTD\n",
				       __func__, ep->length);
				continue;
			}
			ep->ptd_offset = ptd_offset;
			list_add_tail(&ep->active, &epq->active);

			ptd_offset = next_ptd(epq, ep);
			if (ptd_offset < 0) {
				pr_warning("%s: req %d No more %s PTD buffers available\n", __func__,
					   ep->num_req, epq->name);
				break;
			}
		}
	}
	list_for_each_entry(ep, &epq->active, active) {
		/* NOTE(review): this tags the FIRST entry of the active list
		 * (epq->active.next) with PTD_LAST_MSK, not the last one —
		 * confirm against the ISP1362 datasheet whether that is the
		 * intended end-of-buffer marker.
		 */
		if (epq->active.next == &ep->active)
			ep->ptd.mps |= PTD_LAST_MSK;
		isp1362_write_ptd(isp1362_hcd, ep, epq);
		ptd_count++;
	}

	if (ptd_count)
		enable_istl_transfers(isp1362_hcd, flip);

	epq->ptd_count += ptd_count;
	if (epq->ptd_count > epq->stat_maxptds)
		epq->stat_maxptds = epq->ptd_count;

	/* check, whether the second ISTL buffer may also be filled */
	if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
	      (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) {
		fno++;
		ptd_count = 0;
		flip = 1 - flip;
		goto fill2;
	}
}
958
/*
 * Complete ATL/INTL PTDs flagged in @done_map: read each done PTD back
 * from the chip, release its buffer blocks and post-process the result.
 * epq->finishing guards against concurrent start_*_transfers() on the
 * same queue.
 */
static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map,
			     struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map);

	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		int index = ep->ptd_index;

		DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name,
		    index, ep->ptd_offset);

		BUG_ON(index < 0);
		if (__test_and_clear_bit(index, &done_map)) {
			isp1362_read_ptd(isp1362_hcd, ep, epq);
			epq->free_ptd = index;
			BUG_ON(ep->num_ptds == 0);
			release_ptd_buffers(epq, ep);

			DBG(1, "%s: ep %p req %d removed from active list\n", __func__,
			    ep, ep->num_req);
			/* a pending unlink is satisfied by normal completion */
			if (!list_empty(&ep->remove_list)) {
				list_del_init(&ep->remove_list);
				DBG(1, "%s: ep %p removed from remove list\n", __func__, ep);
			}
			DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name,
			    ep, ep->num_req);
			postproc_ep(isp1362_hcd, ep);
		}
		/* stop early once every done bit has been consumed */
		if (!done_map)
			break;
	}
	if (done_map)
		pr_warning("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map,
			   epq->skip_map);
	atomic_dec(&epq->finishing);
}
1004
/*
 * Reap a completed ISTL buffer.
 *
 * Unlike the ATL/INTL queues there is no done map for isochronous
 * transfers: every endpoint on the queue's active list is read back from
 * chip memory and post-processed.
 */
static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq)
{
	struct isp1362_ep *ep;
	struct isp1362_ep *tmp;

	if (list_empty(&epq->active)) {
		DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name);
		return;
	}

	DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name);

	/* flag that this queue is being reaped, for concurrent observers */
	atomic_inc(&epq->finishing);
	list_for_each_entry_safe(ep, tmp, &epq->active, active) {
		DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset);

		isp1362_read_ptd(isp1362_hcd, ep, epq);
		DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep);
		postproc_ep(isp1362_hcd, ep);
	}
	/* all ISTL buffer space must have been released by now */
	WARN_ON(epq->blk_size != 0);
	atomic_dec(&epq->finishing);
}
1028
/*
 * Top-level interrupt handler.
 *
 * Dispatches all ISP1362 microprocessor interrupt sources: SOF, the two
 * isochronous buffers (ISTL0/ISTL1), interrupt transfers (INTL), async
 * transfers (ATL), OHCI operational interrupts (OPR), SUSPEND and CLKRDY.
 * Runs under the HCD spinlock; the chip's uP interrupts are masked on
 * entry and re-enabled from the cached isp1362_hcd->irqenb on exit.
 */
static irqreturn_t isp1362_irq(struct usb_hcd *hcd)
{
	int handled = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u16 irqstat;
	u16 svc_mask;	/* sources not yet serviced; checked at the end */

	spin_lock(&isp1362_hcd->lock);

	/* the handler must never be re-entered */
	BUG_ON(isp1362_hcd->irq_active++);

	/* mask all uP interrupts while servicing this one */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT);
	DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb);

	/* only handle interrupts that are currently enabled */
	irqstat &= isp1362_hcd->irqenb;
	/* acknowledge the handled sources in the chip */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat);
	svc_mask = irqstat;

	if (irqstat & HCuPINT_SOF) {
		/* SOF is one-shot: re-armed by whoever needs the next frame tick */
		isp1362_hcd->irqenb &= ~HCuPINT_SOF;
		isp1362_hcd->irq_stat[ISP1362_INT_SOF]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SOF;
		DBG(3, "%s: SOF\n", __func__);
		isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
		if (!list_empty(&isp1362_hcd->remove_list))
			finish_unlinks(isp1362_hcd);
		/* kick async processing unless an ATL interrupt will do it below */
		if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) {
			if (list_empty(&isp1362_hcd->atl_queue.active)) {
				start_atl_transfers(isp1362_hcd);
			} else {
				/* ATL PTDs still in flight: just re-enable processing */
				isp1362_enable_int(isp1362_hcd, HCuPINT_ATL);
				isp1362_write_reg32(isp1362_hcd, HCATLSKIP,
						    isp1362_hcd->atl_queue.skip_map);
				isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
			}
		}
	}

	if (irqstat & HCuPINT_ISTL0) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL0;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL);
		DBG(1, "%s: ISTL0\n", __func__);
		/* ISTL0 completion is only expected while istl_flip == 0 */
		WARN_ON((int)!!isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL0_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL0;
	}

	if (irqstat & HCuPINT_ISTL1) {
		isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++;
		handled = 1;
		svc_mask &= ~HCuPINT_ISTL1;
		isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL);
		DBG(1, "%s: ISTL1\n", __func__);
		/* ISTL1 completion is only expected while istl_flip == 1 */
		WARN_ON(!(int)isp1362_hcd->istl_flip);
		WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_ACTIVE);
		WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) &
			HCBUFSTAT_ISTL1_DONE));
		isp1362_hcd->irqenb &= ~HCuPINT_ISTL1;
	}

	if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) {
		/* both buffers done at once means we lost the ping-pong rhythm */
		WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) ==
			(HCuPINT_ISTL0 | HCuPINT_ISTL1));
		finish_iso_transfers(isp1362_hcd,
				     &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]);
		start_iso_transfers(isp1362_hcd);
		/* switch to the other ISTL buffer for the next round */
		isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip;
	}

	if (irqstat & HCuPINT_INTL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_INTL]++;

		DBG(2, "%s: INTL\n", __func__);

		svc_mask &= ~HCuPINT_INTL;

		/* skip the finished PTDs until they are refilled */
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			/* All PTDs are finished, disable INTL processing entirely */
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE);

		handled = 1;
		WARN_ON(!done_map);
		if (done_map) {
			DBG(3, "%s: INTL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
			start_intl_transfers(isp1362_hcd);
		}
	}

	if (irqstat & HCuPINT_ATL) {
		u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
		u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP);
		isp1362_hcd->irq_stat[ISP1362_INT_ATL]++;

		DBG(2, "%s: ATL\n", __func__);

		svc_mask &= ~HCuPINT_ATL;

		/* skip the finished PTDs until they are refilled */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map);
		if (~(done_map | skip_map) == 0)
			isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE);
		if (done_map) {
			DBG(3, "%s: ATL done_map %08x\n", __func__, done_map);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
			start_atl_transfers(isp1362_hcd);
		}
		handled = 1;
	}

	if (irqstat & HCuPINT_OPR) {
		u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT);
		isp1362_hcd->irq_stat[ISP1362_INT_OPR]++;

		svc_mask &= ~HCuPINT_OPR;
		DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb);
		intstat &= isp1362_hcd->intenb;
		if (intstat & OHCI_INTR_UE) {
			pr_err("Unrecoverable error\n");
			/* FIXME: do here reset or cleanup or whatever */
		}
		if (intstat & OHCI_INTR_RHSC) {
			/* cache root-hub state for isp1362_hub_status_data() */
			isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS);
			isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1);
			isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2);
		}
		if (intstat & OHCI_INTR_RD) {
			pr_info("%s: RESUME DETECTED\n", __func__);
			isp1362_show_reg(isp1362_hcd, HCCONTROL);
			usb_hcd_resume_root_hub(hcd);
		}
		/* acknowledge the serviced OHCI-level interrupts */
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat);
		irqstat &= ~HCuPINT_OPR;
		handled = 1;
	}

	if (irqstat & HCuPINT_SUSP) {
		isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++;
		handled = 1;
		svc_mask &= ~HCuPINT_SUSP;

		pr_info("%s: SUSPEND IRQ\n", __func__);
	}

	if (irqstat & HCuPINT_CLKRDY) {
		isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++;
		handled = 1;
		/* CLKRDY is a one-shot notification */
		isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY;
		svc_mask &= ~HCuPINT_CLKRDY;
		pr_info("%s: CLKRDY IRQ\n", __func__);
	}

	/* anything left in svc_mask was asserted but not handled above */
	if (svc_mask)
		pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask);

	/* restore the (possibly modified) interrupt enable mask */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);
	isp1362_hcd->irq_active--;
	spin_unlock(&isp1362_hcd->lock);

	return IRQ_RETVAL(handled);
}
1202
1203/*-------------------------------------------------------------------------*/
1204
1205#define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */
1206static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load)
1207{
1208 int i, branch = -ENOSPC;
1209
1210 /* search for the least loaded schedule branch of that interval
1211 * which has enough bandwidth left unreserved.
1212 */
1213 for (i = 0; i < interval; i++) {
1214 if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) {
1215 int j;
1216
1217 for (j = i; j < PERIODIC_SIZE; j += interval) {
1218 if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) {
1219 pr_err("%s: new load %d load[%02x] %d max %d\n", __func__,
1220 load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD);
1221 break;
1222 }
1223 }
1224 if (j < PERIODIC_SIZE)
1225 continue;
1226 branch = i;
1227 }
1228 }
1229 return branch;
1230}
1231
1232/* NB! ALL the code above this point runs with isp1362_hcd->lock
1233 held, irqs off
1234*/
1235
1236/*-------------------------------------------------------------------------*/
1237
1238static int isp1362_urb_enqueue(struct usb_hcd *hcd,
1239 struct urb *urb,
1240 gfp_t mem_flags)
1241{
1242 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1243 struct usb_device *udev = urb->dev;
1244 unsigned int pipe = urb->pipe;
1245 int is_out = !usb_pipein(pipe);
1246 int type = usb_pipetype(pipe);
1247 int epnum = usb_pipeendpoint(pipe);
1248 struct usb_host_endpoint *hep = urb->ep;
1249 struct isp1362_ep *ep = NULL;
1250 unsigned long flags;
1251 int retval = 0;
1252
1253 DBG(3, "%s: urb %p\n", __func__, urb);
1254
1255 if (type == PIPE_ISOCHRONOUS) {
1256 pr_err("Isochronous transfers not supported\n");
1257 return -ENOSPC;
1258 }
1259
1260 URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__,
1261 usb_pipedevice(pipe), epnum,
1262 is_out ? "out" : "in",
1263 usb_pipecontrol(pipe) ? "ctrl" :
1264 usb_pipeint(pipe) ? "int" :
1265 usb_pipebulk(pipe) ? "bulk" :
1266 "iso",
1267 urb->transfer_buffer_length,
1268 (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "",
1269 !(urb->transfer_flags & URB_SHORT_NOT_OK) ?
1270 "short_ok" : "");
1271
1272 /* avoid all allocations within spinlocks: request or endpoint */
1273 if (!hep->hcpriv) {
1274 ep = kcalloc(1, sizeof *ep, mem_flags);
1275 if (!ep)
1276 return -ENOMEM;
1277 }
1278 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1279
1280 /* don't submit to a dead or disabled port */
1281 if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) &
1282 (1 << USB_PORT_FEAT_ENABLE)) ||
1283 !HC_IS_RUNNING(hcd->state)) {
1284 kfree(ep);
1285 retval = -ENODEV;
1286 goto fail_not_linked;
1287 }
1288
1289 retval = usb_hcd_link_urb_to_ep(hcd, urb);
1290 if (retval) {
1291 kfree(ep);
1292 goto fail_not_linked;
1293 }
1294
1295 if (hep->hcpriv) {
1296 ep = hep->hcpriv;
1297 } else {
1298 INIT_LIST_HEAD(&ep->schedule);
1299 INIT_LIST_HEAD(&ep->active);
1300 INIT_LIST_HEAD(&ep->remove_list);
1301 ep->udev = usb_get_dev(udev);
1302 ep->hep = hep;
1303 ep->epnum = epnum;
1304 ep->maxpacket = usb_maxpacket(udev, urb->pipe, is_out);
1305 ep->ptd_offset = -EINVAL;
1306 ep->ptd_index = -EINVAL;
1307 usb_settoggle(udev, epnum, is_out, 0);
1308
1309 if (type == PIPE_CONTROL)
1310 ep->nextpid = USB_PID_SETUP;
1311 else if (is_out)
1312 ep->nextpid = USB_PID_OUT;
1313 else
1314 ep->nextpid = USB_PID_IN;
1315
1316 switch (type) {
1317 case PIPE_ISOCHRONOUS:
1318 case PIPE_INTERRUPT:
1319 if (urb->interval > PERIODIC_SIZE)
1320 urb->interval = PERIODIC_SIZE;
1321 ep->interval = urb->interval;
1322 ep->branch = PERIODIC_SIZE;
1323 ep->load = usb_calc_bus_time(udev->speed, !is_out,
1324 (type == PIPE_ISOCHRONOUS),
1325 usb_maxpacket(udev, pipe, is_out)) / 1000;
1326 break;
1327 }
1328 hep->hcpriv = ep;
1329 }
1330 ep->num_req = isp1362_hcd->req_serial++;
1331
1332 /* maybe put endpoint into schedule */
1333 switch (type) {
1334 case PIPE_CONTROL:
1335 case PIPE_BULK:
1336 if (list_empty(&ep->schedule)) {
1337 DBG(1, "%s: Adding ep %p req %d to async schedule\n",
1338 __func__, ep, ep->num_req);
1339 list_add_tail(&ep->schedule, &isp1362_hcd->async);
1340 }
1341 break;
1342 case PIPE_ISOCHRONOUS:
1343 case PIPE_INTERRUPT:
1344 urb->interval = ep->interval;
1345
1346 /* urb submitted for already existing EP */
1347 if (ep->branch < PERIODIC_SIZE)
1348 break;
1349
1350 retval = balance(isp1362_hcd, ep->interval, ep->load);
1351 if (retval < 0) {
1352 pr_err("%s: balance returned %d\n", __func__, retval);
1353 goto fail;
1354 }
1355 ep->branch = retval;
1356 retval = 0;
1357 isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1358 DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n",
1359 __func__, isp1362_hcd->fmindex, ep->branch,
1360 ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) &
1361 ~(PERIODIC_SIZE - 1)) + ep->branch,
1362 (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch);
1363
1364 if (list_empty(&ep->schedule)) {
1365 if (type == PIPE_ISOCHRONOUS) {
1366 u16 frame = isp1362_hcd->fmindex;
1367
1368 frame += max_t(u16, 8, ep->interval);
1369 frame &= ~(ep->interval - 1);
1370 frame |= ep->branch;
1371 if (frame_before(frame, isp1362_hcd->fmindex))
1372 frame += ep->interval;
1373 urb->start_frame = frame;
1374
1375 DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep);
1376 list_add_tail(&ep->schedule, &isp1362_hcd->isoc);
1377 } else {
1378 DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep);
1379 list_add_tail(&ep->schedule, &isp1362_hcd->periodic);
1380 }
1381 } else
1382 DBG(1, "%s: ep %p already scheduled\n", __func__, ep);
1383
1384 DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__,
1385 ep->load / ep->interval, isp1362_hcd->load[ep->branch],
1386 isp1362_hcd->load[ep->branch] + ep->load);
1387 isp1362_hcd->load[ep->branch] += ep->load;
1388 }
1389
1390 urb->hcpriv = hep;
1391 ALIGNSTAT(isp1362_hcd, urb->transfer_buffer);
1392
1393 switch (type) {
1394 case PIPE_CONTROL:
1395 case PIPE_BULK:
1396 start_atl_transfers(isp1362_hcd);
1397 break;
1398 case PIPE_INTERRUPT:
1399 start_intl_transfers(isp1362_hcd);
1400 break;
1401 case PIPE_ISOCHRONOUS:
1402 start_iso_transfers(isp1362_hcd);
1403 break;
1404 default:
1405 BUG();
1406 }
1407 fail:
1408 if (retval)
1409 usb_hcd_unlink_urb_from_ep(hcd, urb);
1410
1411
1412 fail_not_linked:
1413 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1414 if (retval)
1415 DBG(0, "%s: urb %p failed with %d\n", __func__, urb, retval);
1416 return retval;
1417}
1418
/*
 * HCD entry point: cancel a queued URB.
 *
 * If the URB is at the head of its endpoint's queue and its PTD is
 * currently active on the chip, it cannot be completed here; the PTD is
 * queued for removal and the interrupt handler finishes the URB later.
 * Otherwise the URB is finished immediately with @status.
 */
static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct usb_host_endpoint *hep;
	unsigned long flags;
	struct isp1362_ep *ep;
	int retval = 0;

	DBG(3, "%s: urb %p\n", __func__, urb);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* verify the URB is still queued and mark it as being unlinked */
	retval = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (retval)
		goto done;

	hep = urb->hcpriv;

	if (!hep) {
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		return -EIDRM;
	}

	ep = hep->hcpriv;
	if (ep) {
		/* In front of queue? */
		if (ep->hep->urb_list.next == &urb->urb_list) {
			if (!list_empty(&ep->active)) {
				DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__,
				    urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
				/* disable processing and queue PTD for removal */
				remove_ptd(isp1362_hcd, ep);
				/* NULL means: completion deferred to the IRQ handler */
				urb = NULL;
			}
		}
		if (urb) {
			DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep,
			    ep->num_req);
			finish_request(isp1362_hcd, ep, urb, status);
		} else
			DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb);
	} else {
		pr_warning("%s: No EP in URB %p\n", __func__, urb);
		retval = -EINVAL;
	}
done:
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	DBG(3, "%s: exit\n", __func__);

	return retval;
}
1470
/*
 * HCD entry point: release all driver state attached to an endpoint.
 *
 * If a PTD of this endpoint is still active on the chip it is queued for
 * removal and this function busy-waits (msleep) until the interrupt
 * handler has drained the active list, then frees the endpoint state.
 */
static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	struct isp1362_ep *ep = hep->hcpriv;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	DBG(1, "%s: ep %p\n", __func__, ep);
	if (!ep)
		return;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	if (!list_empty(&hep->urb_list)) {
		/* only queue the removal once (remove_list non-empty = pending) */
		if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) {
			DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__,
			    ep, ep->num_req, ep->ptd_index, ep->ptd_offset);
			remove_ptd(isp1362_hcd, ep);
			pr_info("%s: Waiting for Interrupt to clean up\n", __func__);
		}
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* Wait for interrupt to clear out active list */
	/* NOTE(review): unbounded wait; relies on the IRQ handler making progress */
	while (!list_empty(&ep->active))
		msleep(1);

	DBG(1, "%s: Freeing EP %p\n", __func__, ep);

	usb_put_dev(ep->udev);
	kfree(ep);
	hep->hcpriv = NULL;
}
1500
1501static int isp1362_get_frame(struct usb_hcd *hcd)
1502{
1503 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1504 u32 fmnum;
1505 unsigned long flags;
1506
1507 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1508 fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM);
1509 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1510
1511 return (int)fmnum;
1512}
1513
1514/*-------------------------------------------------------------------------*/
1515
1516/* Adapted from ohci-hub.c */
1517static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf)
1518{
1519 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
1520 int ports, i, changed = 0;
1521 unsigned long flags;
1522
1523 if (!HC_IS_RUNNING(hcd->state))
1524 return -ESHUTDOWN;
1525
1526 /* Report no status change now, if we are scheduled to be
1527 called later */
1528 if (timer_pending(&hcd->rh_timer))
1529 return 0;
1530
1531 ports = isp1362_hcd->rhdesca & RH_A_NDP;
1532 BUG_ON(ports > 2);
1533
1534 spin_lock_irqsave(&isp1362_hcd->lock, flags);
1535 /* init status */
1536 if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC))
1537 buf[0] = changed = 1;
1538 else
1539 buf[0] = 0;
1540
1541 for (i = 0; i < ports; i++) {
1542 u32 status = isp1362_hcd->rhport[i];
1543
1544 if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC |
1545 RH_PS_OCIC | RH_PS_PRSC)) {
1546 changed = 1;
1547 buf[0] |= 1 << (i + 1);
1548 continue;
1549 }
1550
1551 if (!(status & RH_PS_CCS))
1552 continue;
1553 }
1554 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
1555 return changed;
1556}
1557
/*
 * Build a USB hub descriptor for the root hub from the cached HCRHDESCA
 * register value.
 */
static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd,
				   struct usb_hub_descriptor *desc)
{
	u32 reg = isp1362_hcd->rhdesca;

	DBG(3, "%s: enter\n", __func__);

	desc->bDescriptorType = 0x29;	/* hub descriptor type */
	desc->bDescLength = 9;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = reg & 0x3;	/* NDP field; the chip has at most 2 ports */
	/* Power switching, device type, overcurrent. */
	desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & 0x1f);
	DBG(0, "%s: hubcharacteristics = %02x\n", __func__, cpu_to_le16((reg >> 8) & 0x1f));
	desc->bPwrOn2PwrGood = (reg >> 24) & 0xff;	/* upper byte of HCRHDESCA */
	/* two bitmaps: ports removable, and legacy PortPwrCtrlMask */
	desc->bitmap[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1;
	desc->bitmap[1] = ~0;

	DBG(3, "%s: exit\n", __func__);
}
1579
1580/* Adapted from ohci-hub.c */
/* Adapted from ohci-hub.c */
/*
 * HCD entry point: handle root-hub control requests (hub class requests
 * routed to the virtual root hub).  Port writes go through the HCRHPORTx
 * registers; the cached rhport[] copy is refreshed after each write.
 * Returns 0 on success, -EPIPE ("protocol stall") on bad requests.
 */
static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
			       u16 wIndex, char *buf, u16 wLength)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	int retval = 0;
	unsigned long flags;
	unsigned long t1;
	int ports = isp1362_hcd->rhdesca & RH_A_NDP;
	u32 tmp = 0;

	switch (typeReq) {
	case ClearHubFeature:
		DBG(0, "ClearHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
			_DBG(0, "C_HUB_OVER_CURRENT\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			/* fall through */
		case C_HUB_LOCAL_POWER:
			_DBG(0, "C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case SetHubFeature:
		DBG(0, "SetHubFeature: ");
		switch (wValue) {
		case C_HUB_OVER_CURRENT:
		case C_HUB_LOCAL_POWER:
			/* accepted but no action required */
			_DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
			break;
		default:
			goto error;
		}
		break;
	case GetHubDescriptor:
		DBG(0, "GetHubDescriptor\n");
		isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf);
		break;
	case GetHubStatus:
		DBG(0, "GetHubStatus\n");
		/* root hub never reports local-power/overcurrent status here */
		put_unaligned(cpu_to_le32(0), (__le32 *) buf);
		break;
	case GetPortStatus:
#ifndef VERBOSE
		DBG(0, "GetPortStatus\n");
#endif
		if (!wIndex || wIndex > ports)
			goto error;
		/* wIndex is 1-based on the wire, 0-based in rhport[] */
		tmp = isp1362_hcd->rhport[--wIndex];
		put_unaligned(cpu_to_le32(tmp), (__le32 *) buf);
		break;
	case ClearPortFeature:
		DBG(0, "ClearPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;

		/* map the feature to the write-to-clear bit of HCRHPORTx */
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			_DBG(0, "USB_PORT_FEAT_ENABLE\n");
			tmp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			_DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
			tmp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
			tmp = RH_PS_POCI;
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
			tmp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			tmp = RH_PS_LSDA;

			break;
		case USB_PORT_FEAT_C_CONNECTION:
			_DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
			tmp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			_DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
			tmp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			_DBG(0, "USB_PORT_FEAT_C_RESET\n");
			tmp = RH_PS_PRSC;
			break;
		default:
			goto error;
		}

		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp);
		/* refresh the cached port status after the write */
		isp1362_hcd->rhport[wIndex] =
			isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		break;
	case SetPortFeature:
		DBG(0, "SetPortFeature: ");
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			_DBG(0, "USB_PORT_FEAT_SUSPEND\n");
#ifdef CONFIG_USB_OTG
			/* NOTE(review): this OTG branch references an undefined
			 * 'ohci' and cannot compile; copied from ohci-hub.c */
			if (ohci->hcd.self.otg_port == (wIndex + 1) &&
			    ohci->hcd.self.b_hnp_enable) {
				start_hnp(ohci);
				break;
			}
#endif
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_POWER:
			_DBG(0, "USB_PORT_FEAT_POWER\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
			isp1362_hcd->rhport[wIndex] =
				isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		case USB_PORT_FEAT_RESET:
			_DBG(0, "USB_PORT_FEAT_RESET\n");
			spin_lock_irqsave(&isp1362_hcd->lock, flags);

			t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
			while (time_before(jiffies, t1)) {
				/* spin until any current reset finishes */
				for (;;) {
					tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex);
					if (!(tmp & RH_PS_PRS))
						break;
					udelay(500);
				}
				/* stop once the device is no longer connected */
				if (!(tmp & RH_PS_CCS))
					break;
				/* Reset lasts 10ms (claims datasheet) */
				isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS));

				/* drop the lock while sleeping out the reset pulse */
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				msleep(10);
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
			}

			isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd,
									 HCRHPORT1 + wIndex);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			break;
		default:
			goto error;
		}
		break;

	default:
 error:
		/* "protocol stall" on error */
		_DBG(0, "PROTOCOL STALL\n");
		retval = -EPIPE;
	}

	return retval;
}
1755
1756#ifdef CONFIG_PM
/*
 * HCD entry point: suspend the root hub.
 *
 * Quiesces all four transfer queues (ATL/INTL/ISTL0/ISTL1), reaping any
 * transfers the chip still reports as done, then moves the controller to
 * the OHCI SUSPEND functional state.  Returns 0 on success or -EBUSY if
 * the controller is in RESET/RESUME state or refuses to suspend.
 */
static int isp1362_bus_suspend(struct usb_hcd *hcd)
{
	int status = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;

	/* honor the minimum spacing between root-hub state changes */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
	case OHCI_USB_RESUME:
		DBG(0, "%s: resume/suspend?\n", __func__);
		isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
		isp1362_hcd->hc_control |= OHCI_USB_RESET;
		isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
		/* FALL THROUGH */
	case OHCI_USB_RESET:
		status = -EBUSY;
		pr_warning("%s: needs reinit!\n", __func__);
		goto done;
	case OHCI_USB_SUSPEND:
		pr_warning("%s: already suspended?\n", __func__);
		goto done;
	}
	DBG(0, "%s: suspend root hub\n", __func__);

	/* First stop any processing */
	hcd->state = HC_STATE_QUIESCING;
	if (!list_empty(&isp1362_hcd->atl_queue.active) ||
	    !list_empty(&isp1362_hcd->intl_queue.active) ||
	    !list_empty(&isp1362_hcd->istl_queue[0] .active) ||
	    !list_empty(&isp1362_hcd->istl_queue[1] .active)) {
		int limit;

		/* skip all PTDs and disable buffer processing */
		isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
		isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
		isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0);
		isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);
		isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF);

		/* wait (bounded) for the next start-of-frame to confirm quiesce */
		DBG(0, "%s: stopping schedules ...\n", __func__);
		limit = 2000;
		while (limit > 0) {
			udelay(250);
			limit -= 250;
			if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF)
				break;
		}
		mdelay(7);
		/* reap whatever completed before processing stopped */
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) {
			u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE);
			finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue);
		}
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]);
		if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1)
			finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]);
	}
	DBG(0, "%s: HCINTSTAT: %08x\n", __func__,
	    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	/* acknowledge all pending OHCI-level interrupts */
	isp1362_write_reg32(isp1362_hcd, HCINTSTAT,
			    isp1362_read_reg32(isp1362_hcd, HCINTSTAT));

	/* Suspend hub */
	isp1362_hcd->hc_control = OHCI_USB_SUSPEND;
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);

#if 1
	/* read back to verify the controller really entered SUSPEND */
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) {
		pr_err("%s: controller won't suspend %08x\n", __func__,
		       isp1362_hcd->hc_control);
		status = -EBUSY;
	} else
#endif
	{
		/* no resumes until devices finish suspending */
		isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5);
	}
done:
	if (status == 0) {
		hcd->state = HC_STATE_SUSPENDED;
		DBG(0, "%s: HCD suspended: %08x\n", __func__,
		    isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	return status;
}
1854
/*
 * HCD entry point: resume the root hub.
 *
 * Drives the controller from SUSPEND (or RESUME) back to the OHCI
 * OPERATIONAL state, forcing a resume on every suspended port.  If the
 * controller turns out to have been reset (power lost), it is restarted
 * via isp1362_hc_stop()/isp1362_hc_start() instead.
 */
static int isp1362_bus_resume(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	u32 port;
	unsigned long flags;
	int status = -EINPROGRESS;

	/* honor the minimum spacing between root-hub state changes */
	if (time_before(jiffies, isp1362_hcd->next_statechange))
		msleep(5);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL);
	pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control);
	if (hcd->state == HC_STATE_RESUMING) {
		pr_warning("%s: duplicate resume\n", __func__);
		status = 0;
	} else
		switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) {
		case OHCI_USB_SUSPEND:
			DBG(0, "%s: resume root hub\n", __func__);
			isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS;
			isp1362_hcd->hc_control |= OHCI_USB_RESUME;
			isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
			break;
		case OHCI_USB_RESUME:
			/* HCFS changes sometime after INTR_RD */
			DBG(0, "%s: remote wakeup\n", __func__);
			break;
		case OHCI_USB_OPER:
			/* already running: nothing to resume */
			DBG(0, "%s: odd resume\n", __func__);
			status = 0;
			hcd->self.root_hub->dev.power.power_state = PMSG_ON;
			break;
		default:		/* RESET, we lost power */
			DBG(0, "%s: root hub hardware reset\n", __func__);
			status = -EBUSY;
		}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (status == -EBUSY) {
		/* controller was reset: full restart instead of resume */
		DBG(0, "%s: Restarting HC\n", __func__);
		isp1362_hc_stop(hcd);
		return isp1362_hc_start(hcd);
	}
	if (status != -EINPROGRESS)
		return status;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP;
	while (port--) {
		u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port);

		/* force global, not selective, resume */
		if (!(stat & RH_PS_PSS)) {
			DBG(0, "%s: Not Resuming RH port %d\n", __func__, port);
			continue;
		}
		DBG(0, "%s: Resuming RH port %d\n", __func__, port);
		isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI);
	}
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* Some controllers (lucent) need extra-long delays */
	hcd->state = HC_STATE_RESUMING;
	mdelay(20 /* usb 11.5.1.10 */ + 15);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_show_reg(isp1362_hcd, HCCONTROL);
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	/* TRSMRCY */
	msleep(10);

	/* keep it alive for ~5x suspend + resume costs */
	isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250);

	hcd->self.root_hub->dev.power.power_state = PMSG_ON;
	hcd->state = HC_STATE_RUNNING;
	return 0;
}
1934#else
1935#define isp1362_bus_suspend NULL
1936#define isp1362_bus_resume NULL
1937#endif
1938
1939/*-------------------------------------------------------------------------*/
1940
1941#ifdef STUB_DEBUG_FILE
1942
static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	/* no-op stub: debug /proc file support disabled by STUB_DEBUG_FILE */
}
static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	/* no-op stub: debug /proc file support disabled by STUB_DEBUG_FILE */
}
1949
1950#else
1951
1952#include <linux/proc_fs.h>
1953#include <linux/seq_file.h>
1954
1955static void dump_irq(struct seq_file *s, char *label, u16 mask)
1956{
1957 seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
1958 mask & HCuPINT_CLKRDY ? " clkrdy" : "",
1959 mask & HCuPINT_SUSP ? " susp" : "",
1960 mask & HCuPINT_OPR ? " opr" : "",
1961 mask & HCuPINT_EOT ? " eot" : "",
1962 mask & HCuPINT_ATL ? " atl" : "",
1963 mask & HCuPINT_SOF ? " sof" : "");
1964}
1965
1966static void dump_int(struct seq_file *s, char *label, u32 mask)
1967{
1968 seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask,
1969 mask & OHCI_INTR_MIE ? " MIE" : "",
1970 mask & OHCI_INTR_RHSC ? " rhsc" : "",
1971 mask & OHCI_INTR_FNO ? " fno" : "",
1972 mask & OHCI_INTR_UE ? " ue" : "",
1973 mask & OHCI_INTR_RD ? " rd" : "",
1974 mask & OHCI_INTR_SF ? " sof" : "",
1975 mask & OHCI_INTR_SO ? " so" : "");
1976}
1977
1978static void dump_ctrl(struct seq_file *s, char *label, u32 mask)
1979{
1980 seq_printf(s, "%-15s %08x%s%s%s\n", label, mask,
1981 mask & OHCI_CTRL_RWC ? " rwc" : "",
1982 mask & OHCI_CTRL_RWE ? " rwe" : "",
1983 ({
1984 char *hcfs;
1985 switch (mask & OHCI_CTRL_HCFS) {
1986 case OHCI_USB_OPER:
1987 hcfs = " oper";
1988 break;
1989 case OHCI_USB_RESET:
1990 hcfs = " reset";
1991 break;
1992 case OHCI_USB_RESUME:
1993 hcfs = " resume";
1994 break;
1995 case OHCI_USB_SUSPEND:
1996 hcfs = " suspend";
1997 break;
1998 default:
1999 hcfs = " ?";
2000 }
2001 hcfs;
2002 }));
2003}
2004
/*
 * Dump all ISP1362 registers to the seq_file: first the 32-bit OHCI
 * register set, then the 16-bit chip-specific registers, then the
 * ISTL/INTL/ATL buffer management registers.
 *
 * Caller must hold isp1362_hcd->lock (register access is not atomic).
 */
static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
{
	/* Standard OHCI operational registers (32 bit) */
	seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION),
		   isp1362_read_reg32(isp1362_hcd, HCREVISION));
	seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL),
		   isp1362_read_reg32(isp1362_hcd, HCCONTROL));
	seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCCMDSTAT));
	seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT),
		   isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB),
		   isp1362_read_reg32(isp1362_hcd, HCINTENB));
	seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL),
		   isp1362_read_reg32(isp1362_hcd, HCFMINTVL));
	seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM),
		   isp1362_read_reg32(isp1362_hcd, HCFMREM));
	seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM),
		   isp1362_read_reg32(isp1362_hcd, HCFMNUM));
	seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH),
		   isp1362_read_reg32(isp1362_hcd, HCLSTHRESH));
	seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCA));
	seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB),
		   isp1362_read_reg32(isp1362_hcd, HCRHDESCB));
	seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS),
		   isp1362_read_reg32(isp1362_hcd, HCRHSTATUS));
	seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT1));
	seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2),
		   isp1362_read_reg32(isp1362_hcd, HCRHPORT2));
	seq_printf(s, "\n");
	/* ISP1362-specific configuration/status registers (16 bit) */
	seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG),
		   isp1362_read_reg16(isp1362_hcd, HCHWCFG));
	seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG),
		   isp1362_read_reg16(isp1362_hcd, HCDMACFG));
	seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR),
		   isp1362_read_reg16(isp1362_hcd, HCXFERCTR));
	seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT),
		   isp1362_read_reg16(isp1362_hcd, HCuPINT));
	seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB),
		   isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	seq_printf(s, "HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID),
		   isp1362_read_reg16(isp1362_hcd, HCCHIPID));
	seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH),
		   isp1362_read_reg16(isp1362_hcd, HCSCRATCH));
	seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT),
		   isp1362_read_reg16(isp1362_hcd, HCBUFSTAT));
	seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR),
		   isp1362_read_reg32(isp1362_hcd, HCDIRADDR));
#if 0
	/* Reading HCDIRDATA has side effects on the chip's buffer pointer,
	 * hence disabled here. */
	seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA),
		   isp1362_read_reg16(isp1362_hcd, HCDIRDATA));
#endif
	seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ));
	seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE),
		   isp1362_read_reg16(isp1362_hcd, HCISTLRATE));
	seq_printf(s, "\n");
	/* INTL (interrupt transfer) buffer registers */
	seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ));
	seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ));
	seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCINTLDONE));
	seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCINTLSKIP));
	seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCINTLLAST));
	seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCINTLCURR));
	seq_printf(s, "\n");
	/* ATL (async transfer) buffer registers */
	seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ));
	seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ),
		   isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ));
#if 0
	/* Reading HCATLDONE clears the done map, hence disabled here. */
	seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE),
		   isp1362_read_reg32(isp1362_hcd, HCATLDONE));
#endif
	seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP),
		   isp1362_read_reg32(isp1362_hcd, HCATLSKIP));
	seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST),
		   isp1362_read_reg32(isp1362_hcd, HCATLLAST));
	seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR),
		   isp1362_read_reg16(isp1362_hcd, HCATLCURR));
	seq_printf(s, "\n");
	seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTC));
	seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO),
		   isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
2096
/*
 * seq_file show callback for /proc/driver/isp1362: prints driver version,
 * buffer-alignment/PTD statistics, decoded interrupt and control registers,
 * the full register dump, and the async/periodic/iso endpoint schedules
 * with their queued URBs.
 */
static int proc_isp1362_show(struct seq_file *s, void *unused)
{
	struct isp1362_hcd *isp1362_hcd = s->private;
	struct isp1362_ep *ep;
	int i;

	seq_printf(s, "%s\n%s version %s\n",
		   isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION);

	/* collect statistics to help estimate potential win for
	 * DMA engines that care about alignment (PXA)
	 */
	seq_printf(s, "alignment:  16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n",
		   isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4,
		   isp1362_hcd->stat2, isp1362_hcd->stat1);
	seq_printf(s, "max # ptds in ATL  fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds);
	seq_printf(s, "max # ptds in ISTL fifo: %d\n",
		   max(isp1362_hcd->istl_queue[0] .stat_maxptds,
		       isp1362_hcd->istl_queue[1] .stat_maxptds));

	/* FIXME: don't show the following in suspended state */
	spin_lock_irq(&isp1362_hcd->lock);

	/* Decoded interrupt enable/status and control registers */
	dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB));
	dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT));
	dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB));
	dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT));
	dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL));

	/* Per-source interrupt counters (only nonzero ones) */
	for (i = 0; i < NUM_ISP1362_IRQS; i++)
		if (isp1362_hcd->irq_stat[i])
			seq_printf(s, "%-15s: %d\n",
				   ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]);

	dump_regs(s, isp1362_hcd);
	/* Async (control/bulk) schedule with queued URBs per endpoint */
	list_for_each_entry(ep, &isp1362_hcd->async, schedule) {
		struct urb *urb;

		seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum,
			   ({
				   char *s;
				   /* Translate the next PID token into a direction name */
				   switch (ep->nextpid) {
				   case USB_PID_IN:
					   s = "in";
					   break;
				   case USB_PID_OUT:
					   s = "out";
					   break;
				   case USB_PID_SETUP:
					   s = "setup";
					   break;
				   case USB_PID_ACK:
					   s = "status";
					   break;
				   default:
					   s = "?";
					   break;
				   };
				   s;}), ep->maxpacket) ;
		list_for_each_entry(urb, &ep->hep->urb_list, urb_list) {
			seq_printf(s, "  urb%p, %d/%d\n", urb,
				   urb->actual_length,
				   urb->transfer_buffer_length);
		}
	}
	if (!list_empty(&isp1362_hcd->async))
		seq_printf(s, "\n");
	dump_ptd_queue(&isp1362_hcd->atl_queue);

	seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE);

	/* Periodic (interrupt) schedule */
	list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) {
		seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch,
			   isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset);

		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}
	dump_ptd_queue(&isp1362_hcd->intl_queue);

	seq_printf(s, "ISO:\n");

	/* Isochronous schedule */
	list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) {
		seq_printf(s, "   %d/%p (%sdev%d ep%d%s max %d)\n",
			   ep->interval, ep,
			   (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ",
			   ep->udev->devnum, ep->epnum,
			   (ep->epnum == 0) ? "" :
			   ((ep->nextpid == USB_PID_IN) ?
			    "in" : "out"), ep->maxpacket);
	}

	spin_unlock_irq(&isp1362_hcd->lock);
	seq_printf(s, "\n");

	return 0;
}
2200
2201static int proc_isp1362_open(struct inode *inode, struct file *file)
2202{
2203 return single_open(file, proc_isp1362_show, PDE(inode)->data);
2204}
2205
/* Read-only seq_file operations for the /proc debug entry. */
static const struct file_operations proc_ops = {
	.open = proc_isp1362_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* expect just one isp1362_hcd per system */
static const char proc_filename[] = "driver/isp1362";
2215
/*
 * Create /proc/driver/isp1362 and attach this HCD instance to it.
 * On failure only a warning is printed; the driver works without the entry.
 */
static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
	struct proc_dir_entry *pde;

	pde = create_proc_entry(proc_filename, 0, NULL);
	if (pde == NULL) {
		pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
		return;
	}

	pde->proc_fops = &proc_ops;
	pde->data = isp1362_hcd;
	/* remember the entry so remove_debug_file() knows it exists */
	isp1362_hcd->pde = pde;
}
2230
2231static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
2232{
2233 if (isp1362_hcd->pde)
2234 remove_proc_entry(proc_filename, 0);
2235}
2236
2237#endif
2238
2239/*-------------------------------------------------------------------------*/
2240
/*
 * Perform a software reset of the ISP1362: write the magic value to
 * HCSWRES, then request an OHCI host controller reset via HCCMDSTAT and
 * poll (up to ~19 ms) until the chip clears the OHCI_HCR bit.
 * Runs with the HCD lock held and interrupts disabled; uses mdelay()
 * because it may be called before the chip can generate interrupts.
 */
static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
{
	int tmp = 20;
	unsigned long flags;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC);
	isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR);
	while (--tmp) {
		mdelay(1);
		if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR))
			break;
	}
	/* tmp == 0 means the reset bit never cleared within the poll budget */
	if (!tmp)
		pr_err("Software reset timeout\n");
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2259
2260static int isp1362_mem_config(struct usb_hcd *hcd)
2261{
2262 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2263 unsigned long flags;
2264 u32 total;
2265 u16 istl_size = ISP1362_ISTL_BUFSIZE;
2266 u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE;
2267 u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize;
2268 u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE;
2269 u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize;
2270 u16 atl_size;
2271 int i;
2272
2273 WARN_ON(istl_size & 3);
2274 WARN_ON(atl_blksize & 3);
2275 WARN_ON(intl_blksize & 3);
2276 WARN_ON(atl_blksize < PTD_HEADER_SIZE);
2277 WARN_ON(intl_blksize < PTD_HEADER_SIZE);
2278
2279 BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32);
2280 if (atl_buffers > 32)
2281 atl_buffers = 32;
2282 atl_size = atl_buffers * atl_blksize;
2283 total = atl_size + intl_size + istl_size;
2284 dev_info(hcd->self.controller, "ISP1362 Memory usage:\n");
2285 dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n",
2286 istl_size / 2, istl_size, 0, istl_size / 2);
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002287 dev_info(hcd->self.controller, " INTL: %4d * (%3lu+8): %4d @ $%04x\n",
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002288 ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE,
2289 intl_size, istl_size);
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002290 dev_info(hcd->self.controller, " ATL : %4d * (%3lu+8): %4d @ $%04x\n",
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002291 atl_buffers, atl_blksize - PTD_HEADER_SIZE,
2292 atl_size, istl_size + intl_size);
2293 dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total,
2294 ISP1362_BUF_SIZE - total);
2295
2296 if (total > ISP1362_BUF_SIZE) {
2297 dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n",
2298 __func__, total, ISP1362_BUF_SIZE);
2299 return -ENOMEM;
2300 }
2301
2302 total = istl_size + intl_size + atl_size;
2303 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2304
2305 for (i = 0; i < 2; i++) {
2306 isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2,
2307 isp1362_hcd->istl_queue[i].buf_size = istl_size / 2;
2308 isp1362_hcd->istl_queue[i].blk_size = 4;
2309 INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active);
2310 snprintf(isp1362_hcd->istl_queue[i].name,
2311 sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i);
2312 DBG(3, "%s: %5s buf $%04x %d\n", __func__,
2313 isp1362_hcd->istl_queue[i].name,
2314 isp1362_hcd->istl_queue[i].buf_start,
2315 isp1362_hcd->istl_queue[i].buf_size);
2316 }
2317 isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2);
2318
2319 isp1362_hcd->intl_queue.buf_start = istl_size;
2320 isp1362_hcd->intl_queue.buf_size = intl_size;
2321 isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS;
2322 isp1362_hcd->intl_queue.blk_size = intl_blksize;
2323 isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count;
2324 isp1362_hcd->intl_queue.skip_map = ~0;
2325 INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active);
2326
2327 isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ,
2328 isp1362_hcd->intl_queue.buf_size);
2329 isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ,
2330 isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE);
2331 isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0);
2332 isp1362_write_reg32(isp1362_hcd, HCINTLLAST,
2333 1 << (ISP1362_INTL_BUFFERS - 1));
2334
2335 isp1362_hcd->atl_queue.buf_start = istl_size + intl_size;
2336 isp1362_hcd->atl_queue.buf_size = atl_size;
2337 isp1362_hcd->atl_queue.buf_count = atl_buffers;
2338 isp1362_hcd->atl_queue.blk_size = atl_blksize;
2339 isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count;
2340 isp1362_hcd->atl_queue.skip_map = ~0;
2341 INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active);
2342
2343 isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ,
2344 isp1362_hcd->atl_queue.buf_size);
2345 isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ,
2346 isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE);
2347 isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0);
2348 isp1362_write_reg32(isp1362_hcd, HCATLLAST,
2349 1 << (atl_buffers - 1));
2350
2351 snprintf(isp1362_hcd->atl_queue.name,
2352 sizeof(isp1362_hcd->atl_queue.name), "ATL");
2353 snprintf(isp1362_hcd->intl_queue.name,
2354 sizeof(isp1362_hcd->intl_queue.name), "INTL");
2355 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2356 isp1362_hcd->intl_queue.name,
2357 isp1362_hcd->intl_queue.buf_start,
2358 ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size,
2359 isp1362_hcd->intl_queue.buf_size);
2360 DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__,
2361 isp1362_hcd->atl_queue.name,
2362 isp1362_hcd->atl_queue.buf_start,
2363 atl_buffers, isp1362_hcd->atl_queue.blk_size,
2364 isp1362_hcd->atl_queue.buf_size);
2365
2366 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2367
2368 return 0;
2369}
2370
/*
 * hc_driver .reset callback: reset the chip (via the board's reset hook
 * when available, otherwise a software reset) and wait up to 100 ms for
 * the clock-ready interrupt bit.  Returns 0 on success, -ENODEV if the
 * clock never becomes ready.
 */
static int isp1362_hc_reset(struct usb_hcd *hcd)
{
	int ret = 0;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long t;
	unsigned long timeout = 100;
	unsigned long flags;
	int clkrdy = 0;

	pr_info("%s:\n", __func__);

	if (isp1362_hcd->board && isp1362_hcd->board->reset) {
		/* board hook: assert reset, (re)enable the clock, deassert */
		isp1362_hcd->board->reset(hcd->self.controller, 1);
		msleep(20);
		if (isp1362_hcd->board->clock)
			isp1362_hcd->board->clock(hcd->self.controller, 1);
		isp1362_hcd->board->reset(hcd->self.controller, 0);
	} else
		isp1362_sw_reset(isp1362_hcd);

	/* chip has been reset. First we need to see a clock */
	t = jiffies + msecs_to_jiffies(timeout);
	while (!clkrdy && time_before_eq(jiffies, t)) {
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY;
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
		if (!clkrdy)
			msleep(4);
	}

	/* acknowledge the clock-ready interrupt (write-1-to-clear) */
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	if (!clkrdy) {
		pr_err("Clock not ready after %lums\n", timeout);
		ret = -ENODEV;
	}
	return ret;
}
2410
/*
 * hc_driver .stop callback: mask all chip interrupts, power down the root
 * hub ports, put the chip back into reset and stop its clock.
 */
static void isp1362_hc_stop(struct usb_hcd *hcd)
{
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	u32 tmp;

	pr_info("%s:\n", __func__);

	del_timer_sync(&hcd->rh_timer);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* mask all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* Switch off power for all ports */
	tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);
	tmp &= ~(RH_A_NPS | RH_A_PSM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp);
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);

	/* Reset the chip */
	if (isp1362_hcd->board && isp1362_hcd->board->reset)
		isp1362_hcd->board->reset(hcd->self.controller, 1);
	else
		isp1362_sw_reset(isp1362_hcd);

	if (isp1362_hcd->board && isp1362_hcd->board->clock)
		isp1362_hcd->board->clock(hcd->self.controller, 0);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
}
2442
2443#ifdef CHIP_BUFFER_TEST
/*
 * Optional power-on self test of the chip's buffer memory (compiled in
 * only with CHIP_BUFFER_TEST).  Exercises byte-granular accesses at all
 * four byte offsets, a full-buffer write/read compare, and PTD-header
 * sized transfers at 256 different offsets.  Returns 0 on success or
 * -ENODEV when any compare fails; kmalloc failure silently skips the
 * test (returns 0).
 */
static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd)
{
	int ret = 0;
	u16 *ref;
	unsigned long flags;

	/* first half: reference pattern; second half: read-back scratch */
	ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL);
	if (ref) {
		int offset;
		u16 *tst = &ref[ISP1362_BUF_SIZE / 2];

		for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) {
			ref[offset] = ~offset;
			tst[offset] = offset;
		}

		/* short transfers (0..7 bytes) at each of the 4 byte offsets */
		for (offset = 0; offset < 4; offset++) {
			int j;

			for (j = 0; j < 8; j++) {
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j);
				isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

				if (memcmp(ref, tst, j)) {
					ret = -ENODEV;
					pr_err("%s: memory check with %d byte offset %d failed\n",
					    __func__, j, offset);
					dump_data((u8 *)ref + offset, j);
					dump_data((u8 *)tst + offset, j);
				}
			}
		}

		/* full-buffer write/read compare */
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE);
		isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

		if (memcmp(ref, tst, ISP1362_BUF_SIZE)) {
			ret = -ENODEV;
			pr_err("%s: memory check failed\n", __func__);
			dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2);
		}

		/* PTD-header transfers at 256 word offsets */
		for (offset = 0; offset < 256; offset++) {
			/* NOTE(review): test_size stays 0 throughout, so only the
			 * PTD header itself is exercised here — confirm intent */
			int test_size = 0;

			yield();

			/* verify the buffer can be cleared before each round */
			memset(tst, 0, ISP1362_BUF_SIZE);
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))),
				   ISP1362_BUF_SIZE / 2)) {
				pr_err("%s: Failed to clear buffer\n", __func__);
				dump_data((u8 *)tst, ISP1362_BUF_SIZE);
				break;
			}
			spin_lock_irqsave(&isp1362_hcd->lock, flags);
			isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE);
			isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref),
					     offset * 2 + PTD_HEADER_SIZE, test_size);
			isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
					    PTD_HEADER_SIZE + test_size);
			spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
			if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
				dump_data(((u8 *)ref) + offset, PTD_HEADER_SIZE + test_size);
				dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size);
				/* retry the read once before declaring failure */
				spin_lock_irqsave(&isp1362_hcd->lock, flags);
				isp1362_read_buffer(isp1362_hcd, tst, offset * 2,
						    PTD_HEADER_SIZE + test_size);
				spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
				if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) {
					ret = -ENODEV;
					pr_err("%s: memory check with offset %02x failed\n",
					    __func__, offset);
					break;
				}
				pr_warning("%s: memory check with offset %02x ok after second read\n",
				     __func__, offset);
			}
		}
		kfree(ref);
	}
	return ret;
}
2534#endif
2535
/*
 * hc_driver .start callback: verify the chip ID, (optionally) self-test
 * the buffer memory, program the hardware configuration from platform
 * data, partition the buffer memory, configure the root hub, enable
 * interrupts and put the controller into the OPERATIONAL state.
 * Returns 0 on success or a negative errno.
 */
static int isp1362_hc_start(struct usb_hcd *hcd)
{
	int ret;
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	struct isp1362_platform_data *board = isp1362_hcd->board;
	u16 hwcfg;
	u16 chipid;
	unsigned long flags;

	pr_info("%s:\n", __func__);

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	/* sanity check: make sure we are really talking to an ISP1362 */
	if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) {
		pr_err("%s: Invalid chip ID %04x\n", __func__, chipid);
		return -ENODEV;
	}

#ifdef CHIP_BUFFER_TEST
	ret = isp1362_chip_test(isp1362_hcd);
	if (ret)
		return -ENODEV;
#endif
	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* clear interrupt status and disable all interrupt sources */
	isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0);

	/* HW conf: build HCHWCFG from the board's platform data flags */
	hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1);
	if (board->sel15Kres)
		hwcfg |= HCHWCFG_PULLDOWN_DS2 |
			((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0);
	if (board->clknotstop)
		hwcfg |= HCHWCFG_CLKNOTSTOP;
	if (board->oc_enable)
		hwcfg |= HCHWCFG_ANALOG_OC;
	if (board->int_act_high)
		hwcfg |= HCHWCFG_INT_POL;
	if (board->int_edge_triggered)
		hwcfg |= HCHWCFG_INT_TRIGGER;
	if (board->dreq_act_high)
		hwcfg |= HCHWCFG_DREQ_POL;
	if (board->dack_act_high)
		hwcfg |= HCHWCFG_DACK_POL;
	isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg);
	isp1362_show_reg(isp1362_hcd, HCHWCFG);
	isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0);
	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	ret = isp1362_mem_config(hcd);
	if (ret)
		return ret;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);

	/* Root hub conf: power switching and power-on-to-good time */
	isp1362_hcd->rhdesca = 0;
	if (board->no_power_switching)
		isp1362_hcd->rhdesca |= RH_A_NPS;
	if (board->power_switching_mode)
		isp1362_hcd->rhdesca |= RH_A_PSM;
	if (board->potpg)
		isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT;
	else
		/* default POTPGT: 25 * 2ms = 50ms */
		isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT;

	/* toggle OCPM, then read back what the chip actually accepted */
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM);
	isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM);
	isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA);

	isp1362_hcd->rhdescb = RH_B_PPCM;
	isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb);
	isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB);

	/* frame interval / largest-packet and low-speed threshold */
	isp1362_read_reg32(isp1362_hcd, HCFMINTVL);
	isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI);
	isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	isp1362_hcd->hc_control = OHCI_USB_OPER;
	hcd->state = HC_STATE_RUNNING;

	spin_lock_irqsave(&isp1362_hcd->lock, flags);
	/* Set up interrupts */
	isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE;
	isp1362_hcd->intenb |= OHCI_INTR_RD;
	isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP;
	isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb);
	isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb);

	/* Go operational */
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);
	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}
2639
2640/*-------------------------------------------------------------------------*/
2641
/* usb_hcd operations table for the ISP1362 (full/low-speed, USB 1.1). */
static struct hc_driver isp1362_hc_driver = {
	.description =		hcd_name,
	.product_desc =		"ISP1362 Host Controller",
	.hcd_priv_size =	sizeof(struct isp1362_hcd),

	.irq =			isp1362_irq,
	.flags =		HCD_USB11 | HCD_MEMORY,

	.reset =		isp1362_hc_reset,
	.start =		isp1362_hc_start,
	.stop =			isp1362_hc_stop,

	.urb_enqueue =		isp1362_urb_enqueue,
	.urb_dequeue =		isp1362_urb_dequeue,
	.endpoint_disable =	isp1362_endpoint_disable,

	.get_frame_number =	isp1362_get_frame,

	.hub_status_data =	isp1362_hub_status_data,
	.hub_control =		isp1362_hub_control,
	.bus_suspend =		isp1362_bus_suspend,
	.bus_resume =		isp1362_bus_resume,
};
2665
2666/*-------------------------------------------------------------------------*/
2667
/* Length in bytes of a struct resource (start and end are both inclusive). */
#define resource_len(r) (((r)->end - (r)->start) + 1)
2669
2670static int __devexit isp1362_remove(struct platform_device *pdev)
2671{
2672 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2673 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2674 struct resource *res;
2675
2676 remove_debug_file(isp1362_hcd);
2677 DBG(0, "%s: Removing HCD\n", __func__);
2678 usb_remove_hcd(hcd);
2679
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002680 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__,
2681 isp1362_hcd->data_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002682 iounmap(isp1362_hcd->data_reg);
2683
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002684 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__,
2685 isp1362_hcd->addr_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002686 iounmap(isp1362_hcd->addr_reg);
2687
2688 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2689 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2690 if (res)
2691 release_mem_region(res->start, resource_len(res));
2692
2693 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2694 DBG(0, "%s: release mem_region: %08lx\n", __func__, (long unsigned int)res->start);
2695 if (res)
2696 release_mem_region(res->start, resource_len(res));
2697
2698 DBG(0, "%s: put_hcd\n", __func__);
2699 usb_put_hcd(hcd);
2700 DBG(0, "%s: Done\n", __func__);
2701
2702 return 0;
2703}
2704
2705static int __init isp1362_probe(struct platform_device *pdev)
2706{
2707 struct usb_hcd *hcd;
2708 struct isp1362_hcd *isp1362_hcd;
2709 struct resource *addr, *data;
2710 void __iomem *addr_reg;
2711 void __iomem *data_reg;
2712 int irq;
2713 int retval = 0;
2714
2715 /* basic sanity checks first. board-specific init logic should
2716 * have initialized this the three resources and probably board
2717 * specific platform_data. we don't probe for IRQs, and do only
2718 * minimal sanity checking.
2719 */
2720 if (pdev->num_resources < 3) {
2721 retval = -ENODEV;
2722 goto err1;
2723 }
2724
2725 data = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2726 addr = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2727 irq = platform_get_irq(pdev, 0);
2728 if (!addr || !data || irq < 0) {
2729 retval = -ENODEV;
2730 goto err1;
2731 }
2732
2733#ifdef CONFIG_USB_HCD_DMA
2734 if (pdev->dev.dma_mask) {
2735 struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
2736
2737 if (!dma_res) {
2738 retval = -ENODEV;
2739 goto err1;
2740 }
2741 isp1362_hcd->data_dma = dma_res->start;
2742 isp1362_hcd->max_dma_size = resource_len(dma_res);
2743 }
2744#else
2745 if (pdev->dev.dma_mask) {
2746 DBG(1, "won't do DMA");
2747 retval = -ENODEV;
2748 goto err1;
2749 }
2750#endif
2751
2752 if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
2753 retval = -EBUSY;
2754 goto err1;
2755 }
2756 addr_reg = ioremap(addr->start, resource_len(addr));
2757 if (addr_reg == NULL) {
2758 retval = -ENOMEM;
2759 goto err2;
2760 }
2761
2762 if (!request_mem_region(data->start, resource_len(data), hcd_name)) {
2763 retval = -EBUSY;
2764 goto err3;
2765 }
2766 data_reg = ioremap(data->start, resource_len(data));
2767 if (data_reg == NULL) {
2768 retval = -ENOMEM;
2769 goto err4;
2770 }
2771
2772 /* allocate and initialize hcd */
2773 hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev));
2774 if (!hcd) {
2775 retval = -ENOMEM;
2776 goto err5;
2777 }
2778 hcd->rsrc_start = data->start;
2779 isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2780 isp1362_hcd->data_reg = data_reg;
2781 isp1362_hcd->addr_reg = addr_reg;
2782
2783 isp1362_hcd->next_statechange = jiffies;
2784 spin_lock_init(&isp1362_hcd->lock);
2785 INIT_LIST_HEAD(&isp1362_hcd->async);
2786 INIT_LIST_HEAD(&isp1362_hcd->periodic);
2787 INIT_LIST_HEAD(&isp1362_hcd->isoc);
2788 INIT_LIST_HEAD(&isp1362_hcd->remove_list);
2789 isp1362_hcd->board = pdev->dev.platform_data;
2790#if USE_PLATFORM_DELAY
2791 if (!isp1362_hcd->board->delay) {
2792 dev_err(hcd->self.controller, "No platform delay function given\n");
2793 retval = -ENODEV;
2794 goto err6;
2795 }
2796#endif
2797
2798#ifdef CONFIG_ARM
2799 if (isp1362_hcd->board)
2800 set_irq_type(irq, isp1362_hcd->board->int_act_high ? IRQT_RISING : IRQT_FALLING);
2801#endif
2802
2803 retval = usb_add_hcd(hcd, irq, IRQF_TRIGGER_LOW | IRQF_DISABLED | IRQF_SHARED);
2804 if (retval != 0)
2805 goto err6;
2806 pr_info("%s, irq %d\n", hcd->product_desc, irq);
2807
2808 create_debug_file(isp1362_hcd);
2809
2810 return 0;
2811
2812 err6:
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002813 DBG(0, "%s: Freeing dev %p\n", __func__, isp1362_hcd);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002814 usb_put_hcd(hcd);
2815 err5:
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002816 DBG(0, "%s: Unmapping data_reg @ %p\n", __func__, data_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002817 iounmap(data_reg);
2818 err4:
2819 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)data->start);
2820 release_mem_region(data->start, resource_len(data));
2821 err3:
Mike Frysingerb0a9cf22009-10-07 04:29:31 -04002822 DBG(0, "%s: Unmapping addr_reg @ %p\n", __func__, addr_reg);
Lothar Wassmanna9d43092009-07-16 20:51:21 -04002823 iounmap(addr_reg);
2824 err2:
2825 DBG(0, "%s: Releasing mem region %08lx\n", __func__, (long unsigned int)addr->start);
2826 release_mem_region(addr->start, resource_len(addr));
2827 err1:
2828 pr_err("%s: init error, %d\n", __func__, retval);
2829
2830 return retval;
2831}
2832
2833#ifdef CONFIG_PM
/*
 * Platform driver .suspend: for PM_EVENT_FREEZE suspend the root hub via
 * the bus_suspend path; otherwise just cut port power (RH_HS_LPS).
 * Records the new power state on success.
 */
static int isp1362_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
	unsigned long flags;
	int retval = 0;

	DBG(0, "%s: Suspending device\n", __func__);

	if (state.event == PM_EVENT_FREEZE) {
		DBG(0, "%s: Suspending root hub\n", __func__);
		retval = isp1362_bus_suspend(hcd);
	} else {
		DBG(0, "%s: Suspending RH ports\n", __func__);
		spin_lock_irqsave(&isp1362_hcd->lock, flags);
		isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS);
		spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
	}
	if (retval == 0)
		pdev->dev.power.power_state = state;
	return retval;
}
2856
2857static int isp1362_resume(struct platform_device *pdev)
2858{
2859 struct usb_hcd *hcd = platform_get_drvdata(pdev);
2860 struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);
2861 unsigned long flags;
2862
2863 DBG(0, "%s: Resuming\n", __func__);
2864
2865 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2866 DBG(0, "%s: Resume RH ports\n", __func__);
2867 spin_lock_irqsave(&isp1362_hcd->lock, flags);
2868 isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC);
2869 spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
2870 return 0;
2871 }
2872
2873 pdev->dev.power.power_state = PMSG_ON;
2874
2875 return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd));
2876}
2877#else
2878#define isp1362_suspend NULL
2879#define isp1362_resume NULL
2880#endif
2881
/*
 * Platform-bus glue for the ISP1362 HCD.  Uses the legacy (non-dev_pm_ops)
 * suspend/resume hooks; both are compiled out (NULL) unless CONFIG_PM is set.
 */
static struct platform_driver isp1362_driver = {
	.probe = isp1362_probe,
	.remove = __devexit_p(isp1362_remove),

	.suspend = isp1362_suspend,
	.resume = isp1362_resume,
	.driver = {
		/* hcd_name is const elsewhere; cast matches the old .name type */
		.name = (char *)hcd_name,
		.owner = THIS_MODULE,
	},
};
2893
2894/*-------------------------------------------------------------------------*/
2895
2896static int __init isp1362_init(void)
2897{
2898 if (usb_disabled())
2899 return -ENODEV;
2900 pr_info("driver %s, %s\n", hcd_name, DRIVER_VERSION);
2901 return platform_driver_register(&isp1362_driver);
2902}
2903module_init(isp1362_init);
2904
/* Module exit point: unregister the platform driver registered at init. */
static void __exit isp1362_cleanup(void)
{
	platform_driver_unregister(&isp1362_driver);
}
module_exit(isp1362_cleanup);