author		David S. Miller <davem@davemloft.net>	2009-03-01 21:35:16 -0800
committer	David S. Miller <davem@davemloft.net>	2009-03-01 21:35:16 -0800
commit		aa4abc9bcce0d2a7ec189e897f8f8c58ca04643b (patch)
tree		22ef88d84a2e06380bb6a853c3ba28657e4e5f92 /drivers/usb/musb/musb_host.c
parent		814c01dc7c533033b4e99981a2e24a6195bfb43c (diff)
parent		52c0326beaa3cb0049d0f1c51c6ad5d4a04e4430 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/wireless/iwlwifi/iwl-tx.c
	net/8021q/vlan_core.c
	net/core/dev.c
Diffstat (limited to 'drivers/usb/musb/musb_host.c')
-rw-r--r--	drivers/usb/musb/musb_host.c	93
1 file changed, 58 insertions(+), 35 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index a035ceccf950..6dbbd0786a6a 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -335,16 +335,11 @@ musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
- int is_in;
struct musb_hw_ep *ep = qh->hw_ep;
struct musb *musb = ep->musb;
+ int is_in = usb_pipein(urb->pipe);
int ready = qh->is_ready;
- if (ep->is_shared_fifo)
- is_in = 1;
- else
- is_in = usb_pipein(urb->pipe);
-
/* save toggle eagerly, for paranoia */
switch (qh->type) {
case USB_ENDPOINT_XFER_BULK:
@@ -432,7 +427,7 @@ musb_advance_schedule(struct musb *musb, struct urb *urb,
else
qh = musb_giveback(qh, urb, urb->status);
- if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+ if (qh != NULL && qh->is_ready) {
DBG(4, "... next ep%d %cX urb %p\n",
hw_ep->epnum, is_in ? 'R' : 'T',
next_urb(qh));
@@ -942,8 +937,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
switch (musb->ep0_stage) {
case MUSB_EP0_IN:
fifo_dest = urb->transfer_buffer + urb->actual_length;
- fifo_count = min(len, ((u16) (urb->transfer_buffer_length
- - urb->actual_length)));
+ fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
+ urb->actual_length);
if (fifo_count < len)
urb->status = -EOVERFLOW;
@@ -976,10 +971,9 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
}
/* FALLTHROUGH */
case MUSB_EP0_OUT:
- fifo_count = min(qh->maxpacket, ((u16)
- (urb->transfer_buffer_length
- - urb->actual_length)));
-
+ fifo_count = min_t(size_t, qh->maxpacket,
+ urb->transfer_buffer_length -
+ urb->actual_length);
if (fifo_count) {
fifo_dest = (u8 *) (urb->transfer_buffer
+ urb->actual_length);
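The hunk above swaps min() with a u16 cast for min_t(size_t, ...), so the remaining-bytes computation is done in a type wide enough to hold urb->transfer_buffer_length. A standalone sketch of the failure mode the cast invites (not part of the patch; it uses plain C types and hypothetical values so it compiles outside the kernel tree):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical values: a buffer just past the 64 KiB boundary. */
	uint32_t transfer_buffer_length = 65600;
	uint32_t actual_length = 0;
	uint16_t len = 512;			/* bytes waiting in the FIFO */

	/* Old style: the u16 cast wraps 65600 to 64, so the comparison picks
	 * 64, and since 64 < len the caller would flag a bogus -EOVERFLOW. */
	uint16_t wrapped = (uint16_t)(transfer_buffer_length - actual_length);
	uint16_t old_count = len < wrapped ? len : wrapped;

	/* New style: compare in size_t, as min_t(size_t, ...) does. */
	size_t remaining = transfer_buffer_length - actual_length;
	size_t new_count = remaining < len ? remaining : len;

	printf("old fifo_count=%u, new fifo_count=%zu\n", old_count, new_count);
	return 0;
}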
@@ -1161,7 +1155,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
struct urb *urb;
struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
void __iomem *epio = hw_ep->regs;
- struct musb_qh *qh = hw_ep->out_qh;
+ struct musb_qh *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
+ : hw_ep->out_qh;
u32 status = 0;
void __iomem *mbase = musb->mregs;
struct dma_channel *dma;
@@ -1308,7 +1303,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
* packets before updating TXCSR ... other docs disagree ...
*/
/* PIO: start next packet in this URB */
- wLength = min(qh->maxpacket, (u16) wLength);
+ if (wLength > qh->maxpacket)
+ wLength = qh->maxpacket;
musb_write_fifo(hw_ep, wLength, buf);
qh->segsize = wLength;
@@ -1867,19 +1863,21 @@ static int musb_urb_enqueue(
}
qh->type_reg = type_reg;
- /* precompute rxinterval/txinterval register */
- interval = min((u8)16, epd->bInterval); /* log encoding */
+ /* Precompute RXINTERVAL/TXINTERVAL register */
switch (qh->type) {
case USB_ENDPOINT_XFER_INT:
- /* fullspeed uses linear encoding */
- if (USB_SPEED_FULL == urb->dev->speed) {
- interval = epd->bInterval;
- if (!interval)
- interval = 1;
+ /*
+ * Full/low speeds use the linear encoding,
+ * high speed uses the logarithmic encoding.
+ */
+ if (urb->dev->speed <= USB_SPEED_FULL) {
+ interval = max_t(u8, epd->bInterval, 1);
+ break;
}
/* FALLTHROUGH */
case USB_ENDPOINT_XFER_ISOC:
- /* iso always uses log encoding */
+ /* ISO always uses logarithmic encoding */
+ interval = min_t(u8, epd->bInterval, 16);
break;
default:
/* REVISIT we actually want to use NAK limits, hinting to the
@@ -2037,9 +2035,9 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
/* Any URB not actively programmed into endpoint hardware can be
- * immediately given back. Such an URB must be at the head of its
+ * immediately given back; that's any URB not at the head of an
* endpoint queue, unless someday we get real DMA queues. And even
- * then, it might not be known to the hardware...
+ * if it's at the head, it might not be known to the hardware...
*
* Otherwise abort current transfer, pending dma, etc.; urb->status
* has already been updated. This is a synchronous abort; it'd be
@@ -2078,6 +2076,15 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
qh->is_ready = 0;
__musb_giveback(musb, urb, 0);
qh->is_ready = ready;
+
+ /* If nothing else (usually musb_giveback) is using it
+ * and its URB list has emptied, recycle this qh.
+ */
+ if (ready && list_empty(&qh->hep->urb_list)) {
+ qh->hep->hcpriv = NULL;
+ list_del(&qh->ring);
+ kfree(qh);
+ }
} else
ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
done:
@@ -2093,15 +2100,16 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
unsigned long flags;
struct musb *musb = hcd_to_musb(hcd);
u8 is_in = epnum & USB_DIR_IN;
- struct musb_qh *qh = hep->hcpriv;
- struct urb *urb, *tmp;
+ struct musb_qh *qh;
+ struct urb *urb;
struct list_head *sched;
- if (!qh)
- return;
-
spin_lock_irqsave(&musb->lock, flags);
+ qh = hep->hcpriv;
+ if (qh == NULL)
+ goto exit;
+
switch (qh->type) {
case USB_ENDPOINT_XFER_CONTROL:
sched = &musb->control;
@@ -2135,13 +2143,28 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
/* cleanup */
musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
- } else
- urb = NULL;
- /* then just nuke all the others */
- list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
- musb_giveback(qh, urb, -ESHUTDOWN);
+ /* Then nuke all the others ... and advance the
+ * queue on hw_ep (e.g. bulk ring) when we're done.
+ */
+ while (!list_empty(&hep->urb_list)) {
+ urb = next_urb(qh);
+ urb->status = -ESHUTDOWN;
+ musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
+ }
+ } else {
+ /* Just empty the queue; the hardware is busy with
+ * other transfers, and since !qh->is_ready nothing
+ * will activate any of these as it advances.
+ */
+ while (!list_empty(&hep->urb_list))
+ __musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+ hep->hcpriv = NULL;
+ list_del(&qh->ring);
+ kfree(qh);
+ }
+exit:
spin_unlock_irqrestore(&musb->lock, flags);
}