Diffstat (limited to 'drivers/usb/musb')
-rw-r--r--  drivers/usb/musb/davinci.c           |  2
-rw-r--r--  drivers/usb/musb/musb_debugfs.c      |  6
-rw-r--r--  drivers/usb/musb/musb_gadget.c       | 73
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c   |  8
-rw-r--r--  drivers/usb/musb/musb_host.c         |  2
-rw-r--r--  drivers/usb/musb/musbhsdma.c         |  2
6 files changed, 56 insertions(+), 37 deletions(-)
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 8bdf25a8b023..f9a3f62a83b5 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -35,7 +35,7 @@
#include <mach/hardware.h>
#include <mach/memory.h>
-#include <mach/gpio.h>
+#include <asm/gpio.h>
#include <mach/cputype.h>
#include <asm/mach-types.h>
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index b0176e4569e0..61f4ee466df7 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -41,12 +41,6 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#ifdef CONFIG_ARM
-#include <mach/hardware.h>
-#include <mach/memory.h>
-#include <asm/mach-types.h>
-#endif
-
#include <asm/uaccess.h>
#include "musb_core.h"
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index e81820370d6f..ae4a20acef6c 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -634,6 +634,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
u16 len;
u16 csr = musb_readw(epio, MUSB_RXCSR);
struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
+ u8 use_mode_1;
if (hw_ep->is_shared_fifo)
musb_ep = &hw_ep->ep_in;
@@ -683,6 +684,18 @@ static void rxstate(struct musb *musb, struct musb_request *req)
if (csr & MUSB_RXCSR_RXPKTRDY) {
len = musb_readw(epio, MUSB_RXCOUNT);
+
+ /*
+ * Enable Mode 1 on RX transfers only when short_not_ok flag
+ * is set. Currently short_not_ok flag is set only from
+ * file_storage and f_mass_storage drivers
+ */
+
+ if (request->short_not_ok && len == musb_ep->packet_sz)
+ use_mode_1 = 1;
+ else
+ use_mode_1 = 0;
+
if (request->actual < request->length) {
#ifdef CONFIG_USB_INVENTRA_DMA
if (is_buffer_mapped(req)) {
@@ -704,7 +717,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
* most these gadgets, end of is signified either by a short packet,
* or filling the last byte of the buffer. (Sending extra data in
* that last pckate should trigger an overflow fault.) But in mode 1,
- * we don't get DMA completion interrrupt for short packets.
+ * we don't get DMA completion interrupt for short packets.
*
* Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1),
* to get endpoint interrupt on every DMA req, but that didn't seem
@@ -714,37 +727,41 @@ static void rxstate(struct musb *musb, struct musb_request *req)
* then becomes usable as a runtime "use mode 1" hint...
*/
- csr |= MUSB_RXCSR_DMAENAB;
-#ifdef USE_MODE1
- csr |= MUSB_RXCSR_AUTOCLEAR;
- /* csr |= MUSB_RXCSR_DMAMODE; */
-
- /* this special sequence (enabling and then
- * disabling MUSB_RXCSR_DMAMODE) is required
- * to get DMAReq to activate
- */
- musb_writew(epio, MUSB_RXCSR,
- csr | MUSB_RXCSR_DMAMODE);
-#else
- if (!musb_ep->hb_mult &&
- musb_ep->hw_ep->rx_double_buffered)
+ /* Experimental: Mode1 works with mass storage use cases */
+ if (use_mode_1) {
csr |= MUSB_RXCSR_AUTOCLEAR;
-#endif
- musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ /*
+ * this special sequence (enabling and then
+ * disabling MUSB_RXCSR_DMAMODE) is required
+ * to get DMAReq to activate
+ */
+ musb_writew(epio, MUSB_RXCSR,
+ csr | MUSB_RXCSR_DMAMODE);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ } else {
+ if (!musb_ep->hb_mult &&
+ musb_ep->hw_ep->rx_double_buffered)
+ csr |= MUSB_RXCSR_AUTOCLEAR;
+ csr |= MUSB_RXCSR_DMAENAB;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
if (request->actual < request->length) {
int transfer_size = 0;
-#ifdef USE_MODE1
- transfer_size = min(request->length - request->actual,
- channel->max_len);
-#else
- transfer_size = min(request->length - request->actual,
- (unsigned)len);
-#endif
- if (transfer_size <= musb_ep->packet_sz)
- musb_ep->dma->desired_mode = 0;
- else
+ if (use_mode_1) {
+ transfer_size = min(request->length - request->actual,
+ channel->max_len);
musb_ep->dma->desired_mode = 1;
+ } else {
+ transfer_size = min(request->length - request->actual,
+ (unsigned)len);
+ musb_ep->dma->desired_mode = 0;
+ }
use_dma = c->channel_program(
channel,
@@ -1020,7 +1037,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
goto fail;
/* REVISIT this rules out high bandwidth periodic transfers */
- tmp = le16_to_cpu(desc->wMaxPacketSize);
+ tmp = usb_endpoint_maxp(desc);
if (tmp & ~0x07ff) {
int ok;
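
For illustration, the net effect of the rxstate() changes above can be condensed into a standalone sketch. The types below are simplified stand-ins for the driver's request/endpoint state, not the real musb definitions; the decision itself follows the hunks above: a full-size packet on a request with short_not_ok set selects Inventra DMA mode 1 over the whole remaining buffer, anything else falls back to mode 0 for a single packet.

    /* Simplified stand-in for the fields rxstate() consults. */
    struct rx_request {
            unsigned length;        /* total bytes requested        */
            unsigned actual;        /* bytes already transferred    */
            int      short_not_ok;  /* gadget rejects short packets */
    };

    /*
     * Pick the DMA mode and programmed length for one RX request.
     * rxcount is MUSB_RXCOUNT (bytes sitting in the FIFO), packet_sz the
     * endpoint's max packet size, channel_max the DMA channel limit.
     */
    static unsigned rx_dma_transfer_size(const struct rx_request *req,
                                         unsigned packet_sz, unsigned rxcount,
                                         unsigned channel_max, int *desired_mode)
    {
            unsigned remaining = req->length - req->actual;

            if (req->short_not_ok && rxcount == packet_sz) {
                    *desired_mode = 1;      /* mode 1: no per-packet IRQ */
                    return remaining < channel_max ? remaining : channel_max;
            }

            *desired_mode = 0;              /* mode 0: one packet at a time */
            return remaining < rxcount ? remaining : rxcount;
    }

In the driver itself, mode 1 additionally needs MUSB_RXCSR_AUTOCLEAR and the enable-then-clear MUSB_RXCSR_DMAMODE write sequence shown above before DMAReq will activate.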
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 9378b359c1f0..6a0d0467ec74 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -679,6 +679,14 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
musb_readb(mbase, MUSB_FADDR),
decode_ep0stage(musb->ep0_state));
+ if (csr & MUSB_CSR0_P_DATAEND) {
+ /*
+ * If DATAEND is set we should not call the callback,
+ * hence the status stage is not complete.
+ */
+ return IRQ_HANDLED;
+ }
+
/* I sent a stall.. need to acknowledge it now.. */
if (csr & MUSB_CSR0_P_SENTSTALL) {
musb_writew(regs, MUSB_CSR0,
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 8b2473fa0f47..60ddba8066ea 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1932,7 +1932,7 @@ static int musb_urb_enqueue(
INIT_LIST_HEAD(&qh->ring);
qh->is_ready = 1;
- qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);
+ qh->maxpacket = usb_endpoint_maxp(epd);
qh->type = usb_endpoint_type(epd);
/* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier.
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index f70c5a577736..57a608584e16 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -408,7 +408,7 @@ dma_controller_create(struct musb *musb, void __iomem *base)
controller->controller.channel_program = dma_channel_program;
controller->controller.channel_abort = dma_channel_abort;
- if (request_irq(irq, dma_controller_irq, IRQF_DISABLED,
+ if (request_irq(irq, dma_controller_irq, 0,
dev_name(musb->controller), &controller->controller)) {
dev_err(dev, "request_irq %d failed!\n", irq);
dma_controller_destroy(&controller->controller);
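
The musbhsdma.c hunk passes 0 instead of IRQF_DISABLED to request_irq(); by this point IRQF_DISABLED had become a no-op (handlers already run with interrupts off) and was headed for removal. A minimal sketch of the same call pattern with default flags; the handler name, device string and cookie below are hypothetical, not taken from this driver:

    #include <linux/interrupt.h>

    static irqreturn_t example_dma_irq(int irq, void *data)
    {
            /* acknowledge and dispatch the controller interrupt here */
            return IRQ_HANDLED;
    }

    static int example_setup_irq(int irq, void *controller)
    {
            /* flags == 0: no special IRQ behavior requested */
            return request_irq(irq, example_dma_irq, 0,
                               "example-dma", controller);
    }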