Diffstat (limited to 'drivers/usb/musb/musb_host.c'):
 drivers/usb/musb/musb_host.c | 264
 1 file changed, 196 insertions(+), 68 deletions(-)
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 4bb717d0bd4..d385e8a8187 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -693,6 +693,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
void __iomem *epio = hw_ep->regs;
struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out);
u16 packet_sz = qh->maxpacket;
+ u8 use_dma = 1;
+ u16 csr;
dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
"h_addr%02x h_port%02x bytes %d\n",
@@ -704,9 +706,17 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
musb_ep_select(mbase, epnum);
+ if (is_out && !len) {
+ use_dma = 0;
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ hw_ep->tx_channel = NULL;
+ }
+
/* candidate for DMA? */
dma_controller = musb->dma_controller;
- if (is_dma_capable() && epnum && dma_controller) {
+ if (use_dma && is_dma_capable() && epnum && dma_controller) {
dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
if (!dma_channel) {
dma_channel = dma_controller->channel_alloc(
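The hunk above forces PIO for zero-length OUT transfers: a DMA engine cannot queue a 0-byte packet, so DMAENAB is cleared and the channel is released before the DMA-candidate check. A minimal user-space model of that decision (decide_use_dma is a hypothetical name; the register bit value is assumed from musb_regs.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MUSB_TXCSR_DMAENAB 0x1000	/* assumed bit value, for illustration */

/* Mirror of the new musb_ep_program() prologue: a zero-length OUT
 * transfer clears DMAENAB and opts out of DMA entirely. */
static bool decide_use_dma(bool is_out, uint32_t len, uint16_t *txcsr)
{
	if (is_out && len == 0) {
		*txcsr &= ~MUSB_TXCSR_DMAENAB;	/* fall back to PIO */
		return false;
	}
	return true;
}

int main(void)
{
	uint16_t csr = MUSB_TXCSR_DMAENAB;
	printf("use_dma=%d csr=%#x\n", decide_use_dma(true, 0, &csr), csr);
	return 0;
}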
@@ -813,9 +823,28 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
if (load_count) {
/* PIO to load FIFO */
qh->segsize = load_count;
- musb_write_fifo(hw_ep, load_count, buf);
+ if (!buf) {
+ sg_miter_start(&qh->sg_miter, urb->sg, 1,
+ SG_MITER_ATOMIC
+ | SG_MITER_FROM_SG);
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller,
+ "error: sg"
+ "list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ goto finish;
+ }
+ buf = qh->sg_miter.addr + urb->sg->offset +
+ urb->actual_length;
+ load_count = min_t(u32, load_count,
+ qh->sg_miter.length);
+ musb_write_fifo(hw_ep, load_count, buf);
+ qh->sg_miter.consumed = load_count;
+ sg_miter_stop(&qh->sg_miter);
+ } else
+ musb_write_fifo(hw_ep, load_count, buf);
}
-
+finish:
/* re-enable interrupt */
musb_writew(mbase, MUSB_INTRTXE, int_txe);
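For URBs that arrive with only a scatterlist (urb->transfer_buffer == NULL), the FIFO is now loaded through the sg_mapping_iter API from <linux/scatterlist.h>; SG_MITER_ATOMIC means no sleeping is allowed between start and stop. The idiom the hunk follows, condensed into a hypothetical standalone helper (sketch only):

/* Hypothetical helper showing the sg_miter idiom used above: map the
 * first sg entry atomically, copy at most its length into the FIFO,
 * report how much was consumed, then unmap. */
static u32 pio_load_from_sg(struct musb_hw_ep *hw_ep,
			    struct scatterlist *sg, u32 want)
{
	struct sg_mapping_iter miter;
	u32 done = 0;

	sg_miter_start(&miter, sg, 1, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	if (sg_miter_next(&miter)) {		/* kmaps the entry */
		done = min_t(u32, want, miter.length);
		musb_write_fifo(hw_ep, done, miter.addr);
		miter.consumed = done;		/* report partial use */
	}
	sg_miter_stop(&miter);			/* kunmap + bookkeeping */
	return done;
}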
@@ -882,6 +911,73 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
}
}
+/* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to
+ * the end; avoids starvation for other endpoints.
+ */
+static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
+ int is_in)
+{
+ struct dma_channel *dma;
+ struct urb *urb;
+ void __iomem *mbase = musb->mregs;
+ void __iomem *epio = ep->regs;
+ struct musb_qh *cur_qh, *next_qh;
+ u16 rx_csr, tx_csr;
+
+ musb_ep_select(mbase, ep->epnum);
+ if (is_in) {
+ dma = is_dma_capable() ? ep->rx_channel : NULL;
+
+ /* clear nak timeout bit */
+ rx_csr = musb_readw(epio, MUSB_RXCSR);
+ rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+ rx_csr &= ~MUSB_RXCSR_DATAERROR;
+ musb_writew(epio, MUSB_RXCSR, rx_csr);
+
+ cur_qh = first_qh(&musb->in_bulk);
+ } else {
+ dma = is_dma_capable() ? ep->tx_channel : NULL;
+
+ /* clear nak timeout bit */
+ tx_csr = musb_readw(epio, MUSB_TXCSR);
+ tx_csr |= MUSB_TXCSR_H_WZC_BITS;
+ tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT;
+ musb_writew(epio, MUSB_TXCSR, tx_csr);
+
+ cur_qh = first_qh(&musb->out_bulk);
+ }
+ if (cur_qh) {
+ urb = next_urb(cur_qh);
+ if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
+ dma->status = MUSB_DMA_STATUS_CORE_ABORT;
+ musb->dma_controller->channel_abort(dma);
+ urb->actual_length += dma->actual_len;
+ dma->actual_len = 0L;
+ }
+ musb_save_toggle(cur_qh, is_in, urb);
+
+ if (is_in) {
+ /* move cur_qh to end of queue */
+ list_move_tail(&cur_qh->ring, &musb->in_bulk);
+
+ /* get the next qh from musb->in_bulk */
+ next_qh = first_qh(&musb->in_bulk);
+
+ /* set rx_reinit and schedule the next qh */
+ ep->rx_reinit = 1;
+ } else {
+ /* move cur_qh to end of queue */
+ list_move_tail(&cur_qh->ring, &musb->out_bulk);
+
+ /* get the next qh from musb->out_bulk */
+ next_qh = first_qh(&musb->out_bulk);
+
+ /* set tx_reinit and schedule the next qh */
+ ep->tx_reinit = 1;
+ }
+ musb_start_urb(musb, is_in, next_qh);
+ }
+}
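The scheduling effect of musb_bulk_nak_timeout() is a plain round-robin: list_move_tail() rotates the NAKing QH to the back of musb->in_bulk or musb->out_bulk, and first_qh() then yields the next candidate. A runnable user-space model of that rotation (an array stands in for the kernel list):

#include <stdio.h>

#define NQH 3

int main(void)
{
	int ring[NQH] = {1, 2, 3};	/* queued bulk QHs, head first */

	for (int t = 0; t < 4; t++) {
		int cur = ring[0];	/* first_qh(): the NAKing QH */

		/* list_move_tail(): rotate the head to the back */
		for (int i = 0; i < NQH - 1; i++)
			ring[i] = ring[i + 1];
		ring[NQH - 1] = cur;

		printf("NAK timeout on qh%d -> musb_start_urb(qh%d)\n",
		       cur, ring[0]);
	}
	return 0;
}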
/*
* Service the default endpoint (ep0) as host.
@@ -1116,6 +1212,7 @@ void musb_host_tx(struct musb *musb, u8 epnum)
void __iomem *mbase = musb->mregs;
struct dma_channel *dma;
bool transfer_pending = false;
+ static bool use_sg;
musb_ep_select(mbase, epnum);
tx_csr = musb_readw(epio, MUSB_TXCSR);
@@ -1146,23 +1243,31 @@ void musb_host_tx(struct musb *musb, u8 epnum)
status = -ETIMEDOUT;
} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
- dev_dbg(musb->controller, "TX end=%d device not responding\n", epnum);
-
- /* NOTE: this code path would be a good place to PAUSE a
- * transfer, if there's some other (nonperiodic) tx urb
- * that could use this fifo. (dma complicates it...)
- * That's already done for bulk RX transfers.
- *
- * if (bulk && qh->ring.next != &musb->out_bulk), then
- * we have a candidate... NAKing is *NOT* an error
- */
- musb_ep_select(mbase, epnum);
- musb_writew(epio, MUSB_TXCSR,
- MUSB_TXCSR_H_WZC_BITS
- | MUSB_TXCSR_TXPKTRDY);
- return;
+ if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
+ && !list_is_singular(&musb->out_bulk)) {
+ dev_dbg(musb->controller,
+ "NAK timeout on TX%d ep\n", epnum);
+ musb_bulk_nak_timeout(musb, hw_ep, 0);
+ } else {
+ dev_dbg(musb->controller,
+ "TX end=%d device not responding\n", epnum);
+ /* NOTE: this code path would be a good place to PAUSE a
+ * transfer, if there's some other (nonperiodic) tx urb
+ * that could use this fifo. (dma complicates it...)
+ * That's already done for bulk RX transfers.
+ *
+ * if (bulk && qh->ring.next != &musb->out_bulk), then
+ * we have a candidate... NAKing is *NOT* an error
+ */
+ musb_ep_select(mbase, epnum);
+ musb_writew(epio, MUSB_TXCSR,
+ MUSB_TXCSR_H_WZC_BITS
+ | MUSB_TXCSR_TXPKTRDY);
+ }
+ return;
}
+done:
if (status) {
if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
dma->status = MUSB_DMA_STATUS_CORE_ABORT;
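The TX branch only takes the new fairness path when the endpoint is actually shared: bulk type, multiplexed (qh->mux == 1), and more than one QH queued, which is what !list_is_singular() checks. Condensed as a hypothetical predicate:

/* Hypothetical condensation of the guard above: rotate on NAK timeout
 * only if another bulk QH is waiting for this hardware endpoint;
 * otherwise keep the historical behaviour of re-arming TXPKTRDY. */
static bool tx_nak_should_rotate(struct musb *musb, struct musb_qh *qh)
{
	return qh->type == USB_ENDPOINT_XFER_BULK &&
	       qh->mux == 1 &&
	       !list_is_singular(&musb->out_bulk);
}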
@@ -1332,9 +1437,38 @@ void musb_host_tx(struct musb *musb, u8 epnum)
length = qh->maxpacket;
/* Unmap the buffer so that CPU can use it */
usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
- musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
+
+ /*
+ * We need to map sg if the transfer_buffer is
+ * NULL.
+ */
+ if (!urb->transfer_buffer)
+ use_sg = true;
+
+ if (use_sg) {
+ /* sg_miter_start is already done in musb_ep_program */
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller, "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ status = -EINVAL;
+ goto done;
+ }
+ urb->transfer_buffer = qh->sg_miter.addr;
+ length = min_t(u32, length, qh->sg_miter.length);
+ musb_write_fifo(hw_ep, length, urb->transfer_buffer);
+ qh->sg_miter.consumed = length;
+ sg_miter_stop(&qh->sg_miter);
+ } else {
+ musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
+ }
+
qh->segsize = length;
+ if (use_sg) {
+ if (offset + length >= urb->transfer_buffer_length)
+ use_sg = false;
+ }
+
musb_ep_select(mbase, epnum);
musb_writew(epio, MUSB_TXCSR,
MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
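The TX interrupt path keeps a use_sg flag so later FIFO loads of the same URB keep walking the scatterlist started in musb_ep_program(); the flag drops once the final packet is loaded. A runnable user-space model of that bookkeeping for a single URB (note the patch holds the flag in a function-local static shared by all TX endpoints; the model tracks one endpoint only):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
	const void *transfer_buffer = NULL;	/* sg-only URB */
	const size_t total = 1536, maxpacket = 512;
	bool use_sg = false;

	for (size_t offset = 0; offset < total; offset += maxpacket) {
		if (!transfer_buffer)
			use_sg = true;		/* map via sg_miter */
		size_t length = maxpacket;	/* one FIFO load per IRQ */
		if (use_sg && offset + length >= total)
			use_sg = false;		/* final packet loaded */
		printf("offset %4zu: loaded %zu bytes, use_sg now %d\n",
		       offset, length, use_sg);
	}
	return 0;
}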
@@ -1380,50 +1514,6 @@ void musb_host_tx(struct musb *musb, u8 epnum)
#endif
-/* Schedule next QH from musb->in_bulk and move the current qh to
- * the end; avoids starvation for other endpoints.
- */
-static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
-{
- struct dma_channel *dma;
- struct urb *urb;
- void __iomem *mbase = musb->mregs;
- void __iomem *epio = ep->regs;
- struct musb_qh *cur_qh, *next_qh;
- u16 rx_csr;
-
- musb_ep_select(mbase, ep->epnum);
- dma = is_dma_capable() ? ep->rx_channel : NULL;
-
- /* clear nak timeout bit */
- rx_csr = musb_readw(epio, MUSB_RXCSR);
- rx_csr |= MUSB_RXCSR_H_WZC_BITS;
- rx_csr &= ~MUSB_RXCSR_DATAERROR;
- musb_writew(epio, MUSB_RXCSR, rx_csr);
-
- cur_qh = first_qh(&musb->in_bulk);
- if (cur_qh) {
- urb = next_urb(cur_qh);
- if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
- dma->status = MUSB_DMA_STATUS_CORE_ABORT;
- musb->dma_controller->channel_abort(dma);
- urb->actual_length += dma->actual_len;
- dma->actual_len = 0L;
- }
- musb_save_toggle(cur_qh, 1, urb);
-
- /* move cur_qh to end of queue */
- list_move_tail(&cur_qh->ring, &musb->in_bulk);
-
- /* get the next qh from musb->in_bulk */
- next_qh = first_qh(&musb->in_bulk);
-
- /* set rx_reinit and schedule the next qh */
- ep->rx_reinit = 1;
- musb_start_urb(musb, 1, next_qh);
- }
-}
-
/*
* Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
* and high-bandwidth IN transfer cases.
@@ -1442,6 +1532,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
bool done = false;
u32 status;
struct dma_channel *dma;
+ static bool use_sg;
+ unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
musb_ep_select(mbase, epnum);
@@ -1500,7 +1592,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
if (usb_pipebulk(urb->pipe)
&& qh->mux == 1
&& !list_is_singular(&musb->in_bulk)) {
- musb_bulk_rx_nak_timeout(musb, hw_ep);
+ musb_bulk_nak_timeout(musb, hw_ep, 1);
return;
}
musb_ep_select(mbase, epnum);
@@ -1756,10 +1848,43 @@ void musb_host_rx(struct musb *musb, u8 epnum)
#endif /* Mentor DMA */
if (!dma) {
+ unsigned int received_len;
+
/* Unmap the buffer so that CPU can use it */
usb_hcd_unmap_urb_for_dma(musb_to_hcd(musb), urb);
- done = musb_host_packet_rx(musb, urb,
- epnum, iso_err);
+
+ /*
+ * We need to map sg if the transfer_buffer is
+ * NULL.
+ */
+ if (!urb->transfer_buffer) {
+ use_sg = true;
+ sg_miter_start(&qh->sg_miter, urb->sg, 1,
+ sg_flags);
+ }
+
+ if (use_sg) {
+ if (!sg_miter_next(&qh->sg_miter)) {
+ dev_err(musb->controller, "error: sg list empty\n");
+ sg_miter_stop(&qh->sg_miter);
+ status = -EINVAL;
+ done = true;
+ goto finish;
+ }
+ urb->transfer_buffer = qh->sg_miter.addr;
+ received_len = urb->actual_length;
+ qh->offset = 0x0;
+ done = musb_host_packet_rx(musb, urb, epnum,
+ iso_err);
+ /* Calculate the number of bytes received */
+ received_len = urb->actual_length -
+ received_len;
+ qh->sg_miter.consumed = received_len;
+ sg_miter_stop(&qh->sg_miter);
+ } else {
+ done = musb_host_packet_rx(musb, urb,
+ epnum, iso_err);
+ }
dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
}
}
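On the RX side, musb_host_packet_rx() advances urb->actual_length internally, so the hunk recovers the bytes landed in the currently mapped sg entry as a before/after delta and hands it back to the iterator. The essential lines, as a sketch:

/* Sketch of the RX accounting above: the delta in urb->actual_length
 * across musb_host_packet_rx() is exactly what was written into the
 * sg entry mapped by sg_miter_next(), so report it as consumed. */
unsigned int before = urb->actual_length;
done = musb_host_packet_rx(musb, urb, epnum, iso_err);
qh->sg_miter.consumed = urb->actual_length - before;
sg_miter_stop(&qh->sg_miter);	/* unmap before leaving the handler */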
@@ -1768,6 +1893,9 @@ finish:
urb->actual_length += xfer_len;
qh->offset += xfer_len;
if (done) {
+ if (use_sg)
+ use_sg = false;
+
if (urb->status == -EINPROGRESS)
urb->status = status;
musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
@@ -1863,14 +1991,14 @@ static int musb_schedule(
else
head = &musb->out_bulk;
- /* Enable bulk RX NAK timeout scheme when bulk requests are
+ /* Enable bulk RX/TX NAK timeout scheme when bulk requests are
* multiplexed. This scheme doesn't work in high speed to full
* speed scenario as NAK interrupts are not coming from a
* full speed device connected to a high speed device.
* NAK timeout interval is 8 (128 uframe or 16ms) for HS and
* 4 (8 frame or 8ms) for FS device.
*/
- if (is_in && qh->dev)
+ if (qh->dev)
qh->intv_reg =
(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
goto success;
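The interval values follow the MUSB NAK-limit encoding, where a register value of m selects 2^(m-1) frames or microframes. A runnable check that the numbers in the comment line up (assuming the 2^(m-1) encoding; a microframe is 125 us, a frame 1 ms):

#include <stdio.h>

int main(void)
{
	for (int hs = 0; hs <= 1; hs++) {
		int reg = hs ? 8 : 4;			/* qh->intv_reg */
		int units = 1 << (reg - 1);		/* 2^(m-1) */
		double ms = units * (hs ? 0.125 : 1.0);	/* uframe/frame */

		printf("%-10s intv_reg=%d -> %3d %s = %2.0f ms\n",
		       hs ? "high-speed" : "full-speed", reg, units,
		       hs ? "uframes" : "frames", ms);
	}
	return 0;
}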