Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r-- | drivers/usb/host/xhci-mem.c | 74
1 file changed, 46 insertions(+), 28 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bad0d1f9a41d..6afe32381209 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -37,7 +37,9 @@
  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
  */
 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
-					unsigned int cycle_state, gfp_t flags)
+					unsigned int cycle_state,
+					unsigned int max_packet,
+					gfp_t flags)
 {
 	struct xhci_segment *seg;
 	dma_addr_t	dma;
@@ -53,6 +55,14 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
 		return NULL;
 	}
 
+	if (max_packet) {
+		seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+		if (!seg->bounce_buf) {
+			dma_pool_free(xhci->segment_pool, seg->trbs, dma);
+			kfree(seg);
+			return NULL;
+		}
+	}
 	/* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
 	if (cycle_state == 0) {
 		for (i = 0; i < TRBS_PER_SEGMENT; i++)
@@ -70,6 +80,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
 		dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
 		seg->trbs = NULL;
 	}
+	kfree(seg->bounce_buf);
 	kfree(seg);
 }
 
@@ -317,11 +328,11 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 		struct xhci_segment **first, struct xhci_segment **last,
 		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, gfp_t flags)
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_segment *prev;
 
-	prev = xhci_segment_alloc(xhci, cycle_state, flags);
+	prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 	if (!prev)
 		return -ENOMEM;
 	num_segs--;
@@ -330,7 +341,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
 	while (num_segs > 0) {
 		struct xhci_segment	*next;
 
-		next = xhci_segment_alloc(xhci, cycle_state, flags);
+		next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
 		if (!next) {
 			prev = *first;
 			while (prev) {
@@ -360,7 +371,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		unsigned int num_segs, unsigned int cycle_state,
-		enum xhci_ring_type type, gfp_t flags)
+		enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
 	struct xhci_ring	*ring;
 	int ret;
@@ -370,13 +381,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
 		return NULL;
 
 	ring->num_segs = num_segs;
+	ring->bounce_buf_len = max_packet;
 	INIT_LIST_HEAD(&ring->td_list);
 	ring->type = type;
 	if (num_segs == 0)
 		return ring;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
-			&ring->last_seg, num_segs, cycle_state, type, flags);
+			&ring->last_seg, num_segs, cycle_state, type,
+			max_packet, flags);
 	if (ret)
 		goto fail;
 
@@ -470,7 +483,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
 			ring->num_segs : num_segs_needed;
 
 	ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
-			num_segs, ring->cycle_state, ring->type, flags);
+			num_segs, ring->cycle_state, ring->type,
+			ring->bounce_buf_len, flags);
 	if (ret)
 		return -ENOMEM;
 
@@ -652,7 +666,8 @@ struct xhci_ring *xhci_stream_id_to_ring(
  */
 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 		unsigned int num_stream_ctxs,
-		unsigned int num_streams, gfp_t mem_flags)
+		unsigned int num_streams,
+		unsigned int max_packet, gfp_t mem_flags)
 {
 	struct xhci_stream_info *stream_info;
 	u32 cur_stream;
@@ -704,9 +719,11 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
 	 * and add their segment DMA addresses to the radix tree.
 	 * Stream 0 is reserved.
 	 */
+
 	for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
 		stream_info->stream_rings[cur_stream] =
-			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
+			xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
+					mem_flags);
 		cur_ring = stream_info->stream_rings[cur_stream];
 		if (!cur_ring)
 			goto cleanup_rings;
@@ -1003,7 +1020,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
 	}
 
 	/* Allocate endpoint 0 ring */
-	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
+	dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
 	if (!dev->eps[0].ring)
 		goto fail;
 
@@ -1434,22 +1451,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		return -EINVAL;
 
 	ring_type = usb_endpoint_type(&ep->desc);
-	/* Set up the endpoint ring */
-	virt_dev->eps[ep_index].new_ring =
-		xhci_ring_alloc(xhci, 2, 1, ring_type, mem_flags);
-	if (!virt_dev->eps[ep_index].new_ring) {
-		/* Attempt to use the ring cache */
-		if (virt_dev->num_rings_cached == 0)
-			return -ENOMEM;
-		virt_dev->num_rings_cached--;
-		virt_dev->eps[ep_index].new_ring =
-			virt_dev->ring_cache[virt_dev->num_rings_cached];
-		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
-					1, ring_type);
-	}
-	virt_dev->eps[ep_index].skip = false;
-	ep_ring = virt_dev->eps[ep_index].new_ring;
 
 	/*
 	 * Get values to fill the endpoint context, mostly from ep descriptor.
@@ -1479,6 +1480,23 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 	if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
 		mult = 0;
 
+	/* Set up the endpoint ring */
+	virt_dev->eps[ep_index].new_ring =
+		xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+	if (!virt_dev->eps[ep_index].new_ring) {
+		/* Attempt to use the ring cache */
+		if (virt_dev->num_rings_cached == 0)
+			return -ENOMEM;
+		virt_dev->num_rings_cached--;
+		virt_dev->eps[ep_index].new_ring =
+			virt_dev->ring_cache[virt_dev->num_rings_cached];
+		virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+		xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+					1, ring_type);
+	}
+	virt_dev->eps[ep_index].skip = false;
+	ep_ring = virt_dev->eps[ep_index].new_ring;
+
 	/* Fill the endpoint context */
 	ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
 				      EP_INTERVAL(interval) |
@@ -2409,7 +2427,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 		goto fail;
 
 	/* Set up the command ring to have one segments for now. */
-	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
+	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
 	if (!xhci->cmd_ring)
 		goto fail;
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2454,7 +2472,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 	 */
 	xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
 	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
-						flags);
+					0, flags);
 	if (!xhci->event_ring)
 		goto fail;
 	if (xhci_check_trb_in_td_math(xhci) < 0)
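
For reference, below is a minimal standalone sketch of the allocation pattern this diff introduces in xhci_segment_alloc()/xhci_segment_free(): a per-segment bounce buffer is allocated only when the caller passes a non-zero max_packet, an allocation failure unwinds in reverse order, and the free path releases the bounce buffer unconditionally. The names (struct segment, segment_alloc, segment_free), the segment size constant, and the use of standard C allocators in place of dma_pool_zalloc()/kzalloc() are illustrative stand-ins, not driver code.

/*
 * Sketch only: mirrors the bounce-buffer allocation pattern from the diff
 * above, with plain C allocators standing in for the kernel DMA pool and
 * kzalloc(). Assumed names; not part of the xHCI driver.
 */
#include <stdlib.h>

#define SEG_TRB_BYTES 4096	/* stand-in for the TRB segment size */

struct segment {
	void *trbs;		/* TRB buffer, normally from the DMA pool */
	char *bounce_buf;	/* only allocated when max_packet != 0 */
};

static struct segment *segment_alloc(unsigned int max_packet)
{
	struct segment *seg = calloc(1, sizeof(*seg));

	if (!seg)
		return NULL;

	seg->trbs = calloc(1, SEG_TRB_BYTES);
	if (!seg->trbs) {
		free(seg);
		return NULL;
	}

	/*
	 * Rings that may need to bounce an unaligned TD fragment pass a
	 * non-zero max_packet; command, event and ctrl rings pass 0 in the
	 * diff above and skip this allocation entirely.
	 */
	if (max_packet) {
		seg->bounce_buf = calloc(1, max_packet);
		if (!seg->bounce_buf) {
			/* unwind in reverse order of allocation */
			free(seg->trbs);
			free(seg);
			return NULL;
		}
	}
	return seg;
}

static void segment_free(struct segment *seg)
{
	free(seg->trbs);
	free(seg->bounce_buf);	/* free(NULL) is a no-op, like kfree(NULL) */
	free(seg);
}

int main(void)
{
	/* Bulk-style segment: gets a 1024-byte bounce buffer. */
	struct segment *bulk = segment_alloc(1024);
	/* Command/event/ctrl-style segment: max_packet 0, no bounce buffer. */
	struct segment *cmd = segment_alloc(0);

	if (bulk)
		segment_free(bulk);
	if (cmd)
		segment_free(cmd);
	return 0;
}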