path: root/drivers/usb/chipidea/udc.c
Diffstat (limited to 'drivers/usb/chipidea/udc.c')
-rw-r--r--  drivers/usb/chipidea/udc.c  148
1 file changed, 148 insertions(+), 0 deletions(-)
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index d3556416dae4..f0fcaf2b1f33 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
+#include <linux/dma-direct.h>
#include <linux/err.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
@@ -540,6 +541,126 @@ static int prepare_td_for_sg(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
	return ret;
}

+/*
+ * Verify the scatterlist by iterating over each sg entry.
+ * Returns the index of the first invalid entry, which is less than
+ * num_sgs; a return value equal to num_sgs means the whole list is valid.
+ */
+static int sglist_get_invalid_entry(struct device *dma_dev, u8 dir,
+			struct usb_request *req)
+{
+	int i;
+	struct scatterlist *s = req->sg;
+
+	/* a single entry has no intermediate boundaries to align */
+	if (req->num_sgs == 1)
+		return 1;
+
+	dir = dir ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	for (i = 0; i < req->num_sgs; i++, s = sg_next(s)) {
+		/* Only a small sg (generally the last sg) may be bounced
+		 * by the DMA layer. If that happens, we can't ensure the
+		 * addr is page-aligned after dma map.
+		 */
+		if (dma_kmalloc_needs_bounce(dma_dev, s->length, dir))
+			break;
+
+		/* Make sure each sg start address (except the first sg) is
+		 * page-aligned and each end address (except the last sg) is
+		 * page-aligned as well.
+		 */
+		if (i == 0) {
+			if (!IS_ALIGNED(s->offset + s->length,
+					CI_HDRC_PAGE_SIZE))
+				break;
+		} else {
+			if (s->offset)
+				break;
+			if (!sg_is_last(s) && !IS_ALIGNED(s->length,
+					CI_HDRC_PAGE_SIZE))
+				break;
+		}
+	}
+
+	return i;
+}
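
The alignment rule above can be exercised in plain userspace C. The sketch below is illustrative only: sg_model is a hypothetical stand-in for struct scatterlist, CI_HDRC_PAGE_SIZE is assumed to be 4 KiB, and the dma_kmalloc_needs_bounce() check is omitted because it needs a real DMA device.

#include <stdio.h>

#define CI_HDRC_PAGE_SIZE 4096u
#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

struct sg_model { unsigned int offset, length; };

/* returns the index of the first entry breaking the page-alignment
 * rule, or num_sgs if every entry is acceptable */
static unsigned int first_invalid(const struct sg_model *sg,
				  unsigned int num_sgs)
{
	unsigned int i;

	for (i = 0; i < num_sgs; i++) {
		if (i == 0) {
			/* first entry: only its end must hit a page boundary */
			if (!IS_ALIGNED(sg[i].offset + sg[i].length,
					CI_HDRC_PAGE_SIZE))
				break;
		} else {
			if (sg[i].offset)	/* must start at offset 0 */
				break;
			if (i != num_sgs - 1 &&	/* unless last, end aligned */
			    !IS_ALIGNED(sg[i].length, CI_HDRC_PAGE_SIZE))
				break;
		}
	}
	return i;
}

int main(void)
{
	struct sg_model ok[]  = { { 512, 3584 }, { 0, 4096 }, { 0, 100 } };
	struct sg_model bad[] = { { 512, 3584 }, { 0, 1000 }, { 0, 100 } };

	printf("ok:  %u of 3\n", first_invalid(ok, 3));   /* 3: valid */
	printf("bad: %u of 3\n", first_invalid(bad, 3));  /* 1: bounce from here */
	return 0;
}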
+
+static int sglist_do_bounce(struct ci_hw_req *hwreq, int index,
+			bool copy, unsigned int *bounced)
+{
+	void *buf;
+	int i, ret, nents, num_sgs;
+	unsigned int rest, rounded;
+	struct scatterlist *sg, *src, *dst;
+
+	nents = index + 1;
+	ret = sg_alloc_table(&hwreq->sgt, nents, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	sg = src = hwreq->req.sg;
+	num_sgs = hwreq->req.num_sgs;
+	rest = hwreq->req.length;
+	dst = hwreq->sgt.sgl;
+
+	/* reuse the leading valid entries as-is */
+	for (i = 0; i < index; i++) {
+		memcpy(dst, src, sizeof(*src));
+		rest -= src->length;
+		src = sg_next(src);
+		dst = sg_next(dst);
+	}
+
+	/* create one bounce buffer covering all remaining entries */
+	rounded = round_up(rest, CI_HDRC_PAGE_SIZE);
+	buf = kmalloc(rounded, GFP_KERNEL);
+	if (!buf) {
+		sg_free_table(&hwreq->sgt);
+		return -ENOMEM;
+	}
+
+	sg_set_buf(dst, buf, rounded);
+
+	/* publish the bounce table and stash the original list in sgt */
+	hwreq->req.sg = hwreq->sgt.sgl;
+	hwreq->req.num_sgs = nents;
+	hwreq->sgt.sgl = sg;
+	hwreq->sgt.nents = num_sgs;
+
+	if (copy) /* TX: prefill the bounce buffer with the payload */
+		sg_copy_to_buffer(src, num_sgs - index, buf, rest);
+
+	*bounced = rest;
+
+	return 0;
+}
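
To see how much data one bounce buffer must cover, here is a minimal arithmetic sketch with made-up entry lengths: everything from the first invalid index onward collapses into a single kmalloc'd buffer, rounded up to a controller-page multiple so the last TD stays page-aligned.

#include <stdio.h>

#define CI_HDRC_PAGE_SIZE 4096u
/* same rounding as the kernel's round_up() for power-of-two alignment */
#define round_up(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* hypothetical request: 9000 bytes over 3 entries, entry 1 invalid */
	unsigned int lengths[] = { 4096, 3904, 1000 };
	unsigned int index = 1;	/* from sglist_get_invalid_entry() */
	unsigned int rest = 9000;	/* hwreq->req.length */

	for (unsigned int i = 0; i < index; i++)
		rest -= lengths[i];	/* entries kept as-is */

	printf("bounce %u bytes into a %u-byte buffer\n",
	       rest, round_up(rest, CI_HDRC_PAGE_SIZE));
	/* prints: bounce 4904 bytes into a 8192-byte buffer */
	return 0;
}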
+
+static void sglist_do_debounce(struct ci_hw_req *hwreq, bool copy)
+{
+	void *buf;
+	int i, nents, num_sgs;
+	struct scatterlist *sg, *src, *dst;
+
+	sg = hwreq->req.sg;
+	num_sgs = hwreq->req.num_sgs;
+	src = sg_last(sg, num_sgs);	/* the bounce buffer entry */
+	buf = sg_virt(src);
+
+	if (copy) {	/* RX: copy received data back to the original tail */
+		dst = hwreq->sgt.sgl;
+		for (i = 0; i < num_sgs - 1; i++)
+			dst = sg_next(dst);
+
+		nents = hwreq->sgt.nents - num_sgs + 1;
+		sg_copy_from_buffer(dst, nents, buf, sg_dma_len(src));
+	}
+
+	/* restore the original list and drop the bounce table */
+	hwreq->req.sg = hwreq->sgt.sgl;
+	hwreq->req.num_sgs = hwreq->sgt.nents;
+	hwreq->sgt.sgl = sg;
+	hwreq->sgt.nents = num_sgs;
+
+	kfree(buf);
+	sg_free_table(&hwreq->sgt);
+}
+
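
Note the bookkeeping convention the two helpers share: hwreq->sgt serves purely as a stash. While a request is bounced, hwreq->req.sg points at the short replacement table and the original list is parked in hwreq->sgt.sgl, which also acts as the "this request was bounced" flag tested in the hunks below. A minimal userspace model of that exchange, with hypothetical stand-in types (not the kernel's usb_request or sg_table):

#include <assert.h>

/* hypothetical stand-ins for usb_request and sg_table */
struct req_model   { void *sg;  unsigned int num_sgs; };
struct stash_model { void *sgl; unsigned int nents;  };

/* the exchange done at the end of sglist_do_bounce() and again,
 * in reverse, by sglist_do_debounce() */
static void swap_lists(struct req_model *req, struct stash_model *stash)
{
	void *sg = req->sg;
	unsigned int n = req->num_sgs;

	req->sg = stash->sgl;
	req->num_sgs = stash->nents;
	stash->sgl = sg;
	stash->nents = n;
}

int main(void)
{
	int orig[3], bounce[2];
	struct req_model req = { orig, 3 };
	struct stash_model stash = { bounce, 2 };	/* freshly built table */

	swap_lists(&req, &stash);	/* bounce: hardware sees 2 entries */
	assert(req.sg == bounce && stash.sgl == orig);
	swap_lists(&req, &stash);	/* debounce: original list restored */
	assert(req.sg == orig && req.num_sgs == 3);
	return 0;
}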
/**
* _hardware_enqueue: configures a request at hardware level
* @hwep: endpoint
@@ -552,6 +673,8 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
	struct ci_hdrc *ci = hwep->ci;
	int ret = 0;
	struct td_node *firstnode, *lastnode;
+	unsigned int bounced_size;
+	struct scatterlist *sg;

	/* don't queue twice */
	if (hwreq->req.status == -EALREADY)
@@ -559,11 +682,29 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
	hwreq->req.status = -EALREADY;

+	if (hwreq->req.num_sgs && hwreq->req.length &&
+	    ci->has_short_pkt_limit) {
+		ret = sglist_get_invalid_entry(ci->dev->parent, hwep->dir,
+					       &hwreq->req);
+		if (ret < hwreq->req.num_sgs) {
+			ret = sglist_do_bounce(hwreq, ret, hwep->dir == TX,
+					       &bounced_size);
+			if (ret)
+				return ret;
+		}
+	}
+
	ret = usb_gadget_map_request_by_dev(ci->dev->parent,
					    &hwreq->req, hwep->dir);
	if (ret)
		return ret;

+	if (hwreq->sgt.sgl) {
+		/* We've mapped a bigger buffer, now recover the actual size */
+		sg = sg_last(hwreq->req.sg, hwreq->req.num_sgs);
+		sg_dma_len(sg) = min(sg_dma_len(sg), bounced_size);
+	}
+
	if (hwreq->req.num_mapped_sgs)
		ret = prepare_td_for_sg(hwep, hwreq);
	else
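
The sg_dma_len() clamp in this hunk compensates for the earlier round_up(): the bounce buffer is allocated and DMA-mapped at the rounded page-multiple size, but the controller must only transfer the real residual length. A trivial sketch with the hypothetical numbers from the earlier example:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int mapped_len = 8192;	/* sg_dma_len() after map: rounded size */
	unsigned int bounced_size = 4904;	/* real bytes placed in the buffer */

	printf("hardware transfer length: %u\n", min(mapped_len, bounced_size));
	/* prints 4904: the controller never sees the padding */
	return 0;
}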
@@ -733,6 +874,10 @@ static int _hardware_dequeue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
	usb_gadget_unmap_request_by_dev(hwep->ci->dev->parent,
					&hwreq->req, hwep->dir);

+	/* sglist bounced */
+	if (hwreq->sgt.sgl)
+		sglist_do_debounce(hwreq, hwep->dir == RX);
+
	hwreq->req.actual += actual;

	if (hwreq->req.status)
@@ -1580,6 +1725,9 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
	usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir);

+	if (hwreq->sgt.sgl)
+		sglist_do_debounce(hwreq, false);
+
	req->status = -ECONNRESET;

	if (hwreq->req.complete != NULL) {