f_mtp: Increase the MTP request buffer size
author: a17671 <a17671@motorola.com>
Sat, 2 Feb 2019 02:33:27 +0000 (10:33 +0800)
committer: lingsen1 <lingsen1@lenovo.com>
Sun, 7 Feb 2021 09:36:56 +0000 (17:36 +0800)
Increase the MTP buffer request size to improve the throughput rate.

Change-Id: Ia022787f45c1951fc183d3b535bb8df59ea09619
Signed-off-by: a17671 <a17671@motorola.com>
Reviewed-on: https://gerrit.mot.com/1304532
SLTApproved: Slta Waiver
SME-Granted: SME Approvals Granted
Tested-by: Jira Key
Reviewed-by: Wei Deng <dengwei1@motorola.com>
Reviewed-by: Xiangpo Zhao <zhaoxp3@motorola.com>
Submit-Approved: Jira Key

drivers/usb/gadget/function/f_mtp.c

index 01e6153c54d39ace52e3b4740082bef780b08455..d605b3262030bfaf1694a6080d2978cf8be431dc 100644 (file)
@@ -40,6 +40,8 @@
 \r
 #include "configfs.h"\r
 \r
+#define MTP_RX_BUFFER_INIT_SIZE    1048576\r
+#define MTP_TX_BUFFER_INIT_SIZE    1048576\r
 #define MTP_BULK_BUFFER_SIZE       16384\r
 #define INTR_BUFFER_SIZE           28\r
 #define MAX_INST_NAME_LEN          40\r
@@ -56,7 +58,7 @@
 #define STATE_ERROR                 4   /* error from completion routine */\r
 \r
 /* number of tx and rx requests to allocate */\r
-#define TX_REQ_MAX 4\r
+#define MTP_TX_REQ_MAX 8\r
 #define RX_REQ_MAX 2\r
 #define INTR_REQ_MAX 5\r
 \r
 #if IS_ENABLED(CONFIG_USB_CONFIGFS_UEVENT)\r
 #define DRIVER_NAME_PTP "ptp"\r
 #endif\r
+#define MAX_ITERATION          100\r
+\r
+unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;\r
+module_param(mtp_rx_req_len, uint, 0644);\r
+\r
+unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;\r
+module_param(mtp_tx_req_len, uint, 0644);\r
+\r
+unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;\r
+module_param(mtp_tx_reqs, uint, 0644);\r
 \r
 static const char mtp_shortname[] = DRIVER_NAME "_usb";\r
 \r
@@ -120,6 +132,10 @@ struct mtp_dev {
        uint16_t xfer_command;\r
        uint32_t xfer_transaction_id;\r
        int xfer_result;\r
+       unsigned int mtp_rx_req_len;\r
+       unsigned int mtp_tx_req_len;\r
+       unsigned int mtp_tx_reqs;\r
+       struct mutex  read_mutex;\r
 };\r
 \r
 static struct usb_interface_descriptor mtp_interface_desc = {\r
@@ -453,7 +469,7 @@ static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
 {\r
        struct mtp_dev *dev = _mtp_dev;\r
 \r
-       if (req->status != 0)\r
+       if (req->status != 0 && dev->state != STATE_OFFLINE)\r
                dev->state = STATE_ERROR;\r
 \r
        mtp_req_put(dev, &dev->tx_idle, req);\r
@@ -466,7 +482,7 @@ static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
        struct mtp_dev *dev = _mtp_dev;\r
 \r
        dev->rx_done = 1;\r
-       if (req->status != 0)\r
+       if (req->status != 0 && dev->state != STATE_OFFLINE)\r
                dev->state = STATE_ERROR;\r
 \r
        wake_up(&dev->read_wq);\r
@@ -476,7 +492,7 @@ static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
 {\r
        struct mtp_dev *dev = _mtp_dev;\r
 \r
-       if (req->status != 0)\r
+       if (req->status != 0 && dev->state != STATE_OFFLINE)\r
                dev->state = STATE_ERROR;\r
 \r
        mtp_req_put(dev, &dev->intr_idle, req);\r
@@ -523,18 +539,44 @@ static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
        ep->driver_data = dev;          /* claim the endpoint */\r
        dev->ep_intr = ep;\r
 \r
+retry_tx_alloc:\r
        /* now allocate requests for our endpoints */\r
-       for (i = 0; i < TX_REQ_MAX; i++) {\r
-               req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);\r
-               if (!req)\r
-                       goto fail;\r
+       for (i = 0; i < dev->mtp_tx_reqs; i++) {\r
+               req = mtp_request_new(dev->ep_in,\r
+                               dev->mtp_tx_req_len);\r
+               if (!req) {\r
+                       if (dev->mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)\r
+                               goto fail;\r
+                       while ((req = mtp_req_get(dev, &dev->tx_idle)))\r
+                               mtp_request_free(req, dev->ep_in);\r
+                       dev->mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;\r
+                       dev->mtp_tx_reqs = MTP_TX_REQ_MAX;\r
+                       goto retry_tx_alloc;\r
+               }\r
                req->complete = mtp_complete_in;\r
                mtp_req_put(dev, &dev->tx_idle, req);\r
        }\r
+\r
+       /*\r
+        * The RX buffer should be aligned to EP max packet for\r
+        * some controllers.  At bind time, we don't know the\r
+        * operational speed.  Hence assuming super speed max\r
+        * packet size.\r
+        */\r
+       if (dev->mtp_rx_req_len % 1024)\r
+               dev->mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;\r
+\r
+retry_rx_alloc:\r
        for (i = 0; i < RX_REQ_MAX; i++) {\r
-               req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);\r
-               if (!req)\r
-                       goto fail;\r
+               req = mtp_request_new(dev->ep_out, dev->mtp_rx_req_len);\r
+               if (!req) {\r
+                       if (dev->mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)\r
+                               goto fail;\r
+                       for (--i; i >= 0; i--)\r
+                               mtp_request_free(dev->rx_req[i], dev->ep_out);\r
+                       dev->mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;\r
+                       goto retry_rx_alloc;\r
+               }\r
                req->complete = mtp_complete_out;\r
                dev->rx_req[i] = req;\r
        }\r
@@ -579,6 +621,11 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
                r = ret;\r
                goto done;\r
        }\r
+\r
+       len = ALIGN(count, dev->ep_out->maxpacket);\r
+       if (len > dev->mtp_rx_req_len)\r
+               return -EINVAL;\r
+\r
        spin_lock_irq(&dev->lock);\r
        if (dev->state == STATE_OFFLINE) {\r
                spin_unlock_irq(&dev->lock);\r
@@ -607,12 +654,19 @@ static ssize_t mtp_read(struct file *fp, char __user *buf,
        dev->state = STATE_BUSY;\r
        spin_unlock_irq(&dev->lock);\r
 \r
+       mutex_lock(&dev->read_mutex);\r
+       if (dev->state == STATE_OFFLINE) {\r
+               r = -EIO;\r
+               mutex_unlock(&dev->read_mutex);\r
+               goto done;\r
+       }\r
 requeue_req:\r
        /* queue a request */\r
        req = dev->rx_req[0];\r
        req->length = len;\r
        dev->rx_done = 0;\r
        set_read_req_length(req);\r
+       mutex_unlock(&dev->read_mutex);\r
        ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);\r
        if (ret < 0) {\r
                r = -EIO;\r
@@ -622,12 +676,23 @@ requeue_req:
        }\r
 \r
        /* wait for a request to complete */\r
-       ret = wait_event_interruptible(dev->read_wq, dev->rx_done);\r
+       ret = wait_event_interruptible(dev->read_wq,\r
+                               dev->rx_done || dev->state != STATE_BUSY);\r
+       if (dev->state == STATE_CANCELED) {\r
+               r = -ECANCELED;\r
+               if (!dev->rx_done)\r
+                       usb_ep_dequeue(dev->ep_out, req);\r
+               spin_lock_irq(&dev->lock);\r
+               dev->state = STATE_CANCELED;\r
+               spin_unlock_irq(&dev->lock);\r
+               goto done;\r
+       }\r
        if (ret < 0) {\r
                r = ret;\r
                usb_ep_dequeue(dev->ep_out, req);\r
                goto done;\r
        }\r
+       mutex_lock(&dev->read_mutex);\r
        if (dev->state == STATE_BUSY) {\r
                /* If we got a 0-len packet, throw it back and try again. */\r
                if (req->actual == 0)\r
@@ -641,6 +706,7 @@ requeue_req:
        } else\r
                r = -EIO;\r
 \r
+       mutex_unlock(&dev->read_mutex);\r
 done:\r
        spin_lock_irq(&dev->lock);\r
        if (dev->state == STATE_CANCELED)\r
@@ -712,8 +778,8 @@ static ssize_t mtp_write(struct file *fp, const char __user *buf,
                        break;\r
                }\r
 \r
-               if (count > MTP_BULK_BUFFER_SIZE)\r
-                       xfer = MTP_BULK_BUFFER_SIZE;\r
+               if (count > dev->mtp_tx_req_len)\r
+                       xfer = dev->mtp_tx_req_len;\r
                else\r
                        xfer = count;\r
                if (xfer && copy_from_user(req->buf, buf, xfer)) {\r
@@ -771,6 +837,11 @@ static void send_file_work(struct work_struct *data)
        offset = dev->xfer_file_offset;\r
        count = dev->xfer_file_length;\r
 \r
+       if (count < 0) {\r
+               dev->xfer_result = -EINVAL;\r
+               return;\r
+       }\r
+\r
        DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);\r
 \r
        if (dev->xfer_send_header) {\r
@@ -805,8 +876,8 @@ static void send_file_work(struct work_struct *data)
                        break;\r
                }\r
 \r
-               if (count > MTP_BULK_BUFFER_SIZE)\r
-                       xfer = MTP_BULK_BUFFER_SIZE;\r
+               if (count > dev->mtp_tx_req_len)\r
+                       xfer = dev->mtp_tx_req_len;\r
                else\r
                        xfer = count;\r
 \r
@@ -838,7 +909,8 @@ static void send_file_work(struct work_struct *data)
                ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);\r
                if (ret < 0) {\r
                        DBG(cdev, "send_file_work: xfer error %d\n", ret);\r
-                       dev->state = STATE_ERROR;\r
+                       if (dev->state != STATE_OFFLINE)\r
+                               dev->state = STATE_ERROR;\r
                        r = -EIO;\r
                        break;\r
                }\r
@@ -877,37 +949,58 @@ static void receive_file_work(struct work_struct *data)
        offset = dev->xfer_file_offset;\r
        count = dev->xfer_file_length;\r
 \r
+       if (count < 0) {\r
+               dev->xfer_result = -EINVAL;\r
+               return;\r
+       }\r
+\r
        DBG(cdev, "receive_file_work(%lld)\n", count);\r
 \r
        while (count > 0 || write_req) {\r
                if (count > 0) {\r
+                       mutex_lock(&dev->read_mutex);\r
+                       if (dev->state == STATE_OFFLINE) {\r
+                               r = -EIO;\r
+                               mutex_unlock(&dev->read_mutex);\r
+                               break;\r
+                       }\r
                        /* queue a request */\r
                        read_req = dev->rx_req[cur_buf];\r
                        cur_buf = (cur_buf + 1) % RX_REQ_MAX;\r
 \r
-                       read_req->length = (count > MTP_BULK_BUFFER_SIZE\r
-                                       ? MTP_BULK_BUFFER_SIZE : count);\r
-                       dev->rx_done = 0;\r
+                       /* some h/w expects size to be aligned to ep's MTU */\r
+                       read_req->length = dev->mtp_rx_req_len;\r
 \r
-                       set_read_req_length(read_req);\r
+                       dev->rx_done = 0;\r
+                       mutex_unlock(&dev->read_mutex);\r
                        ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);\r
                        if (ret < 0) {\r
                                r = -EIO;\r
-                               dev->state = STATE_ERROR;\r
+                               if (dev->state != STATE_OFFLINE)\r
+                                       dev->state = STATE_ERROR;\r
                                break;\r
                        }\r
                }\r
 \r
                if (write_req) {\r
-                       DBG(cdev, "rx %p %d\n", write_req, write_req->actual);\r
+                       DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);\r
+                       mutex_lock(&dev->read_mutex);\r
+                       if (dev->state == STATE_OFFLINE) {\r
+                               r = -EIO;\r
+                               mutex_unlock(&dev->read_mutex);\r
+                               break;\r
+                       }\r
                        ret = vfs_write(filp, write_req->buf, write_req->actual,\r
                                &offset);\r
                        DBG(cdev, "vfs_write %d\n", ret);\r
                        if (ret != write_req->actual) {\r
                                r = -EIO;\r
-                               dev->state = STATE_ERROR;\r
+                               mutex_unlock(&dev->read_mutex);\r
+                               if (dev->state != STATE_OFFLINE)\r
+                                       dev->state = STATE_ERROR;\r
                                break;\r
                        }\r
+                       mutex_unlock(&dev->read_mutex);\r
                        write_req = NULL;\r
                }\r
 \r
@@ -915,8 +1008,12 @@ static void receive_file_work(struct work_struct *data)
                        /* wait for our last read to complete */\r
                        ret = wait_event_interruptible(dev->read_wq,\r
                                dev->rx_done || dev->state != STATE_BUSY);\r
-                       if (dev->state == STATE_CANCELED) {\r
-                               r = -ECANCELED;\r
+                       if (dev->state == STATE_CANCELED\r
+                                       || dev->state == STATE_OFFLINE) {\r
+                               if (dev->state == STATE_OFFLINE)\r
+                                       r = -EIO;\r
+                               else\r
+                                       r = -ECANCELED;\r
                                if (!dev->rx_done)\r
                                        usb_ep_dequeue(dev->ep_out, read_req);\r
                                break;\r
@@ -925,6 +1022,17 @@ static void receive_file_work(struct work_struct *data)
                                r = read_req->status;\r
                                break;\r
                        }\r
+\r
+                       mutex_lock(&dev->read_mutex);\r
+                       if (dev->state == STATE_OFFLINE) {\r
+                               r = -EIO;\r
+                               mutex_unlock(&dev->read_mutex);\r
+                               break;\r
+                       }\r
+                       /* Check if we aligned the size due to MTU constraint */\r
+                       if (count < read_req->length)\r
+                               read_req->actual = (read_req->actual > count ?\r
+                                               count : read_req->actual);\r
                        /* if xfer_file_length is 0xFFFFFFFF, then we read until\r
                         * we get a zero length packet\r
                         */\r
@@ -941,6 +1049,7 @@ static void receive_file_work(struct work_struct *data)
 \r
                        write_req = read_req;\r
                        read_req = NULL;\r
+                       mutex_unlock(&dev->read_mutex);\r
                }\r
        }\r
 \r
@@ -1157,6 +1266,7 @@ static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
                        value = (w_length < sizeof(mtp_ext_config_desc) ?\r
                                        w_length : sizeof(mtp_ext_config_desc));\r
                        memcpy(cdev->req->buf, &mtp_ext_config_desc, value);\r
+\r
                }\r
        } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {\r
                DBG(cdev, "class request: %d index: %d value: %d length: %d\n",\r
@@ -1227,6 +1337,10 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
        dev->cdev = cdev;\r
        DBG(cdev, "mtp_function_bind dev: %p\n", dev);\r
 \r
+\r
+       dev->mtp_rx_req_len = mtp_rx_req_len;\r
+       dev->mtp_tx_req_len = mtp_tx_req_len;\r
+       dev->mtp_tx_reqs = mtp_tx_reqs;\r
        /* allocate interface ID(s) */\r
        id = usb_interface_id(c, f);\r
        if (id < 0)\r
@@ -1279,6 +1393,7 @@ mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
                mtp_ss_out_comp_desc.bMaxBurst = max_burst;\r
        }\r
 \r
+       fi_mtp->func_inst.f = &dev->function;\r
        DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",\r
                gadget_is_superspeed(c->cdev->gadget) ? "super" :\r
                (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),\r
@@ -1290,19 +1405,27 @@ static void
 mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)\r
 {\r
        struct mtp_dev  *dev = func_to_mtp(f);\r
+       struct mtp_instance *fi_mtp;\r
        struct usb_request *req;\r
        int i;\r
+       fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);\r
 \r
        mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;\r
+       mutex_lock(&dev->read_mutex);\r
        while ((req = mtp_req_get(dev, &dev->tx_idle)))\r
                mtp_request_free(req, dev->ep_in);\r
        for (i = 0; i < RX_REQ_MAX; i++)\r
                mtp_request_free(dev->rx_req[i], dev->ep_out);\r
        while ((req = mtp_req_get(dev, &dev->intr_idle)))\r
                mtp_request_free(req, dev->ep_intr);\r
+       mutex_unlock(&dev->read_mutex);\r
+       spin_lock_irq(&dev->lock);\r
        dev->state = STATE_OFFLINE;\r
+       dev->cdev = NULL;\r
+       spin_unlock_irq(&dev->lock);\r
        kfree(f->os_desc_table);\r
        f->os_desc_n = 0;\r
+       fi_mtp->func_inst.f = NULL;\r
 }\r
 \r
 static int mtp_function_set_alt(struct usb_function *f,\r
@@ -1355,7 +1478,9 @@ static void mtp_function_disable(struct usb_function *f)
        struct usb_composite_dev        *cdev = dev->cdev;\r
 \r
        DBG(cdev, "mtp_function_disable\n");\r
+       spin_lock_irq(&dev->lock);\r
        dev->state = STATE_OFFLINE;\r
+       spin_unlock_irq(&dev->lock);\r
        usb_ep_disable(dev->ep_in);\r
        usb_ep_disable(dev->ep_out);\r
        usb_ep_disable(dev->ep_intr);\r
@@ -1523,6 +1648,8 @@ struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
        usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,\r
                                        descs, names, THIS_MODULE);\r
 \r
+       mutex_init(&fi_mtp->dev->read_mutex);\r
+\r
        return  &fi_mtp->func_inst;\r
 }\r
 EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);\r