dcc2ce2438
Nokia only provides .zip files without any git history, so I had to download the kernel git repo for 3.18.79 and then apply all downstream changes as a single commit to preserve at least some of the history. My previous PC had problems creating patch files for symlinks, so I added some commits that should have been patch files instead. This merge request fixes that, leaving in the repo just 3.18.79 plus the downstream patches, with all of my patches as patch files inside aports. [ci:skip-build]: already built successfully in CI
2028 lines
No EOL
61 KiB
Diff
2028 lines
No EOL
61 KiB
Diff
This patch fixes Ethernet gadget/RNDIS support so the phone is detected as a USB device
|
|
and can assign an IP address to the PC, as well as allow SSH connections over USB
|
|
---
|
|
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
|
|
index 4b7e1593c787..f13fc6a58565 100644
|
|
--- a/drivers/usb/gadget/function/f_rndis.c
|
|
+++ b/drivers/usb/gadget/function/f_rndis.c
|
|
@@ -70,22 +70,6 @@
|
|
* - MS-Windows drivers sometimes emit undocumented requests.
|
|
*/
|
|
|
|
-static unsigned int rndis_dl_max_pkt_per_xfer = 10;
|
|
-module_param(rndis_dl_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
|
|
-MODULE_PARM_DESC(rndis_dl_max_pkt_per_xfer,
|
|
- "Maximum packets per transfer for DL aggregation");
|
|
-
|
|
-static unsigned int rndis_ul_max_pkt_per_xfer = 1;
|
|
-module_param(rndis_ul_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
|
|
-MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer,
|
|
- "Maximum packets per transfer for UL aggregation");
|
|
-
|
|
-static unsigned int f_rndis_debug;
|
|
-module_param(f_rndis_debug, uint, S_IRUGO | S_IWUSR);
|
|
-MODULE_PARM_DESC(f_rndis_debug,
|
|
- "f_rndis debug flag");
|
|
-#define F_RNDIS_DBG(fmt, args...) pr_debug("F_RNDIS,%s, " fmt, __func__, ## args)
|
|
-
|
|
struct f_rndis {
|
|
struct gether port;
|
|
u8 ctrl_id, data_id;
|
|
@@ -389,35 +373,13 @@ static struct sk_buff *rndis_add_header(struct gether *port,
|
|
struct sk_buff *skb)
|
|
{
|
|
struct sk_buff *skb2;
|
|
- struct rndis_packet_msg_type *header = NULL;
|
|
- struct f_rndis *rndis = func_to_rndis(&port->func);
|
|
-
|
|
- if (rndis->port.multi_pkt_xfer) {
|
|
- if (port->header) {
|
|
- header = port->header;
|
|
- memset(header, 0, sizeof(*header));
|
|
- header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET);
|
|
- header->MessageLength = cpu_to_le32(skb->len +
|
|
- sizeof(*header));
|
|
- header->DataOffset = cpu_to_le32(36);
|
|
- header->DataLength = cpu_to_le32(skb->len);
|
|
- pr_debug("MessageLength:%d DataLength:%d\n",
|
|
- header->MessageLength,
|
|
- header->DataLength);
|
|
- return skb;
|
|
- }
|
|
- pr_err("RNDIS header is NULL.\n");
|
|
- return NULL;
|
|
|
|
- } else {
|
|
- skb2 = skb_realloc_headroom(skb,
|
|
- sizeof(struct rndis_packet_msg_type));
|
|
- if (skb2)
|
|
- rndis_add_hdr(skb2);
|
|
+ skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
|
|
+ if (skb2)
|
|
+ rndis_add_hdr(skb2);
|
|
|
|
- dev_kfree_skb(skb);
|
|
- return skb2;
|
|
- }
|
|
+ dev_kfree_skb(skb);
|
|
+ return skb2;
|
|
}
|
|
|
|
static void rndis_response_available(void *_rndis)
|
|
@@ -449,14 +411,9 @@ static void rndis_response_available(void *_rndis)
|
|
static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
|
|
{
|
|
struct f_rndis *rndis = req->context;
|
|
- struct usb_composite_dev *cdev;
|
|
+ struct usb_composite_dev *cdev = rndis->port.func.config->cdev;
|
|
int status = req->status;
|
|
|
|
- if (!rndis->port.func.config || !rndis->port.func.config->cdev)
|
|
- return;
|
|
-
|
|
- cdev = rndis->port.func.config->cdev;
|
|
-
|
|
/* after TX:
|
|
* - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
|
|
* - RNDIS_RESPONSE_AVAILABLE (status/irq)
|
|
@@ -493,14 +450,7 @@ static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
|
|
static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
|
|
{
|
|
struct f_rndis *rndis = req->context;
|
|
- struct usb_composite_dev *cdev;
|
|
int status;
|
|
- rndis_init_msg_type *buf;
|
|
-
|
|
- if (!rndis->port.func.config || !rndis->port.func.config->cdev)
|
|
- return;
|
|
-
|
|
- cdev = rndis->port.func.config->cdev;
|
|
|
|
/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
|
|
// spin_lock(&dev->lock);
|
|
@@ -508,24 +458,6 @@ static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
|
|
if (status < 0)
|
|
pr_err("RNDIS command error %d, %d/%d\n",
|
|
status, req->actual, req->length);
|
|
-
|
|
- buf = (rndis_init_msg_type *)req->buf;
|
|
-
|
|
- if (buf->MessageType == RNDIS_MSG_INIT) {
|
|
- if (buf->MaxTransferSize > 2048) {
|
|
- rndis->port.multi_pkt_xfer = 1;
|
|
- rndis->port.dl_max_transfer_len = buf->MaxTransferSize;
|
|
- gether_update_dl_max_xfer_size(&rndis->port,
|
|
- rndis->port.dl_max_transfer_len);
|
|
- } else
|
|
- rndis->port.multi_pkt_xfer = 0;
|
|
- pr_info("%s: MaxTransferSize: %d : Multi_pkt_txr: %s\n",
|
|
- __func__, buf->MaxTransferSize,
|
|
- rndis->port.multi_pkt_xfer ? "enabled" :
|
|
- "disabled");
|
|
- if (rndis_dl_max_pkt_per_xfer <= 1)
|
|
- rndis->port.multi_pkt_xfer = 0;
|
|
- }
|
|
// spin_unlock(&dev->lock);
|
|
}
|
|
|
|
@@ -543,9 +475,6 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
|
|
/* composite driver infrastructure handles everything except
|
|
* CDC class messages; interface activation uses set_alt().
|
|
*/
|
|
- if (f_rndis_debug)
|
|
- F_RNDIS_DBG("ctrl->bRequestType:0x%x, ctrl->bRequest:0x%x, w_index:0x%x, w_value:0x%x\n",
|
|
- ctrl->bRequestType , ctrl->bRequest, w_index, w_value);
|
|
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
|
|
|
|
/* RNDIS uses the CDC command encapsulation mechanism to implement
|
|
@@ -569,8 +498,6 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
|
|
else {
|
|
u8 *buf;
|
|
u32 n;
|
|
- u32 MsgType, MsgLength, MsgID;
|
|
- __le32 *tmp;
|
|
|
|
/* return the result */
|
|
buf = rndis_get_next_response(rndis->config, &n);
|
|
@@ -578,19 +505,8 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
|
|
memcpy(req->buf, buf, n);
|
|
req->complete = rndis_response_complete;
|
|
req->context = rndis;
|
|
-
|
|
- tmp = (__le32 *)buf;
|
|
- MsgType = get_unaligned_le32(tmp++);
|
|
- MsgLength = get_unaligned_le32(tmp++);
|
|
- MsgID = get_unaligned_le32(tmp++);
|
|
-
|
|
rndis_free_response(rndis->config, buf);
|
|
value = n;
|
|
-
|
|
- if (f_rndis_debug)
|
|
- F_RNDIS_DBG("response MsgLength %d, msg type:0x%x, RequestID:0x%x\n",
|
|
- MsgLength, MsgType, MsgID);
|
|
- rndis_test_last_resp_id = MsgID;
|
|
}
|
|
/* else stalls ... spec says to avoid that */
|
|
}
|
|
@@ -598,10 +514,6 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
|
|
|
|
default:
|
|
invalid:
|
|
- F_RNDIS_DBG("invalid control req%02x.%02x v%04x i%04x l%d\n",
|
|
- ctrl->bRequestType, ctrl->bRequest,
|
|
- w_value, w_index, w_length);
|
|
-
|
|
VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
|
|
ctrl->bRequestType, ctrl->bRequest,
|
|
w_value, w_index, w_length);
|
|
@@ -630,18 +542,13 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
|
|
struct usb_composite_dev *cdev = f->config->cdev;
|
|
|
|
/* we know alt == 0 */
|
|
- F_RNDIS_DBG("interface :%d, rndis ctrl id :%d, rndis data id :%d\n" ,
|
|
- intf, rndis->ctrl_id, rndis->data_id);
|
|
-
|
|
|
|
if (intf == rndis->ctrl_id) {
|
|
if (rndis->notify->driver_data) {
|
|
- F_RNDIS_DBG("reset rndis control %d\n", intf);
|
|
VDBG(cdev, "reset rndis control %d\n", intf);
|
|
usb_ep_disable(rndis->notify);
|
|
}
|
|
if (!rndis->notify->desc) {
|
|
- F_RNDIS_DBG("init rndis ctrl %d\n", intf);
|
|
VDBG(cdev, "init rndis ctrl %d\n", intf);
|
|
if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
|
|
goto fail;
|
|
@@ -653,13 +560,11 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
|
|
struct net_device *net;
|
|
|
|
if (rndis->port.in_ep->driver_data) {
|
|
- F_RNDIS_DBG("reset rndis\n");
|
|
DBG(cdev, "reset rndis\n");
|
|
gether_disconnect(&rndis->port);
|
|
}
|
|
|
|
if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
|
|
- F_RNDIS_DBG("init rndis\n");
|
|
DBG(cdev, "init rndis\n");
|
|
if (config_ep_by_speed(cdev->gadget, f,
|
|
rndis->port.in_ep) ||
|
|
@@ -688,7 +593,7 @@ static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
|
|
*/
|
|
rndis->port.cdc_filter = 0;
|
|
|
|
- DBG(cdev, "RNDIS RX/TX early activation ...\n");
|
|
+ DBG(cdev, "RNDIS RX/TX early activation ... \n");
|
|
net = gether_connect(&rndis->port);
|
|
if (IS_ERR(net))
|
|
return PTR_ERR(net);
|
|
@@ -711,7 +616,6 @@ static void rndis_disable(struct usb_function *f)
|
|
if (!rndis->notify->driver_data)
|
|
return;
|
|
|
|
- F_RNDIS_DBG("\n");
|
|
DBG(cdev, "rndis deactivated\n");
|
|
|
|
rndis_uninit(rndis->config);
|
|
@@ -735,7 +639,6 @@ static void rndis_open(struct gether *geth)
|
|
struct f_rndis *rndis = func_to_rndis(&geth->func);
|
|
struct usb_composite_dev *cdev = geth->func.config->cdev;
|
|
|
|
- F_RNDIS_DBG("\n");
|
|
DBG(cdev, "%s\n", __func__);
|
|
|
|
rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3,
|
|
@@ -747,7 +650,6 @@ static void rndis_close(struct gether *geth)
|
|
{
|
|
struct f_rndis *rndis = func_to_rndis(&geth->func);
|
|
|
|
- F_RNDIS_DBG("\n");
|
|
DBG(geth->func.config->cdev, "%s\n", __func__);
|
|
|
|
rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
|
|
@@ -776,8 +678,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
|
|
|
|
struct f_rndis_opts *rndis_opts;
|
|
|
|
- F_RNDIS_DBG("\n");
|
|
-
|
|
if (!can_support_rndis(c))
|
|
return -EINVAL;
|
|
|
|
@@ -799,9 +699,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
|
|
* with list_for_each_entry, so we assume no race condition
|
|
* with regard to rndis_opts->bound access
|
|
*/
|
|
-
|
|
- /* if (!rndis_opts->bound) { */
|
|
- if (rndis_opts && !rndis_opts->bound) {
|
|
+ if (!rndis_opts->bound) {
|
|
gether_set_gadget(rndis_opts->net, cdev->gadget);
|
|
status = gether_register_netdev(rndis_opts->net);
|
|
if (status)
|
|
@@ -872,11 +770,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
|
|
rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
|
|
if (!rndis->notify_req)
|
|
goto fail;
|
|
-#if defined(CONFIG_64BIT) && defined(CONFIG_MTK_LM_MODE)
|
|
- rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL | GFP_DMA);
|
|
-#else
|
|
rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
|
|
-#endif
|
|
if (!rndis->notify_req->buf)
|
|
goto fail;
|
|
rndis->notify_req->length = STATUS_BYTECOUNT;
|
|
@@ -905,7 +799,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
|
|
|
|
rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
|
|
rndis_set_host_mac(rndis->config, rndis->ethaddr);
|
|
- rndis_set_max_pkt_xfer(rndis->config, rndis_ul_max_pkt_per_xfer);
|
|
|
|
if (rndis->manufacturer && rndis->vendorID &&
|
|
rndis_set_param_vendor(rndis->config, rndis->vendorID,
|
|
@@ -919,11 +812,6 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
|
|
* until we're activated via set_alt().
|
|
*/
|
|
|
|
- F_RNDIS_DBG("RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
|
|
- gadget_is_superspeed(c->cdev->gadget) ? "super" :
|
|
- gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
|
|
- rndis->port.in_ep->name, rndis->port.out_ep->name,
|
|
- rndis->notify->name);
|
|
DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
|
|
gadget_is_superspeed(c->cdev->gadget) ? "super" :
|
|
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
|
|
@@ -955,76 +843,6 @@ fail:
|
|
return status;
|
|
}
|
|
|
|
-static void
|
|
-rndis_old_unbind(struct usb_configuration *c, struct usb_function *f)
|
|
-{
|
|
- struct f_rndis *rndis = func_to_rndis(f);
|
|
-
|
|
- F_RNDIS_DBG("\n");
|
|
-
|
|
- rndis_deregister(rndis->config);
|
|
-
|
|
- usb_free_all_descriptors(f);
|
|
-
|
|
- kfree(rndis->notify_req->buf);
|
|
- usb_ep_free_request(rndis->notify, rndis->notify_req);
|
|
-
|
|
- kfree(rndis);
|
|
-}
|
|
-
|
|
-int
|
|
-rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
|
|
- u32 vendorID, const char *manufacturer, struct eth_dev *dev)
|
|
-{
|
|
- struct f_rndis *rndis;
|
|
- int status;
|
|
-
|
|
- /* allocate and initialize one new instance */
|
|
- status = -ENOMEM;
|
|
- rndis = kzalloc(sizeof(*rndis), GFP_KERNEL);
|
|
- if (!rndis)
|
|
- goto fail;
|
|
-
|
|
- ether_addr_copy(rndis->ethaddr, ethaddr);
|
|
- rndis->vendorID = vendorID;
|
|
- rndis->manufacturer = manufacturer;
|
|
-
|
|
- rndis->port.ioport = dev;
|
|
- /* RNDIS activates when the host changes this filter */
|
|
- rndis->port.cdc_filter = 0;
|
|
-
|
|
- /* RNDIS has special (and complex) framing */
|
|
- rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
|
|
- rndis->port.wrap = rndis_add_header;
|
|
- rndis->port.unwrap = rndis_rm_hdr;
|
|
- rndis->port.ul_max_pkts_per_xfer = rndis_ul_max_pkt_per_xfer;
|
|
- rndis->port.dl_max_pkts_per_xfer = rndis_dl_max_pkt_per_xfer;
|
|
-
|
|
- rndis->port.func.name = "rndis";
|
|
- /* descriptors are per-instance copies */
|
|
- rndis->port.func.bind = rndis_bind;
|
|
- /* note here use rndis_old_unbind */
|
|
- rndis->port.func.unbind = rndis_old_unbind;
|
|
- rndis->port.func.set_alt = rndis_set_alt;
|
|
- rndis->port.func.setup = rndis_setup;
|
|
- rndis->port.func.disable = rndis_disable;
|
|
-
|
|
- status = rndis_register(rndis_response_available, rndis);
|
|
- if (status < 0) {
|
|
- kfree(rndis);
|
|
- return status;
|
|
- }
|
|
- rndis->config = status;
|
|
-
|
|
- status = usb_add_function(c, &rndis->port.func);
|
|
- if (status)
|
|
- kfree(rndis);
|
|
-fail:
|
|
-
|
|
- F_RNDIS_DBG("done, status %d\n", status);
|
|
- return status;
|
|
-}
|
|
-
|
|
void rndis_borrow_net(struct usb_function_instance *f, struct net_device *net)
|
|
{
|
|
struct f_rndis_opts *opts;
|
|
@@ -1139,7 +957,6 @@ static void rndis_unbind(struct usb_configuration *c, struct usb_function *f)
|
|
{
|
|
struct f_rndis *rndis = func_to_rndis(f);
|
|
|
|
- F_RNDIS_DBG("\n");
|
|
kfree(f->os_desc_table);
|
|
f->os_desc_n = 0;
|
|
usb_free_all_descriptors(f);
|
|
@@ -1176,8 +993,6 @@ static struct usb_function *rndis_alloc(struct usb_function_instance *fi)
|
|
rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
|
|
rndis->port.wrap = rndis_add_header;
|
|
rndis->port.unwrap = rndis_rm_hdr;
|
|
- rndis->port.ul_max_pkts_per_xfer = rndis_ul_max_pkt_per_xfer;
|
|
- rndis->port.dl_max_pkts_per_xfer = rndis_dl_max_pkt_per_xfer;
|
|
|
|
rndis->port.func.name = "rndis";
|
|
/* descriptors are per-instance copies */
|
|
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
|
|
index 411ba6f934ad..95d2324f6977 100644
|
|
--- a/drivers/usb/gadget/function/rndis.c
|
|
+++ b/drivers/usb/gadget/function/rndis.c
|
|
@@ -49,7 +49,7 @@
|
|
* and will be happier if you provide the host_addr module parameter.
|
|
*/
|
|
|
|
-#if 1
|
|
+#if 0
|
|
static int rndis_debug = 0;
|
|
module_param (rndis_debug, int, 0);
|
|
MODULE_PARM_DESC (rndis_debug, "enable debugging");
|
|
@@ -59,16 +59,6 @@ MODULE_PARM_DESC (rndis_debug, "enable debugging");
|
|
|
|
#define RNDIS_MAX_CONFIGS 1
|
|
|
|
-int rndis_ul_max_pkt_per_xfer_rcvd;
|
|
-module_param(rndis_ul_max_pkt_per_xfer_rcvd, int, S_IRUGO);
|
|
-MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer_rcvd,
|
|
- "Max num of REMOTE_NDIS_PACKET_MSGs received in a single transfer");
|
|
-
|
|
-int rndis_ul_max_xfer_size_rcvd;
|
|
-module_param(rndis_ul_max_xfer_size_rcvd, int, S_IRUGO);
|
|
-MODULE_PARM_DESC(rndis_ul_max_xfer_size_rcvd,
|
|
- "Max size of bus transfer received");
|
|
-
|
|
|
|
static rndis_params rndis_per_dev_params[RNDIS_MAX_CONFIGS];
|
|
|
|
@@ -168,7 +158,6 @@ static const u32 oid_supported_list[] =
|
|
#endif /* RNDIS_PM */
|
|
};
|
|
|
|
-#define RNDIS_DBG(fmt, args...) pr_debug("RNDIS,%s, " fmt, __func__, ## args)
|
|
|
|
/* NDIS Functions */
|
|
static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
@@ -206,9 +195,6 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
net = rndis_per_dev_params[configNr].dev;
|
|
stats = dev_get_stats(net, &temp);
|
|
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID is 0x%x, configNr is %d\n", OID, configNr);
|
|
-
|
|
switch (OID) {
|
|
|
|
/* general oids (table 4-1) */
|
|
@@ -271,17 +257,6 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
else
|
|
*outbuf = cpu_to_le32(
|
|
rndis_per_dev_params[configNr].speed);
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID_GEN_LINK_SPEED, speed %d, data: rx_in %lu\n"
|
|
- "rx_out %lu, rx_nomem %lu, rx_error %lu, tx_in %lu\n"
|
|
- "tx_out %lu, tx_busy %lu, tx_complete %lu, last_msg_id: 0x%x\n"
|
|
- "last_resp_id: 0x%x, RNDIS reset cnt: 0x%lu, queue stopped? %d\n",
|
|
- rndis_per_dev_params[configNr].speed, rndis_test_rx_usb_in,
|
|
- rndis_test_rx_net_out, rndis_test_rx_nomem, rndis_test_rx_error,
|
|
- rndis_test_tx_net_in, rndis_test_tx_usb_out, rndis_test_tx_busy,
|
|
- rndis_test_tx_complete, rndis_test_last_msg_id,
|
|
- rndis_test_last_resp_id, rndis_test_reset_msg_cnt,
|
|
- netif_queue_stopped(rndis_per_dev_params[configNr].dev));
|
|
retval = 0;
|
|
break;
|
|
|
|
@@ -355,9 +330,6 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
pr_debug("%s: RNDIS_OID_GEN_MEDIA_CONNECT_STATUS\n", __func__);
|
|
*outbuf = cpu_to_le32(rndis_per_dev_params[configNr]
|
|
.media_state);
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID_GEN_MEDIA_CONNECT_STATUS, media_state %d\n",
|
|
- rndis_per_dev_params[configNr].media_state);
|
|
retval = 0;
|
|
break;
|
|
|
|
@@ -409,8 +381,6 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
pr_debug("%s: RNDIS_OID_GEN_XMIT_ERROR\n", __func__);
|
|
if (stats) {
|
|
*outbuf = cpu_to_le32(stats->tx_errors);
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID_GEN_XMIT_ERROR, stats->tx_errors %llu\n", stats->tx_errors);
|
|
retval = 0;
|
|
}
|
|
break;
|
|
@@ -421,8 +391,6 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
pr_debug("%s: RNDIS_OID_GEN_RCV_ERROR\n", __func__);
|
|
if (stats) {
|
|
*outbuf = cpu_to_le32(stats->rx_errors);
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID_GEN_RCV_ERROR, stats->rx_errors %llu\n", stats->rx_errors);
|
|
retval = 0;
|
|
}
|
|
break;
|
|
@@ -432,8 +400,6 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
pr_debug("%s: RNDIS_OID_GEN_RCV_NO_BUFFER\n", __func__);
|
|
if (stats) {
|
|
*outbuf = cpu_to_le32(stats->rx_dropped);
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID_GEN_RCV_NO_BUFFER, stats->rx_dropped %llu\n", stats->rx_dropped);
|
|
retval = 0;
|
|
}
|
|
break;
|
|
@@ -449,10 +415,6 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
rndis_per_dev_params[configNr].host_mac,
|
|
length);
|
|
retval = 0;
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID_802_3_PERMANENT_ADDRESS, host_mac is %x %x ....\n",
|
|
- rndis_per_dev_params[configNr].host_mac[0],
|
|
- rndis_per_dev_params[configNr].host_mac[1]);
|
|
}
|
|
break;
|
|
|
|
@@ -464,10 +426,6 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
memcpy(outbuf,
|
|
rndis_per_dev_params [configNr].host_mac,
|
|
length);
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID_802_3_CURRENT_ADDRESS, host_mac is %x %x ....\n",
|
|
- rndis_per_dev_params[configNr].host_mac[0],
|
|
- rndis_per_dev_params[configNr].host_mac[1]);
|
|
retval = 0;
|
|
}
|
|
break;
|
|
@@ -500,9 +458,6 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
case RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT:
|
|
pr_debug("%s: RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__);
|
|
if (stats) {
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID_802_3_RCV_ERROR_ALIGNMENT, stats->rx_frame_errors is %llu\n",
|
|
- stats->rx_frame_errors);
|
|
*outbuf = cpu_to_le32(stats->rx_frame_errors);
|
|
retval = 0;
|
|
}
|
|
@@ -526,10 +481,8 @@ static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
|
|
pr_warning("%s: query unknown OID 0x%08X\n",
|
|
__func__, OID);
|
|
}
|
|
- if (retval < 0) {
|
|
+ if (retval < 0)
|
|
length = 0;
|
|
- RNDIS_DBG("bad gen_ndis_query_resp, retval is 0x%d\n", retval);
|
|
- }
|
|
|
|
resp->InformationBufferLength = cpu_to_le32(length);
|
|
r->length = length + sizeof(*resp);
|
|
@@ -561,9 +514,6 @@ static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
|
|
}
|
|
}
|
|
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("OID is 0x%x\n", OID);
|
|
-
|
|
params = &rndis_per_dev_params[configNr];
|
|
switch (OID) {
|
|
case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
|
|
@@ -606,8 +556,6 @@ static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
|
|
__func__, OID, buf_len);
|
|
}
|
|
|
|
- if (retval)
|
|
- RNDIS_DBG("retval is 0x%d\n", retval);
|
|
return retval;
|
|
}
|
|
|
|
@@ -637,12 +585,12 @@ static int rndis_init_response(int configNr, rndis_init_msg_type *buf)
|
|
resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
|
|
resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
|
|
resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
|
|
- resp->MaxPacketsPerTransfer = cpu_to_le32(params->max_pkt_per_xfer);
|
|
- resp->MaxTransferSize = cpu_to_le32(params->max_pkt_per_xfer *
|
|
- (params->dev->mtu
|
|
+ resp->MaxPacketsPerTransfer = cpu_to_le32(1);
|
|
+ resp->MaxTransferSize = cpu_to_le32(
|
|
+ params->dev->mtu
|
|
+ sizeof(struct ethhdr)
|
|
+ sizeof(struct rndis_packet_msg_type)
|
|
- + 22));
|
|
+ + 22);
|
|
resp->PacketAlignmentFactor = cpu_to_le32(0);
|
|
resp->AFListOffset = cpu_to_le32(0);
|
|
resp->AFListSize = cpu_to_le32(0);
|
|
@@ -669,10 +617,8 @@ static int rndis_query_response(int configNr, rndis_query_msg_type *buf)
|
|
*/
|
|
r = rndis_add_response(configNr,
|
|
sizeof(oid_supported_list) + sizeof(rndis_query_cmplt_type));
|
|
- if (!r) {
|
|
- RNDIS_DBG("rndis_add_response return NULL\n");
|
|
+ if (!r)
|
|
return -ENOMEM;
|
|
- }
|
|
resp = (rndis_query_cmplt_type *)r->buf;
|
|
|
|
resp->MessageType = cpu_to_le32(RNDIS_MSG_QUERY_C);
|
|
@@ -703,10 +649,8 @@ static int rndis_set_response(int configNr, rndis_set_msg_type *buf)
|
|
struct rndis_params *params = rndis_per_dev_params + configNr;
|
|
|
|
r = rndis_add_response(configNr, sizeof(rndis_set_cmplt_type));
|
|
- if (!r) {
|
|
- RNDIS_DBG("rndis_add_response return NULL\n");
|
|
+ if (!r)
|
|
return -ENOMEM;
|
|
- }
|
|
resp = (rndis_set_cmplt_type *)r->buf;
|
|
|
|
BufLength = le32_to_cpu(buf->InformationBufferLength);
|
|
@@ -742,15 +686,6 @@ static int rndis_reset_response(int configNr, rndis_reset_msg_type *buf)
|
|
rndis_reset_cmplt_type *resp;
|
|
rndis_resp_t *r;
|
|
struct rndis_params *params = rndis_per_dev_params + configNr;
|
|
- u32 length;
|
|
- u8 *xbuf;
|
|
-
|
|
- RNDIS_DBG("reset, clean old response\n");
|
|
- /* drain the response queue */
|
|
- while ((xbuf = rndis_get_next_response(configNr, &length)))
|
|
- rndis_free_response(configNr, xbuf);
|
|
-
|
|
- rndis_test_reset_msg_cnt++;
|
|
|
|
r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type));
|
|
if (!r)
|
|
@@ -777,10 +712,8 @@ static int rndis_keepalive_response(int configNr,
|
|
/* host "should" check only in RNDIS_DATA_INITIALIZED state */
|
|
|
|
r = rndis_add_response(configNr, sizeof(rndis_keepalive_cmplt_type));
|
|
- if (!r) {
|
|
- RNDIS_DBG("rndis_add_response return NULL\n");
|
|
+ if (!r)
|
|
return -ENOMEM;
|
|
- }
|
|
resp = (rndis_keepalive_cmplt_type *)r->buf;
|
|
|
|
resp->MessageType = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C);
|
|
@@ -789,17 +722,6 @@ static int rndis_keepalive_response(int configNr,
|
|
resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
|
|
|
|
params->resp_avail(params->v);
|
|
-
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("data: rx_in %lu\n"
|
|
- "rx_out %lu, rx_nomem %lu, rx_error %lu, tx_in %lu\n"
|
|
- "tx_out %lu, tx_busy %lu, tx_complete %lu, last_msg_id: 0x%x\n"
|
|
- "last_resp_id: 0x%x, RNDIS reset cnt: 0x%lu, queue stopped? %d\n",
|
|
- rndis_test_rx_usb_in, rndis_test_rx_net_out, rndis_test_rx_nomem, rndis_test_rx_error,
|
|
- rndis_test_tx_net_in, rndis_test_tx_usb_out, rndis_test_tx_busy,
|
|
- rndis_test_tx_complete, rndis_test_last_msg_id,
|
|
- rndis_test_last_resp_id,
|
|
- rndis_test_reset_msg_cnt, netif_queue_stopped(params->dev));
|
|
return 0;
|
|
}
|
|
|
|
@@ -876,7 +798,7 @@ EXPORT_SYMBOL_GPL(rndis_set_host_mac);
|
|
*/
|
|
int rndis_msg_parser(u8 configNr, u8 *buf)
|
|
{
|
|
- u32 MsgType, MsgLength, MsgID;
|
|
+ u32 MsgType, MsgLength;
|
|
__le32 *tmp;
|
|
struct rndis_params *params;
|
|
|
|
@@ -886,10 +808,6 @@ int rndis_msg_parser(u8 configNr, u8 *buf)
|
|
tmp = (__le32 *)buf;
|
|
MsgType = get_unaligned_le32(tmp++);
|
|
MsgLength = get_unaligned_le32(tmp++);
|
|
- MsgID = get_unaligned_le32(tmp++);
|
|
- if (rndis_debug)
|
|
- RNDIS_DBG("MsgType is %d,configNr is %d, RequestID is 0x%x\n", MsgType, configNr, MsgID);
|
|
- rndis_test_last_msg_id = MsgID;
|
|
|
|
if (configNr >= RNDIS_MAX_CONFIGS)
|
|
return -ENOTSUPP;
|
|
@@ -949,27 +867,8 @@ int rndis_msg_parser(u8 configNr, u8 *buf)
|
|
*/
|
|
pr_warning("%s: unknown RNDIS message 0x%08X len %d\n",
|
|
__func__, MsgType, MsgLength);
|
|
- {
|
|
- unsigned i;
|
|
-
|
|
- for (i = 0; i < MsgLength; i += 16) {
|
|
- pr_debug("%03d\n"
|
|
- "%02x %02x %02x %02x\n"
|
|
- "%02x %02x %02x %02x\n"
|
|
- "%02x %02x %02x %02x\n"
|
|
- "%02x %02x %02x %02x\n",
|
|
- i, buf[i], buf[i+1],
|
|
- buf[i+2], buf[i+3],
|
|
- buf[i+4], buf[i+5],
|
|
- buf[i+6], buf[i+7],
|
|
- buf[i+8], buf[i+9],
|
|
- buf[i+10], buf[i+11],
|
|
- buf[i+12], buf[i+13],
|
|
- buf[i+14], buf[i+15]);
|
|
- }
|
|
- }
|
|
- /* print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
|
|
- buf, MsgLength); */
|
|
+ print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
|
|
+ buf, MsgLength);
|
|
break;
|
|
}
|
|
|
|
@@ -989,7 +888,6 @@ int rndis_register(void (*resp_avail)(void *v), void *v)
|
|
rndis_per_dev_params[i].used = 1;
|
|
rndis_per_dev_params[i].resp_avail = resp_avail;
|
|
rndis_per_dev_params[i].v = v;
|
|
- rndis_per_dev_params[i].max_pkt_per_xfer = 1;
|
|
pr_debug("%s: configNr = %d\n", __func__, i);
|
|
return i;
|
|
}
|
|
@@ -1019,8 +917,6 @@ int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)
|
|
rndis_per_dev_params[configNr].dev = dev;
|
|
rndis_per_dev_params[configNr].filter = cdc_filter;
|
|
|
|
- rndis_ul_max_xfer_size_rcvd = 0;
|
|
- rndis_ul_max_pkt_per_xfer_rcvd = 0;
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL_GPL(rndis_set_param_dev);
|
|
@@ -1050,13 +946,6 @@ int rndis_set_param_medium(u8 configNr, u32 medium, u32 speed)
|
|
}
|
|
EXPORT_SYMBOL_GPL(rndis_set_param_medium);
|
|
|
|
-void rndis_set_max_pkt_xfer(u8 configNr, u8 max_pkt_per_xfer)
|
|
-{
|
|
- pr_debug("%s:\n", __func__);
|
|
-
|
|
- rndis_per_dev_params[configNr].max_pkt_per_xfer = max_pkt_per_xfer;
|
|
-}
|
|
-
|
|
void rndis_add_hdr(struct sk_buff *skb)
|
|
{
|
|
struct rndis_packet_msg_type *header;
|
|
@@ -1080,9 +969,6 @@ void rndis_free_response(int configNr, u8 *buf)
|
|
list_for_each_safe(act, tmp,
|
|
&(rndis_per_dev_params[configNr].resp_queue))
|
|
{
|
|
- if (!act)
|
|
- continue;
|
|
-
|
|
r = list_entry(act, rndis_resp_t, list);
|
|
if (r && r->buf == buf) {
|
|
list_del(&r->list);
|
|
@@ -1135,73 +1021,23 @@ int rndis_rm_hdr(struct gether *port,
|
|
struct sk_buff *skb,
|
|
struct sk_buff_head *list)
|
|
{
|
|
- int num_pkts = 1;
|
|
-
|
|
- if (skb->len > rndis_ul_max_xfer_size_rcvd)
|
|
- rndis_ul_max_xfer_size_rcvd = skb->len;
|
|
-
|
|
- while (skb->len) {
|
|
- struct rndis_packet_msg_type *hdr;
|
|
- struct sk_buff *skb2;
|
|
- u32 msg_len, data_offset, data_len;
|
|
-
|
|
- /* some rndis hosts send extra byte to avoid zlp, ignore it */
|
|
- if (skb->len == 1) {
|
|
- dev_kfree_skb_any(skb);
|
|
- return 0;
|
|
- }
|
|
-
|
|
- if (skb->len < sizeof *hdr) {
|
|
- pr_err("invalid rndis pkt: skblen:%u hdr_len:%u",
|
|
- (unsigned int)(skb->len), (unsigned int)sizeof(*hdr));
|
|
- dev_kfree_skb_any(skb);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- hdr = (void *)skb->data;
|
|
- msg_len = le32_to_cpu(hdr->MessageLength);
|
|
- data_offset = le32_to_cpu(hdr->DataOffset);
|
|
- data_len = le32_to_cpu(hdr->DataLength);
|
|
-
|
|
- if (skb->len < msg_len ||
|
|
- ((data_offset + data_len + 8) > msg_len)) {
|
|
- pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
|
|
- le32_to_cpu(hdr->MessageType),
|
|
- msg_len, data_offset, data_len, skb->len);
|
|
- dev_kfree_skb_any(skb);
|
|
- return -EOVERFLOW;
|
|
- }
|
|
- if (le32_to_cpu(hdr->MessageType) != RNDIS_MSG_PACKET) {
|
|
- pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
|
|
- le32_to_cpu(hdr->MessageType),
|
|
- msg_len, data_offset, data_len, skb->len);
|
|
- dev_kfree_skb_any(skb);
|
|
- return -EINVAL;
|
|
- }
|
|
+ /* tmp points to a struct rndis_packet_msg_type */
|
|
+ __le32 *tmp = (void *)skb->data;
|
|
|
|
- skb_pull(skb, data_offset + 8);
|
|
-
|
|
- if (msg_len == skb->len) {
|
|
- skb_trim(skb, data_len);
|
|
- break;
|
|
- }
|
|
-
|
|
- skb2 = skb_clone(skb, GFP_ATOMIC);
|
|
- if (!skb2) {
|
|
- pr_err("%s:skb clone failed\n", __func__);
|
|
- dev_kfree_skb_any(skb);
|
|
- return -ENOMEM;
|
|
- }
|
|
-
|
|
- skb_pull(skb, msg_len - sizeof *hdr);
|
|
- skb_trim(skb2, data_len);
|
|
- skb_queue_tail(list, skb2);
|
|
-
|
|
- num_pkts++;
|
|
+ /* MessageType, MessageLength */
|
|
+ if (cpu_to_le32(RNDIS_MSG_PACKET)
|
|
+ != get_unaligned(tmp++)) {
|
|
+ dev_kfree_skb_any(skb);
|
|
+ return -EINVAL;
|
|
}
|
|
+ tmp++;
|
|
|
|
- if (num_pkts > rndis_ul_max_pkt_per_xfer_rcvd)
|
|
- rndis_ul_max_pkt_per_xfer_rcvd = num_pkts;
|
|
+ /* DataOffset, DataLength */
|
|
+ if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
|
|
+ dev_kfree_skb_any(skb);
|
|
+ return -EOVERFLOW;
|
|
+ }
|
|
+ skb_trim(skb, get_unaligned_le32(tmp++));
|
|
|
|
skb_queue_tail(list, skb);
|
|
return 0;
|
|
@@ -1222,9 +1058,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
|
|
"speed : %d\n"
|
|
"cable : %s\n"
|
|
"vendor ID : 0x%08X\n"
|
|
- "vendor : %s\n"
|
|
- "ul-max-xfer-size:%d max-xfer-size-rcvd: %d\n"
|
|
- "ul-max-pkts-per-xfer:%d max-pkts-per-xfer-rcvd:%d\n",
|
|
+ "vendor : %s\n",
|
|
param->confignr, (param->used) ? "y" : "n",
|
|
({ char *s = "?";
|
|
switch (param->state) {
|
|
@@ -1238,13 +1072,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
|
|
param->medium,
|
|
(param->media_state) ? 0 : param->speed*100,
|
|
(param->media_state) ? "disconnected" : "connected",
|
|
- param->vendorID, param->vendorDescr,
|
|
- param->max_pkt_per_xfer *
|
|
- (param->dev->mtu + sizeof(struct ethhdr) +
|
|
- sizeof(struct rndis_packet_msg_type) + 22),
|
|
- rndis_ul_max_xfer_size_rcvd,
|
|
- param->max_pkt_per_xfer,
|
|
- rndis_ul_max_pkt_per_xfer_rcvd);
|
|
+ param->vendorID, param->vendorDescr);
|
|
return 0;
|
|
}
|
|
|
|
@@ -1307,12 +1135,13 @@ static const struct file_operations rndis_proc_fops = {
|
|
.write = rndis_proc_write,
|
|
};
|
|
|
|
-define NAME_TEMPLATE "driver/rndis-%03d"
|
|
+#define NAME_TEMPLATE "driver/rndis-%03d"
|
|
|
|
static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
|
|
|
|
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
|
|
|
|
+
|
|
int rndis_init(void)
|
|
{
|
|
u8 i;
|
|
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
|
|
index 145f01b18190..0f4abb4c3775 100644
|
|
--- a/drivers/usb/gadget/function/rndis.h
|
|
+++ b/drivers/usb/gadget/function/rndis.h
|
|
@@ -190,7 +190,6 @@ typedef struct rndis_params
|
|
struct net_device *dev;
|
|
|
|
u32 vendorID;
|
|
- u8 max_pkt_per_xfer;
|
|
const char *vendorDescr;
|
|
void (*resp_avail)(void *v);
|
|
void *v;
|
|
@@ -206,7 +205,6 @@ int rndis_set_param_dev (u8 configNr, struct net_device *dev,
|
|
int rndis_set_param_vendor (u8 configNr, u32 vendorID,
|
|
const char *vendorDescr);
|
|
int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
|
|
-void rndis_set_max_pkt_xfer(u8 configNr, u8 max_pkt_per_xfer);
|
|
void rndis_add_hdr (struct sk_buff *skb);
|
|
int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
|
|
struct sk_buff_head *list);
|
|
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
|
|
index 05cef7e1b718..33011f8ee865 100644
|
|
--- a/drivers/usb/gadget/function/u_ether.c
|
|
+++ b/drivers/usb/gadget/function/u_ether.c
|
|
@@ -23,8 +23,7 @@
|
|
#include <linux/if_vlan.h>
|
|
|
|
#include "u_ether.h"
|
|
-#include "usb_boost.h"
|
|
-#include "rndis.h"
|
|
+
|
|
|
|
/*
|
|
* This component encapsulates the Ethernet link glue needed to provide
|
|
@@ -49,9 +48,6 @@
|
|
|
|
#define UETH__VERSION "29-May-2008"
|
|
|
|
-static struct workqueue_struct *uether_wq;
|
|
-static struct workqueue_struct *uether_wq1;
|
|
-
|
|
struct eth_dev {
|
|
/* lock is held while accessing port_usb
|
|
*/
|
|
@@ -61,32 +57,21 @@ struct eth_dev {
|
|
struct net_device *net;
|
|
struct usb_gadget *gadget;
|
|
|
|
- spinlock_t req_lock; /* guard {tx}_reqs */
|
|
- spinlock_t reqrx_lock; /* guard {rx}_reqs */
|
|
+ spinlock_t req_lock; /* guard {rx,tx}_reqs */
|
|
struct list_head tx_reqs, rx_reqs;
|
|
- unsigned tx_qlen;
|
|
-/* Minimum number of TX USB request queued to UDC */
|
|
-#define TX_REQ_THRESHOLD 5
|
|
- int no_tx_req_used;
|
|
- int tx_skb_hold_count;
|
|
- u32 tx_req_bufsize;
|
|
+ atomic_t tx_qlen;
|
|
|
|
struct sk_buff_head rx_frames;
|
|
|
|
unsigned qmult;
|
|
|
|
unsigned header_len;
|
|
- unsigned ul_max_pkts_per_xfer;
|
|
- unsigned dl_max_pkts_per_xfer;
|
|
- uint32_t dl_max_xfer_size;
|
|
struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
|
|
int (*unwrap)(struct gether *,
|
|
struct sk_buff *skb,
|
|
struct sk_buff_head *list);
|
|
|
|
struct work_struct work;
|
|
- struct work_struct rx_work;
|
|
- struct work_struct rx_work1;
|
|
|
|
unsigned long todo;
|
|
#define WORK_RX_MEMORY 0
|
|
@@ -102,14 +87,6 @@ struct eth_dev {
|
|
|
|
#define DEFAULT_QLEN 2 /* double buffering by default */
|
|
|
|
-static unsigned tx_wakeup_threshold = 13;
|
|
-module_param(tx_wakeup_threshold, uint, S_IRUGO|S_IWUSR);
|
|
-MODULE_PARM_DESC(tx_wakeup_threshold, "tx wakeup threshold value");
|
|
-
|
|
-#define U_ETHER_RX_PENDING_TSHOLD 100
|
|
-static unsigned int u_ether_rx_pending_thld = U_ETHER_RX_PENDING_TSHOLD;
|
|
-module_param(u_ether_rx_pending_thld, uint, S_IRUGO | S_IWUSR);
|
|
-
|
|
/* for dual-speed hardware, use deeper queues at high/super speed */
|
|
static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
|
|
{
|
|
@@ -156,23 +133,6 @@ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
|
|
xprintk(dev , KERN_INFO , fmt , ## args)
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
-unsigned int rndis_test_last_resp_id;
|
|
-unsigned int rndis_test_last_msg_id;
|
|
-
|
|
-unsigned long rndis_test_reset_msg_cnt;
|
|
-
|
|
-unsigned long rndis_test_rx_usb_in;
|
|
-unsigned long rndis_test_rx_net_out;
|
|
-unsigned long rndis_test_rx_nomem;
|
|
-unsigned long rndis_test_rx_error;
|
|
-
|
|
-unsigned long rndis_test_tx_net_in;
|
|
-unsigned long rndis_test_tx_busy;
|
|
-unsigned long rndis_test_tx_stop;
|
|
-
|
|
-unsigned long rndis_test_tx_usb_out;
|
|
-unsigned long rndis_test_tx_complete;
|
|
-#define U_ETHER_DBG(fmt, args...) pr_debug("U_ETHER,%s, " fmt, __func__, ## args)
|
|
|
|
/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
|
|
|
|
@@ -191,7 +151,6 @@ static int ueth_change_mtu(struct net_device *net, int new_mtu)
|
|
else
|
|
net->mtu = new_mtu;
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
|
- U_ETHER_DBG("mtu to %d, status is %d\n", new_mtu , status);
|
|
|
|
return status;
|
|
}
|
|
@@ -228,7 +187,6 @@ static void defer_kevent(struct eth_dev *dev, int flag)
|
|
}
|
|
|
|
static void rx_complete(struct usb_ep *ep, struct usb_request *req);
|
|
-static void tx_complete(struct usb_ep *ep, struct usb_request *req);
|
|
|
|
static int
|
|
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
|
|
@@ -267,18 +225,12 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
|
|
size += out->maxpacket - 1;
|
|
size -= size % out->maxpacket;
|
|
|
|
- if (dev->ul_max_pkts_per_xfer)
|
|
- size *= dev->ul_max_pkts_per_xfer;
|
|
-
|
|
if (dev->port_usb->is_fixed)
|
|
size = max_t(size_t, size, dev->port_usb->fixed_out_len);
|
|
|
|
- U_ETHER_DBG("size:%d, mtu:%d, hdr_len:%d, maxpacket:%d, ul_max_pkts_per_xfer:%d",
|
|
- (int)size, dev->net->mtu, dev->port_usb->header_len, out->maxpacket, dev->ul_max_pkts_per_xfer);
|
|
skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
|
|
if (skb == NULL) {
|
|
- U_ETHER_DBG("no rx skb\n");
|
|
- rndis_test_rx_nomem++;
|
|
+ DBG(dev, "no rx skb\n");
|
|
goto enomem;
|
|
}
|
|
|
|
@@ -290,6 +242,7 @@ rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
|
|
|
|
req->buf = skb->data;
|
|
req->length = size;
|
|
+ req->complete = rx_complete;
|
|
req->context = skb;
|
|
|
|
retval = usb_ep_queue(out, req, gfp_flags);
|
|
@@ -300,22 +253,23 @@ enomem:
|
|
DBG(dev, "rx submit --> %d\n", retval);
|
|
if (skb)
|
|
dev_kfree_skb_any(skb);
|
|
+ spin_lock_irqsave(&dev->req_lock, flags);
|
|
+ list_add(&req->list, &dev->rx_reqs);
|
|
+ spin_unlock_irqrestore(&dev->req_lock, flags);
|
|
}
|
|
return retval;
|
|
}
|
|
|
|
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
|
|
{
|
|
- struct sk_buff *skb = req->context;
|
|
+ struct sk_buff *skb = req->context, *skb2;
|
|
struct eth_dev *dev = ep->driver_data;
|
|
int status = req->status;
|
|
- bool queue = 0;
|
|
|
|
switch (status) {
|
|
|
|
/* normal completion */
|
|
case 0:
|
|
- U_ETHER_DBG("len(%d)\n", req->actual);
|
|
skb_put(skb, req->actual);
|
|
|
|
if (dev->unwrap) {
|
|
@@ -326,10 +280,6 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
|
|
status = dev->unwrap(dev->port_usb,
|
|
skb,
|
|
&dev->rx_frames);
|
|
- if (status == -EINVAL)
|
|
- dev->net->stats.rx_errors++;
|
|
- else if (status == -EOVERFLOW)
|
|
- dev->net->stats.rx_over_errors++;
|
|
} else {
|
|
dev_kfree_skb_any(skb);
|
|
status = -ENOTCONN;
|
|
@@ -338,10 +288,30 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
|
|
} else {
|
|
skb_queue_tail(&dev->rx_frames, skb);
|
|
}
|
|
+ skb = NULL;
|
|
+
|
|
+ skb2 = skb_dequeue(&dev->rx_frames);
|
|
+ while (skb2) {
|
|
+ if (status < 0
|
|
+ || ETH_HLEN > skb2->len
|
|
+ || skb2->len > VLAN_ETH_FRAME_LEN) {
|
|
+ dev->net->stats.rx_errors++;
|
|
+ dev->net->stats.rx_length_errors++;
|
|
+ DBG(dev, "rx length %d\n", skb2->len);
|
|
+ dev_kfree_skb_any(skb2);
|
|
+ goto next_frame;
|
|
+ }
|
|
+ skb2->protocol = eth_type_trans(skb2, dev->net);
|
|
+ dev->net->stats.rx_packets++;
|
|
+ dev->net->stats.rx_bytes += skb2->len;
|
|
|
|
- if (!status)
|
|
- queue = 1;
|
|
- rndis_test_rx_usb_in++;
|
|
+ /* no buffer copies needed, unless hardware can't
|
|
+ * use skb buffers.
|
|
+ */
|
|
+ status = netif_rx(skb2);
|
|
+next_frame:
|
|
+ skb2 = skb_dequeue(&dev->rx_frames);
|
|
+ }
|
|
break;
|
|
|
|
/* software-driven interface shutdown */
|
|
@@ -364,37 +334,28 @@ quiesce:
|
|
/* FALLTHROUGH */
|
|
|
|
default:
|
|
- queue = 1;
|
|
- dev_kfree_skb_any(skb);
|
|
dev->net->stats.rx_errors++;
|
|
DBG(dev, "rx status %d\n", status);
|
|
break;
|
|
}
|
|
|
|
+ if (skb)
|
|
+ dev_kfree_skb_any(skb);
|
|
+ if (!netif_running(dev->net)) {
|
|
clean:
|
|
- if (queue && dev->rx_frames.qlen <= u_ether_rx_pending_thld) {
|
|
- if (rx_submit(dev, req, GFP_ATOMIC) < 0) {
|
|
- spin_lock(&dev->reqrx_lock);
|
|
- list_add(&req->list, &dev->rx_reqs);
|
|
- spin_unlock(&dev->reqrx_lock);
|
|
- }
|
|
- } else {
|
|
- spin_lock(&dev->reqrx_lock);
|
|
+ spin_lock(&dev->req_lock);
|
|
list_add(&req->list, &dev->rx_reqs);
|
|
- spin_unlock(&dev->reqrx_lock);
|
|
- }
|
|
-
|
|
- if (queue) {
|
|
- queue_work(uether_wq, &dev->rx_work);
|
|
- queue_work(uether_wq1, &dev->rx_work1);
|
|
+ spin_unlock(&dev->req_lock);
|
|
+ req = NULL;
|
|
}
|
|
+ if (req)
|
|
+ rx_submit(dev, req, GFP_ATOMIC);
|
|
}
|
|
|
|
static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
|
|
{
|
|
unsigned i;
|
|
struct usb_request *req;
|
|
- bool usb_in;
|
|
|
|
if (!n)
|
|
return -ENOMEM;
|
|
@@ -405,21 +366,10 @@ static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
|
|
if (i-- == 0)
|
|
goto extra;
|
|
}
|
|
-
|
|
- if (ep->desc->bEndpointAddress & USB_DIR_IN)
|
|
- usb_in = true;
|
|
- else
|
|
- usb_in = false;
|
|
-
|
|
while (i--) {
|
|
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
|
|
if (!req)
|
|
return list_empty(list) ? -ENOMEM : 0;
|
|
- /* update completion handler */
|
|
- if (usb_in)
|
|
- req->complete = tx_complete;
|
|
- else
|
|
- req->complete = rx_complete;
|
|
list_add(&req->list, list);
|
|
}
|
|
return 0;
|
|
@@ -447,22 +397,16 @@ static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
|
|
|
|
spin_lock(&dev->req_lock);
|
|
status = prealloc(&dev->tx_reqs, link->in_ep, n);
|
|
- if (status < 0) {
|
|
- spin_unlock(&dev->req_lock);
|
|
- U_ETHER_DBG("can't alloc tx requests\n");
|
|
- return status;
|
|
- }
|
|
- spin_unlock(&dev->req_lock);
|
|
-
|
|
- spin_lock(&dev->reqrx_lock);
|
|
+ if (status < 0)
|
|
+ goto fail;
|
|
status = prealloc(&dev->rx_reqs, link->out_ep, n);
|
|
- if (status < 0) {
|
|
- spin_unlock(&dev->reqrx_lock);
|
|
- U_ETHER_DBG("can't alloc rx requests\n");
|
|
- return status;
|
|
- }
|
|
- spin_unlock(&dev->reqrx_lock);
|
|
-
|
|
+ if (status < 0)
|
|
+ goto fail;
|
|
+ goto done;
|
|
+fail:
|
|
+ DBG(dev, "can't alloc requests\n");
|
|
+done:
|
|
+ spin_unlock(&dev->req_lock);
|
|
return status;
|
|
}
|
|
|
|
@@ -470,69 +414,23 @@ static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
|
|
{
|
|
struct usb_request *req;
|
|
unsigned long flags;
|
|
- int req_cnt = 0;
|
|
|
|
/* fill unused rxq slots with some skb */
|
|
- spin_lock_irqsave(&dev->reqrx_lock, flags);
|
|
+ spin_lock_irqsave(&dev->req_lock, flags);
|
|
while (!list_empty(&dev->rx_reqs)) {
|
|
- /* break the nexus of continuous completion and re-submission*/
|
|
- if (++req_cnt > qlen(dev->gadget, dev->qmult))
|
|
- break;
|
|
-
|
|
req = container_of(dev->rx_reqs.next,
|
|
struct usb_request, list);
|
|
list_del_init(&req->list);
|
|
- spin_unlock_irqrestore(&dev->reqrx_lock, flags);
|
|
+ spin_unlock_irqrestore(&dev->req_lock, flags);
|
|
|
|
if (rx_submit(dev, req, gfp_flags) < 0) {
|
|
- spin_lock_irqsave(&dev->reqrx_lock, flags);
|
|
- list_add(&req->list, &dev->rx_reqs);
|
|
- spin_unlock_irqrestore(&dev->reqrx_lock, flags);
|
|
defer_kevent(dev, WORK_RX_MEMORY);
|
|
return;
|
|
}
|
|
|
|
- spin_lock_irqsave(&dev->reqrx_lock, flags);
|
|
- }
|
|
- spin_unlock_irqrestore(&dev->reqrx_lock, flags);
|
|
-}
|
|
-
|
|
-static void process_rx_w(struct work_struct *work)
|
|
-{
|
|
- struct eth_dev *dev = container_of(work, struct eth_dev, rx_work);
|
|
- struct sk_buff *skb;
|
|
- int status = 0;
|
|
-
|
|
- if (!dev->port_usb)
|
|
- return;
|
|
-
|
|
- while ((skb = skb_dequeue(&dev->rx_frames))) {
|
|
- if (status < 0
|
|
- || ETH_HLEN > skb->len
|
|
- || skb->len > ETH_FRAME_LEN) {
|
|
- dev->net->stats.rx_errors++;
|
|
- dev->net->stats.rx_length_errors++;
|
|
- rndis_test_rx_error++;
|
|
- DBG(dev, "rx length %d\n", skb->len);
|
|
- dev_kfree_skb_any(skb);
|
|
- continue;
|
|
- }
|
|
- skb->protocol = eth_type_trans(skb, dev->net);
|
|
- dev->net->stats.rx_packets++;
|
|
- dev->net->stats.rx_bytes += skb->len;
|
|
-#if defined(NETDEV_TRACE) && defined(NETDEV_UL_TRACE)
|
|
- skb->dbg_flag = 0x4;
|
|
-#endif
|
|
-
|
|
- rndis_test_rx_net_out++;
|
|
- status = netif_rx_ni(skb);
|
|
+ spin_lock_irqsave(&dev->req_lock, flags);
|
|
}
|
|
-
|
|
-/* move to another workthread */
|
|
-#if 0
|
|
- if (netif_running(dev->net))
|
|
- rx_fill(dev, GFP_KERNEL);
|
|
-#endif
|
|
+ spin_unlock_irqrestore(&dev->req_lock, flags);
|
|
}
|
|
|
|
static void eth_work(struct work_struct *work)
|
|
@@ -548,39 +446,10 @@ static void eth_work(struct work_struct *work)
|
|
DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
|
|
}
|
|
|
|
-static void process_rx_w1(struct work_struct *work)
|
|
-{
|
|
- struct eth_dev *dev = container_of(work, struct eth_dev, rx_work1);
|
|
-
|
|
- if (!dev->port_usb)
|
|
- return;
|
|
-
|
|
- if (netif_running(dev->net))
|
|
- rx_fill(dev, GFP_KERNEL);
|
|
-}
|
|
-
|
|
static void tx_complete(struct usb_ep *ep, struct usb_request *req)
|
|
{
|
|
- struct sk_buff *skb;
|
|
- struct eth_dev *dev;
|
|
- struct net_device *net;
|
|
- struct usb_request *new_req;
|
|
- struct usb_ep *in;
|
|
- int length;
|
|
- int retval;
|
|
-
|
|
- if (!ep->driver_data) {
|
|
- usb_ep_free_request(ep, req);
|
|
- return;
|
|
- }
|
|
-
|
|
- dev = ep->driver_data;
|
|
- net = dev->net;
|
|
-
|
|
- if (!dev->port_usb) {
|
|
- usb_ep_free_request(ep, req);
|
|
- return;
|
|
- }
|
|
+ struct sk_buff *skb = req->context;
|
|
+ struct eth_dev *dev = ep->driver_data;
|
|
|
|
switch (req->status) {
|
|
default:
|
|
@@ -591,104 +460,18 @@ static void tx_complete(struct usb_ep *ep, struct usb_request *req)
|
|
case -ESHUTDOWN: /* disconnect etc */
|
|
break;
|
|
case 0:
|
|
- if (!req->zero)
|
|
- dev->net->stats.tx_bytes += req->length-1;
|
|
- else
|
|
- dev->net->stats.tx_bytes += req->length;
|
|
+ dev->net->stats.tx_bytes += skb->len;
|
|
}
|
|
dev->net->stats.tx_packets++;
|
|
- rndis_test_tx_complete++;
|
|
|
|
spin_lock(&dev->req_lock);
|
|
- list_add_tail(&req->list, &dev->tx_reqs);
|
|
-
|
|
- if (dev->port_usb->multi_pkt_xfer && !req->context) {
|
|
- dev->no_tx_req_used--;
|
|
- req->length = 0;
|
|
- in = dev->port_usb->in_ep;
|
|
-
|
|
- if (!list_empty(&dev->tx_reqs)) {
|
|
- new_req = container_of(dev->tx_reqs.next,
|
|
- struct usb_request, list);
|
|
- list_del(&new_req->list);
|
|
- spin_unlock(&dev->req_lock);
|
|
- if (new_req->length > 0) {
|
|
- length = new_req->length;
|
|
-
|
|
- /* NCM requires no zlp if transfer is
|
|
- * dwNtbInMaxSize */
|
|
- if (dev->port_usb->is_fixed &&
|
|
- length == dev->port_usb->fixed_in_len &&
|
|
- (length % in->maxpacket) == 0)
|
|
- new_req->zero = 0;
|
|
- else
|
|
- new_req->zero = 1;
|
|
-
|
|
- /* use zlp framing on tx for strict CDC-Ether
|
|
- * conformance, though any robust network rx
|
|
- * path ignores extra padding. and some hardware
|
|
- * doesn't like to write zlps.
|
|
- */
|
|
- if (new_req->zero && !dev->zlp &&
|
|
- (length % in->maxpacket) == 0) {
|
|
- new_req->zero = 0;
|
|
- length++;
|
|
- }
|
|
-
|
|
- new_req->length = length;
|
|
- retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
|
|
- switch (retval) {
|
|
- default:
|
|
- DBG(dev, "tx queue err %d\n", retval);
|
|
- new_req->length = 0;
|
|
- spin_lock(&dev->req_lock);
|
|
- list_add_tail(&new_req->list, &dev->tx_reqs);
|
|
- spin_unlock(&dev->req_lock);
|
|
- break;
|
|
- case 0:
|
|
- spin_lock(&dev->req_lock);
|
|
- dev->no_tx_req_used++;
|
|
- spin_unlock(&dev->req_lock);
|
|
- net->trans_start = jiffies;
|
|
- }
|
|
- } else {
|
|
- spin_lock(&dev->req_lock);
|
|
- /*
|
|
- * Put the idle request at the back of the
|
|
- * queue. The xmit function will put the
|
|
- * unfinished request at the beginning of the
|
|
- * queue.
|
|
- */
|
|
- list_add_tail(&new_req->list, &dev->tx_reqs);
|
|
- spin_unlock(&dev->req_lock);
|
|
- }
|
|
- } else {
|
|
- spin_unlock(&dev->req_lock);
|
|
- }
|
|
- } else {
|
|
- skb = req->context;
|
|
- /* Is aggregation already enabled and buffers allocated ? */
|
|
- if (dev->port_usb->multi_pkt_xfer && dev->tx_req_bufsize) {
|
|
-#if defined(CONFIG_64BIT) && defined(CONFIG_MTK_LM_MODE)
|
|
- req->buf = kzalloc(dev->tx_req_bufsize, GFP_ATOMIC | GFP_DMA);
|
|
-#else
|
|
- req->buf = kzalloc(dev->tx_req_bufsize, GFP_ATOMIC);
|
|
-#endif
|
|
- req->context = NULL;
|
|
- } else {
|
|
- req->buf = NULL;
|
|
- }
|
|
-
|
|
- spin_unlock(&dev->req_lock);
|
|
- dev_kfree_skb_any(skb);
|
|
- }
|
|
+ list_add(&req->list, &dev->tx_reqs);
|
|
+ spin_unlock(&dev->req_lock);
|
|
+ dev_kfree_skb_any(skb);
|
|
|
|
- if (netif_carrier_ok(dev->net)) {
|
|
- spin_lock(&dev->req_lock);
|
|
- if (dev->no_tx_req_used < tx_wakeup_threshold)
|
|
- netif_wake_queue(dev->net);
|
|
- spin_unlock(&dev->req_lock);
|
|
- }
|
|
+ atomic_dec(&dev->tx_qlen);
|
|
+ if (netif_carrier_ok(dev->net))
|
|
+ netif_wake_queue(dev->net);
|
|
}
|
|
|
|
static inline int is_promisc(u16 cdc_filter)
|
|
@@ -696,47 +479,6 @@ static inline int is_promisc(u16 cdc_filter)
|
|
return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
|
|
}
|
|
|
|
-static int alloc_tx_buffer(struct eth_dev *dev)
|
|
-{
|
|
- struct list_head *act;
|
|
- struct usb_request *req;
|
|
-
|
|
- dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
|
|
- (dev->net->mtu
|
|
- + sizeof(struct ethhdr)
|
|
- /* size of rndis_packet_msg_type */
|
|
- + 44
|
|
- + 22));
|
|
-
|
|
- list_for_each(act, &dev->tx_reqs) {
|
|
- req = container_of(act, struct usb_request, list);
|
|
- if (!req->buf) {
|
|
-#if defined(CONFIG_64BIT) && defined(CONFIG_MTK_LM_MODE)
|
|
- req->buf = kzalloc(dev->tx_req_bufsize,
|
|
- GFP_ATOMIC | GFP_DMA);
|
|
-#else
|
|
- req->buf = kzalloc(dev->tx_req_bufsize,
|
|
- GFP_ATOMIC);
|
|
-#endif
|
|
- if (!req->buf)
|
|
- goto free_buf;
|
|
- }
|
|
- /* req->context is not used for multi_pkt_xfers */
|
|
- req->context = NULL;
|
|
- }
|
|
- return 0;
|
|
-
|
|
-free_buf:
|
|
- /* tx_req_bufsize = 0 retries mem alloc on next eth_start_xmit */
|
|
- dev->tx_req_bufsize = 0;
|
|
- list_for_each(act, &dev->tx_reqs) {
|
|
- req = container_of(act, struct usb_request, list);
|
|
- kfree(req->buf);
|
|
- req->buf = NULL;
|
|
- }
|
|
- return -ENOMEM;
|
|
-}
|
|
-
|
|
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
|
|
struct net_device *net)
|
|
{
|
|
@@ -745,20 +487,16 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
|
|
int retval;
|
|
struct usb_request *req = NULL;
|
|
unsigned long flags;
|
|
- struct usb_ep *in = NULL;
|
|
- u16 cdc_filter = 0;
|
|
- bool multi_pkt_xfer = false;
|
|
- uint32_t max_size = 0;
|
|
- static unsigned int okCnt, busyCnt;
|
|
- static DEFINE_RATELIMIT_STATE(ratelimit1, 1 * HZ, 2);
|
|
- static DEFINE_RATELIMIT_STATE(ratelimit2, 1 * HZ, 2);
|
|
+ struct usb_ep *in;
|
|
+ u16 cdc_filter;
|
|
|
|
spin_lock_irqsave(&dev->lock, flags);
|
|
if (dev->port_usb) {
|
|
in = dev->port_usb->in_ep;
|
|
cdc_filter = dev->port_usb->cdc_filter;
|
|
- multi_pkt_xfer = dev->port_usb->multi_pkt_xfer;
|
|
- max_size = dev->dl_max_xfer_size;
|
|
+ } else {
|
|
+ in = NULL;
|
|
+ cdc_filter = 0;
|
|
}
|
|
spin_unlock_irqrestore(&dev->lock, flags);
|
|
|
|
@@ -767,7 +505,6 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
-
|
|
/* apply outgoing CDC or RNDIS filters */
|
|
if (skb && !is_promisc(cdc_filter)) {
|
|
u8 *dest = skb->data;
|
|
@@ -784,122 +521,57 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
|
|
type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
|
|
if (!(cdc_filter & type)) {
|
|
dev_kfree_skb_any(skb);
|
|
- U_ETHER_DBG("cdc_filter error, cdc_filter is 0x%x , type is 0x%x\n",
|
|
- cdc_filter , type);
|
|
return NETDEV_TX_OK;
|
|
}
|
|
}
|
|
/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
|
|
}
|
|
- /*
|
|
- * No buffer copies needed, unless the network stack did it
|
|
- * or the hardware can't use skb buffers or there's not enough
|
|
- * enough space for extra headers we need.
|
|
- */
|
|
- spin_lock_irqsave(&dev->lock, flags);
|
|
- if (dev->wrap && dev->port_usb)
|
|
- skb = dev->wrap(dev->port_usb, skb);
|
|
- spin_unlock_irqrestore(&dev->lock, flags);
|
|
-
|
|
- if (!skb) {
|
|
- if (!dev->port_usb->supports_multi_frame)
|
|
- dev->net->stats.tx_dropped++;
|
|
- /* no error code for dropped packets */
|
|
- return NETDEV_TX_OK;
|
|
- }
|
|
|
|
spin_lock_irqsave(&dev->req_lock, flags);
|
|
- /* Allocate memory for tx_reqs to support multi packet transfer */
|
|
- if (multi_pkt_xfer && !dev->tx_req_bufsize) {
|
|
- retval = alloc_tx_buffer(dev);
|
|
- if (retval < 0) {
|
|
- spin_unlock_irqrestore(&dev->req_lock, flags);
|
|
- return -ENOMEM;
|
|
- }
|
|
- }
|
|
- if (__ratelimit(&ratelimit1)) {
|
|
- usb_boost();
|
|
- U_ETHER_DBG("spd %d,ms %d,rin %lu,rout %lu,rxmem %lu,rxerr %lu\n"
|
|
- "tin %lu,tout %lu,tb %lu,ts %lu,tx_com %lu,lmsg: 0x%x,lrsp:0x%x,rst:%lu\n",
|
|
- dev->gadget->speed, max_size, rndis_test_rx_usb_in, rndis_test_rx_net_out,
|
|
- rndis_test_rx_nomem, rndis_test_rx_error, rndis_test_tx_net_in,
|
|
- rndis_test_tx_usb_out, rndis_test_tx_busy, rndis_test_tx_stop,
|
|
- rndis_test_tx_complete, rndis_test_last_msg_id, rndis_test_last_resp_id,
|
|
- rndis_test_reset_msg_cnt);
|
|
- }
|
|
- rndis_test_tx_net_in++;
|
|
/*
|
|
* this freelist can be empty if an interrupt triggered disconnect()
|
|
* and reconfigured the gadget (shutting down this queue) after the
|
|
* network stack decided to xmit but before we got the spinlock.
|
|
*/
|
|
if (list_empty(&dev->tx_reqs)) {
|
|
- busyCnt++;
|
|
- if (__ratelimit(&ratelimit2))
|
|
- U_ETHER_DBG("okCnt : %u, busyCnt : %u\n",
|
|
- okCnt, busyCnt);
|
|
spin_unlock_irqrestore(&dev->req_lock, flags);
|
|
- rndis_test_tx_busy++;
|
|
return NETDEV_TX_BUSY;
|
|
}
|
|
- okCnt++;
|
|
|
|
req = container_of(dev->tx_reqs.next, struct usb_request, list);
|
|
list_del(&req->list);
|
|
|
|
/* temporarily stop TX queue when the freelist empties */
|
|
- if (list_empty(&dev->tx_reqs)) {
|
|
- rndis_test_tx_stop++;
|
|
+ if (list_empty(&dev->tx_reqs))
|
|
netif_stop_queue(net);
|
|
- }
|
|
spin_unlock_irqrestore(&dev->req_lock, flags);
|
|
|
|
- if (dev->port_usb == NULL) {
|
|
- dev_kfree_skb_any(skb);
|
|
- U_ETHER_DBG("port_usb NULL\n");
|
|
- return NETDEV_TX_OK;
|
|
- }
|
|
-
|
|
- if (multi_pkt_xfer) {
|
|
- pr_debug("req->length:%d header_len:%u\n"
|
|
- "skb->len:%d skb->data_len:%d\n",
|
|
- req->length, dev->header_len,
|
|
- skb->len, skb->data_len);
|
|
- /* Add RNDIS Header */
|
|
- memcpy(req->buf + req->length, dev->port_usb->header,
|
|
- dev->header_len);
|
|
- /* Increment req length by header size */
|
|
- req->length += dev->header_len;
|
|
- /* Copy received IP data from SKB */
|
|
- memcpy(req->buf + req->length, skb->data, skb->len);
|
|
- /* Increment req length by skb data length */
|
|
- req->length += skb->len;
|
|
- length = req->length;
|
|
- dev_kfree_skb_any(skb);
|
|
-
|
|
- spin_lock_irqsave(&dev->req_lock, flags);
|
|
- dev->tx_skb_hold_count++;
|
|
- /* if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) { */
|
|
- if ((dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer)
|
|
- && (length < (max_size - dev->net->mtu))) {
|
|
-
|
|
- if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
|
|
- list_add(&req->list, &dev->tx_reqs);
|
|
- spin_unlock_irqrestore(&dev->req_lock, flags);
|
|
- goto success;
|
|
- }
|
|
+ /* no buffer copies needed, unless the network stack did it
|
|
+ * or the hardware can't use skb buffers.
|
|
+ * or there's not enough space for extra headers we need
|
|
+ */
|
|
+ if (dev->wrap) {
|
|
+ unsigned long flags;
|
|
+
|
|
+ spin_lock_irqsave(&dev->lock, flags);
|
|
+ if (dev->port_usb)
|
|
+ skb = dev->wrap(dev->port_usb, skb);
|
|
+ spin_unlock_irqrestore(&dev->lock, flags);
|
|
+ if (!skb) {
|
|
+ /* Multi frame CDC protocols may store the frame for
|
|
+ * later which is not a dropped frame.
|
|
+ */
|
|
+ if (dev->port_usb->supports_multi_frame)
|
|
+ goto multiframe;
|
|
+ goto drop;
|
|
}
|
|
-
|
|
- dev->no_tx_req_used++;
|
|
- dev->tx_skb_hold_count = 0;
|
|
- spin_unlock_irqrestore(&dev->req_lock, flags);
|
|
-
|
|
- } else {
|
|
- length = skb->len;
|
|
- req->buf = skb->data;
|
|
- req->context = skb;
|
|
}
|
|
|
|
+ length = skb->len;
|
|
+ req->buf = skb->data;
|
|
+ req->context = skb;
|
|
+ req->complete = tx_complete;
|
|
+
|
|
/* NCM requires no zlp if transfer is dwNtbInMaxSize */
|
|
if (dev->port_usb->is_fixed &&
|
|
length == dev->port_usb->fixed_in_len &&
|
|
@@ -912,36 +584,32 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
|
|
* though any robust network rx path ignores extra padding.
|
|
* and some hardware doesn't like to write zlps.
|
|
*/
|
|
- if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
|
|
- req->zero = 0;
|
|
+ if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
|
|
length++;
|
|
- }
|
|
|
|
req->length = length;
|
|
|
|
retval = usb_ep_queue(in, req, GFP_ATOMIC);
|
|
switch (retval) {
|
|
default:
|
|
- U_ETHER_DBG("tx queue err %d\n", retval);
|
|
+ DBG(dev, "tx queue err %d\n", retval);
|
|
break;
|
|
case 0:
|
|
- rndis_test_tx_usb_out++;
|
|
net->trans_start = jiffies;
|
|
+ atomic_inc(&dev->tx_qlen);
|
|
}
|
|
|
|
if (retval) {
|
|
- if (!multi_pkt_xfer)
|
|
- dev_kfree_skb_any(skb);
|
|
- else
|
|
- req->length = 0;
|
|
+ dev_kfree_skb_any(skb);
|
|
+drop:
|
|
dev->net->stats.tx_dropped++;
|
|
+multiframe:
|
|
spin_lock_irqsave(&dev->req_lock, flags);
|
|
if (list_empty(&dev->tx_reqs))
|
|
netif_start_queue(net);
|
|
- list_add_tail(&req->list, &dev->tx_reqs);
|
|
+ list_add(&req->list, &dev->tx_reqs);
|
|
spin_unlock_irqrestore(&dev->req_lock, flags);
|
|
}
|
|
-success:
|
|
return NETDEV_TX_OK;
|
|
}
|
|
|
|
@@ -949,13 +617,13 @@ success:
|
|
|
|
static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
|
|
{
|
|
- U_ETHER_DBG("\n");
|
|
+ DBG(dev, "%s\n", __func__);
|
|
|
|
/* fill the rx queue */
|
|
rx_fill(dev, gfp_flags);
|
|
|
|
/* and open the tx floodgates */
|
|
- dev->tx_qlen = 0;
|
|
+ atomic_set(&dev->tx_qlen, 0);
|
|
netif_wake_queue(dev->net);
|
|
}
|
|
|
|
@@ -964,7 +632,7 @@ static int eth_open(struct net_device *net)
|
|
struct eth_dev *dev = netdev_priv(net);
|
|
struct gether *link;
|
|
|
|
- U_ETHER_DBG("\n");
|
|
+ DBG(dev, "%s\n", __func__);
|
|
if (netif_carrier_ok(dev->net))
|
|
eth_start(dev, GFP_KERNEL);
|
|
|
|
@@ -982,7 +650,7 @@ static int eth_stop(struct net_device *net)
|
|
struct eth_dev *dev = netdev_priv(net);
|
|
unsigned long flags;
|
|
|
|
- U_ETHER_DBG("\n");
|
|
+ VDBG(dev, "%s\n", __func__);
|
|
netif_stop_queue(net);
|
|
|
|
DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
|
|
@@ -1028,7 +696,6 @@ static int eth_stop(struct net_device *net)
|
|
|
|
/*-------------------------------------------------------------------------*/
|
|
|
|
-
|
|
static int get_ether_addr(const char *str, u8 *dev_addr)
|
|
{
|
|
if (str) {
|
|
@@ -1060,20 +727,6 @@ static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
|
|
dev_addr[3], dev_addr[4], dev_addr[5]);
|
|
return 18;
|
|
}
|
|
-/* defined but not used due to MAC customization */
|
|
-#if 0
|
|
-static u8 host_ethaddr[ETH_ALEN];
|
|
-static int get_host_ether_addr(u8 *str, u8 *dev_addr)
|
|
-{
|
|
- memcpy(dev_addr, str, ETH_ALEN);
|
|
- if (is_valid_ether_addr(dev_addr))
|
|
- return 0;
|
|
-
|
|
- random_ether_addr(dev_addr);
|
|
- memcpy(str, dev_addr, ETH_ALEN);
|
|
- return 1;
|
|
-}
|
|
-#endif
|
|
|
|
static const struct net_device_ops eth_netdev_ops = {
|
|
.ndo_open = eth_open,
|
|
@@ -1117,10 +770,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
|
|
dev = netdev_priv(net);
|
|
spin_lock_init(&dev->lock);
|
|
spin_lock_init(&dev->req_lock);
|
|
- spin_lock_init(&dev->reqrx_lock);
|
|
INIT_WORK(&dev->work, eth_work);
|
|
- INIT_WORK(&dev->rx_work, process_rx_w);
|
|
- INIT_WORK(&dev->rx_work1, process_rx_w1);
|
|
INIT_LIST_HEAD(&dev->tx_reqs);
|
|
INIT_LIST_HEAD(&dev->rx_reqs);
|
|
|
|
@@ -1134,7 +784,6 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
|
|
if (get_ether_addr(dev_addr, net->dev_addr))
|
|
dev_warn(&g->dev,
|
|
"using random %s ethernet address\n", "self");
|
|
-
|
|
if (get_ether_addr(host_addr, dev->host_mac))
|
|
dev_warn(&g->dev,
|
|
"using random %s ethernet address\n", "host");
|
|
@@ -1184,8 +833,6 @@ struct net_device *gether_setup_name_default(const char *netname)
|
|
spin_lock_init(&dev->lock);
|
|
spin_lock_init(&dev->req_lock);
|
|
INIT_WORK(&dev->work, eth_work);
|
|
- INIT_WORK(&dev->rx_work, process_rx_w);
|
|
- INIT_WORK(&dev->rx_work1, process_rx_w1);
|
|
INIT_LIST_HEAD(&dev->tx_reqs);
|
|
INIT_LIST_HEAD(&dev->rx_reqs);
|
|
|
|
@@ -1352,16 +999,6 @@ int gether_get_ifname(struct net_device *net, char *name, int len)
|
|
}
|
|
EXPORT_SYMBOL_GPL(gether_get_ifname);
|
|
|
|
-void gether_update_dl_max_xfer_size(struct gether *link, uint32_t s)
|
|
-{
|
|
- struct eth_dev *dev = link->ioport;
|
|
- unsigned long flags;
|
|
-
|
|
- spin_lock_irqsave(&dev->lock, flags);
|
|
- dev->dl_max_xfer_size = s;
|
|
- spin_unlock_irqrestore(&dev->lock, flags);
|
|
-}
|
|
-
|
|
/**
|
|
* gether_cleanup - remove Ethernet-over-USB device
|
|
* Context: may sleep
|
|
@@ -1402,13 +1039,6 @@ struct net_device *gether_connect(struct gether *link)
|
|
|
|
if (!dev)
|
|
return ERR_PTR(-EINVAL);
|
|
- link->header = kzalloc(sizeof(struct rndis_packet_msg_type),
|
|
- GFP_ATOMIC);
|
|
- if (!link->header) {
|
|
- result = -ENOMEM;
|
|
- goto fail;
|
|
- }
|
|
- U_ETHER_DBG("\n");
|
|
|
|
link->in_ep->driver_data = dev;
|
|
result = usb_ep_enable(link->in_ep);
|
|
@@ -1437,14 +1067,8 @@ struct net_device *gether_connect(struct gether *link)
|
|
dev->header_len = link->header_len;
|
|
dev->unwrap = link->unwrap;
|
|
dev->wrap = link->wrap;
|
|
- dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
|
|
- dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
|
|
- dev->dl_max_xfer_size = link->dl_max_transfer_len;
|
|
|
|
spin_lock(&dev->lock);
|
|
- dev->tx_skb_hold_count = 0;
|
|
- dev->no_tx_req_used = 0;
|
|
- dev->tx_req_bufsize = 0;
|
|
dev->port_usb = link;
|
|
if (netif_running(dev->net)) {
|
|
if (link->open)
|
|
@@ -1465,13 +1089,10 @@ struct net_device *gether_connect(struct gether *link)
|
|
fail1:
|
|
(void) usb_ep_disable(link->in_ep);
|
|
}
|
|
- /* caller is responsible for cleanup on error */
|
|
- if (result < 0) {
|
|
fail0:
|
|
- kfree(link->header);
|
|
-fail:
|
|
+ /* caller is responsible for cleanup on error */
|
|
+ if (result < 0)
|
|
return ERR_PTR(result);
|
|
- }
|
|
return dev->net;
|
|
}
|
|
EXPORT_SYMBOL_GPL(gether_connect);
|
|
@@ -1492,25 +1113,12 @@ void gether_disconnect(struct gether *link)
|
|
{
|
|
struct eth_dev *dev = link->ioport;
|
|
struct usb_request *req;
|
|
- struct sk_buff *skb;
|
|
|
|
WARN_ON(!dev);
|
|
if (!dev)
|
|
return;
|
|
|
|
- U_ETHER_DBG("\n");
|
|
-
|
|
- rndis_test_rx_usb_in = 0;
|
|
- rndis_test_rx_net_out = 0;
|
|
- rndis_test_rx_nomem = 0;
|
|
- rndis_test_rx_error = 0;
|
|
-
|
|
- rndis_test_tx_net_in = 0;
|
|
- rndis_test_tx_busy = 0;
|
|
- rndis_test_tx_stop = 0;
|
|
-
|
|
- rndis_test_tx_usb_out = 0;
|
|
- rndis_test_tx_complete = 0;
|
|
+ DBG(dev, "%s\n", __func__);
|
|
|
|
netif_stop_queue(dev->net);
|
|
netif_carrier_off(dev->net);
|
|
@@ -1527,37 +1135,25 @@ void gether_disconnect(struct gether *link)
|
|
list_del(&req->list);
|
|
|
|
spin_unlock(&dev->req_lock);
|
|
- if (link->multi_pkt_xfer) {
|
|
- kfree(req->buf);
|
|
- req->buf = NULL;
|
|
- }
|
|
usb_ep_free_request(link->in_ep, req);
|
|
spin_lock(&dev->req_lock);
|
|
}
|
|
- kfree(link->header);
|
|
- link->header = NULL;
|
|
spin_unlock(&dev->req_lock);
|
|
link->in_ep->driver_data = NULL;
|
|
link->in_ep->desc = NULL;
|
|
|
|
usb_ep_disable(link->out_ep);
|
|
- spin_lock(&dev->reqrx_lock);
|
|
+ spin_lock(&dev->req_lock);
|
|
while (!list_empty(&dev->rx_reqs)) {
|
|
req = container_of(dev->rx_reqs.next,
|
|
struct usb_request, list);
|
|
list_del(&req->list);
|
|
|
|
- spin_unlock(&dev->reqrx_lock);
|
|
+ spin_unlock(&dev->req_lock);
|
|
usb_ep_free_request(link->out_ep, req);
|
|
- spin_lock(&dev->reqrx_lock);
|
|
+ spin_lock(&dev->req_lock);
|
|
}
|
|
- spin_unlock(&dev->reqrx_lock);
|
|
-
|
|
- spin_lock(&dev->rx_frames.lock);
|
|
- while ((skb = __skb_dequeue(&dev->rx_frames)))
|
|
- dev_kfree_skb_any(skb);
|
|
- spin_unlock(&dev->rx_frames.lock);
|
|
-
|
|
+ spin_unlock(&dev->req_lock);
|
|
link->out_ep->driver_data = NULL;
|
|
link->out_ep->desc = NULL;
|
|
|
|
@@ -1572,30 +1168,5 @@ void gether_disconnect(struct gether *link)
|
|
}
|
|
EXPORT_SYMBOL_GPL(gether_disconnect);
|
|
|
|
-static int __init gether_init(void)
|
|
-{
|
|
- uether_wq = create_singlethread_workqueue("uether");
|
|
- if (!uether_wq) {
|
|
- pr_err("%s: Unable to create workqueue: uether\n", __func__);
|
|
- return -ENOMEM;
|
|
- }
|
|
- uether_wq1 = create_singlethread_workqueue("uether_rx1");
|
|
- if (!uether_wq1) {
|
|
- destroy_workqueue(uether_wq);
|
|
- pr_err("%s: Unable to create workqueue: uether\n", __func__);
|
|
- return -ENOMEM;
|
|
- }
|
|
- return 0;
|
|
-}
|
|
-module_init(gether_init);
|
|
-
|
|
-static void __exit gether_exit(void)
|
|
-{
|
|
- destroy_workqueue(uether_wq);
|
|
- destroy_workqueue(uether_wq1);
|
|
-
|
|
-}
|
|
-module_exit(gether_exit);
|
|
+MODULE_LICENSE("GPL");
|
|
MODULE_AUTHOR("David Brownell");
|
|
-MODULE_DESCRIPTION("ethernet over USB driver");
|
|
-MODULE_LICENSE("GPL v2");
|
|
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
|
|
index 22d82976ed8a..334b38947916 100644
|
|
--- a/drivers/usb/gadget/function/u_ether.h
|
|
+++ b/drivers/usb/gadget/function/u_ether.h
|
|
@@ -22,7 +22,7 @@
|
|
|
|
#include "gadget_chips.h"
|
|
|
|
-#define QMULT_DEFAULT 10
|
|
+#define QMULT_DEFAULT 5
|
|
|
|
/*
|
|
* dev_addr: initial value
|
|
@@ -35,11 +35,11 @@
|
|
MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");\
|
|
\
|
|
static char *dev_addr; \
|
|
- module_param(dev_addr, charp, S_IRUGO|S_IWUSR); \
|
|
+ module_param(dev_addr, charp, S_IRUGO); \
|
|
MODULE_PARM_DESC(dev_addr, "Device Ethernet Address"); \
|
|
\
|
|
static char *host_addr; \
|
|
- module_param(host_addr, charp, S_IRUGO|S_IWUSR); \
|
|
+ module_param(host_addr, charp, S_IRUGO); \
|
|
MODULE_PARM_DESC(host_addr, "Host Ethernet Address")
|
|
|
|
struct eth_dev;
|
|
@@ -75,10 +75,6 @@ struct gether {
|
|
bool is_fixed;
|
|
u32 fixed_out_len;
|
|
u32 fixed_in_len;
|
|
- unsigned ul_max_pkts_per_xfer;
|
|
- unsigned dl_max_pkts_per_xfer;
|
|
- unsigned dl_max_transfer_len;
|
|
- bool multi_pkt_xfer;
|
|
bool supports_multi_frame;
|
|
struct sk_buff *(*wrap)(struct gether *port,
|
|
struct sk_buff *skb);
|
|
@@ -89,7 +85,6 @@ struct gether {
|
|
/* called on network open/close */
|
|
void (*open)(struct gether *);
|
|
void (*close)(struct gether *);
|
|
- struct rndis_packet_msg_type *header;
|
|
};
|
|
|
|
#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
|
|
@@ -260,7 +255,6 @@ void gether_cleanup(struct eth_dev *dev);
|
|
/* connect/disconnect is handled by individual functions */
|
|
struct net_device *gether_connect(struct gether *);
|
|
void gether_disconnect(struct gether *);
|
|
-void gether_update_dl_max_xfer_size(struct gether *link, uint32_t s);
|
|
|
|
/* Some controllers can't support CDC Ethernet (ECM) ... */
|
|
static inline bool can_support_ecm(struct usb_gadget *gadget)
|
|
@@ -275,21 +269,4 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
|
|
return true;
|
|
}
|
|
|
|
-extern unsigned int rndis_test_last_resp_id;
|
|
-extern unsigned int rndis_test_last_msg_id;
|
|
-
|
|
-extern unsigned long rndis_test_reset_msg_cnt;
|
|
-
|
|
-extern unsigned long rndis_test_rx_usb_in;
|
|
-extern unsigned long rndis_test_rx_net_out;
|
|
-extern unsigned long rndis_test_rx_nomem;
|
|
-extern unsigned long rndis_test_rx_error;
|
|
-
|
|
-extern unsigned long rndis_test_tx_net_in;
|
|
-extern unsigned long rndis_test_tx_busy;
|
|
-extern unsigned long rndis_test_tx_stop;
|
|
-
|
|
-extern unsigned long rndis_test_tx_usb_out;
|
|
-extern unsigned long rndis_test_tx_complete;
|
|
-
|
|
#endif /* __U_ETHER_H */
|
|
|