From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 07:24:11 +0000
Subject: [PATCH] add stmac read mac from eeprom
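
Rework kernel/drivers/net/wireless/mediatek/mt76/usb.c: switch to an
SPDX license tag, add a disable_usb_sg module parameter, introduce the
ext register accessors (MT_VEND_READ_EXT/MT_VEND_WRITE_EXT) and
reg-pair helpers, rework rx/tx queue entries around plain urbs with
per-queue rx processing, and replace the tx tasklet and delayed stat
work with mt76_worker and a regular workqueue.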
---
kernel/drivers/net/wireless/mediatek/mt76/usb.c | 955 +++++++++++++++++++++++++++++++++++++++--------------------
1 file changed, 629 insertions(+), 326 deletions(-)
diff --git a/kernel/drivers/net/wireless/mediatek/mt76/usb.c b/kernel/drivers/net/wireless/mediatek/mt76/usb.c
index dcf927d..f1ae9ff 100644
--- a/kernel/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/kernel/drivers/net/wireless/mediatek/mt76/usb.c
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include <linux/module.h>
#include "mt76.h"
#include "usb_trace.h"
#include "dma.h"
@@ -21,26 +11,31 @@
#define MT_VEND_REQ_MAX_RETRY 10
#define MT_VEND_REQ_TOUT_MS 300
-/* should be called with usb_ctrl_mtx locked */
+static bool disable_usb_sg;
+module_param_named(disable_usb_sg, disable_usb_sg, bool, 0644);
+MODULE_PARM_DESC(disable_usb_sg, "Disable usb scatter-gather support");
+
static int __mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
unsigned int pipe;
int i, ret;
+
+ lockdep_assert_held(&dev->usb.usb_ctrl_mtx);
pipe = (req_type & USB_DIR_IN) ? usb_rcvctrlpipe(udev, 0)
: usb_sndctrlpipe(udev, 0);
for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
return -EIO;
ret = usb_control_msg(udev, pipe, req, req_type, val,
offset, buf, len, MT_VEND_REQ_TOUT_MS);
if (ret == -ENODEV)
- set_bit(MT76_REMOVED, &dev->state);
+ set_bit(MT76_REMOVED, &dev->phy.state);
if (ret >= 0 || ret == -ENODEV)
return ret;
usleep_range(5000, 10000);
@@ -67,13 +62,25 @@
}
EXPORT_SYMBOL_GPL(mt76u_vendor_request);
-/* should be called with usb_ctrl_mtx locked */
-static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+static u32 ___mt76u_rr(struct mt76_dev *dev, u8 req, u32 addr)
{
struct mt76_usb *usb = &dev->usb;
u32 data = ~0;
- u16 offset;
int ret;
+
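+ /* the 32-bit register address is split across the control
+ * request: wValue carries the high 16 bits, wIndex the low 16
+ */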
+ ret = __mt76u_vendor_request(dev, req,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ addr >> 16, addr, usb->data,
+ sizeof(__le32));
+ if (ret == sizeof(__le32))
+ data = get_unaligned_le32(usb->data);
+ trace_usb_reg_rr(dev, addr, data);
+
+ return data;
+}
+
+static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
+{
u8 req;
switch (addr & MT_VEND_TYPE_MASK) {
@@ -87,19 +94,11 @@
req = MT_VEND_MULTI_READ;
break;
}
- offset = addr & ~MT_VEND_TYPE_MASK;
- ret = __mt76u_vendor_request(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR,
- 0, offset, usb->data, sizeof(__le32));
- if (ret == sizeof(__le32))
- data = get_unaligned_le32(usb->data);
- trace_usb_reg_rr(dev, addr, data);
-
- return data;
+ return ___mt76u_rr(dev, req, addr & ~MT_VEND_TYPE_MASK);
}
-u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
+static u32 mt76u_rr(struct mt76_dev *dev, u32 addr)
{
u32 ret;
@@ -110,11 +109,32 @@
return ret;
}
-/* should be called with usb_ctrl_mtx locked */
-static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+static u32 mt76u_rr_ext(struct mt76_dev *dev, u32 addr)
+{
+ u32 ret;
+
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ret = ___mt76u_rr(dev, MT_VEND_READ_EXT, addr);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return ret;
+}
+
+static void ___mt76u_wr(struct mt76_dev *dev, u8 req,
+ u32 addr, u32 val)
{
struct mt76_usb *usb = &dev->usb;
- u16 offset;
+
+ put_unaligned_le32(val, usb->data);
+ __mt76u_vendor_request(dev, req,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ addr >> 16, addr, usb->data,
+ sizeof(__le32));
+ trace_usb_reg_wr(dev, addr, val);
+}
+
+static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+{
u8 req;
switch (addr & MT_VEND_TYPE_MASK) {
@@ -125,19 +145,20 @@
req = MT_VEND_MULTI_WRITE;
break;
}
- offset = addr & ~MT_VEND_TYPE_MASK;
-
- put_unaligned_le32(val, usb->data);
- __mt76u_vendor_request(dev, req,
- USB_DIR_OUT | USB_TYPE_VENDOR, 0,
- offset, usb->data, sizeof(__le32));
- trace_usb_reg_wr(dev, addr, val);
+ ___mt76u_wr(dev, req, addr & ~MT_VEND_TYPE_MASK, val);
}
-void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
+static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val)
{
mutex_lock(&dev->usb.usb_ctrl_mtx);
__mt76u_wr(dev, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+}
+
+static void mt76u_wr_ext(struct mt76_dev *dev, u32 addr, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
mutex_unlock(&dev->usb.usb_ctrl_mtx);
}
@@ -152,22 +173,94 @@
return val;
}
+static u32 mt76u_rmw_ext(struct mt76_dev *dev, u32 addr,
+ u32 mask, u32 val)
+{
+ mutex_lock(&dev->usb.usb_ctrl_mtx);
+ val |= ___mt76u_rr(dev, MT_VEND_READ_EXT, addr) & ~mask;
+ ___mt76u_wr(dev, MT_VEND_WRITE_EXT, addr, val);
+ mutex_unlock(&dev->usb.usb_ctrl_mtx);
+
+ return val;
+}
+
static void mt76u_copy(struct mt76_dev *dev, u32 offset,
const void *data, int len)
{
struct mt76_usb *usb = &dev->usb;
- const u32 *val = data;
- int i, ret;
+ const u8 *val = data;
+ int ret;
+ int current_batch_size;
+ int i = 0;
+
+ /* Ensure that a multiple of 4 bytes is always copied,
+ * otherwise beacons can be corrupted.
+ * See: "mt76: round up length on mt76_wr_copy"
+ * Commit 850e8f6fbd5d0003b0
+ */
+ len = round_up(len, 4);
mutex_lock(&usb->usb_ctrl_mtx);
- for (i = 0; i < (len / 4); i++) {
- put_unaligned_le32(val[i], usb->data);
+ while (i < len) {
+ current_batch_size = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, current_batch_size);
ret = __mt76u_vendor_request(dev, MT_VEND_MULTI_WRITE,
USB_DIR_OUT | USB_TYPE_VENDOR,
- 0, offset + i * 4, usb->data,
- sizeof(__le32));
+ 0, offset + i, usb->data,
+ current_batch_size);
if (ret < 0)
break;
+
+ i += current_batch_size;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+static void mt76u_copy_ext(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int ret, i = 0, batch_len;
+ const u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ memcpy(usb->data, val + i, batch_len);
+ ret = __mt76u_vendor_request(dev, MT_VEND_WRITE_EXT,
+ USB_DIR_OUT | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ i += batch_len;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+}
+
+static void
+mt76u_read_copy_ext(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+ int i = 0, batch_len, ret;
+ u8 *val = data;
+
+ len = round_up(len, 4);
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (i < len) {
+ batch_len = min_t(int, usb->data_len, len - i);
+ ret = __mt76u_vendor_request(dev, MT_VEND_READ_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ (offset + i) >> 16, offset + i,
+ usb->data, batch_len);
+ if (ret < 0)
+ break;
+
+ memcpy(val + i, usb->data, batch_len);
+ i += batch_len;
}
mutex_unlock(&usb->usb_ctrl_mtx);
}
@@ -187,6 +280,70 @@
EXPORT_SYMBOL_GPL(mt76u_single_wr);
static int
+mt76u_req_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (len > 0) {
+ __mt76u_wr(dev, base + data->reg, data->value);
+ len--;
+ data++;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+
+ return 0;
+}
+
+static int
+mt76u_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data, int n)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+ return dev->mcu_ops->mcu_wr_rp(dev, base, data, n);
+ else
+ return mt76u_req_wr_rp(dev, base, data, n);
+}
+
+static int
+mt76u_req_rd_rp(struct mt76_dev *dev, u32 base, struct mt76_reg_pair *data,
+ int len)
+{
+ struct mt76_usb *usb = &dev->usb;
+
+ mutex_lock(&usb->usb_ctrl_mtx);
+ while (len > 0) {
+ data->value = __mt76u_rr(dev, base + data->reg);
+ len--;
+ data++;
+ }
+ mutex_unlock(&usb->usb_ctrl_mtx);
+
+ return 0;
+}
+
+static int
+mt76u_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int n)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+ return dev->mcu_ops->mcu_rd_rp(dev, base, data, n);
+ else
+ return mt76u_req_rd_rp(dev, base, data, n);
+}
+
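+/* scatter-gather is usable only when it is not disabled via the
+ * disable_usb_sg modparam, the host controller supports it, and
+ * either there is no sg size constraint or the link is wireless USB
+ */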
+static bool mt76u_check_sg(struct mt76_dev *dev)
+{
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
+
+ return (!disable_usb_sg && udev->bus->sg_tablesize > 0 &&
+ (udev->bus->no_sg_constraint ||
+ udev->speed == USB_SPEED_WIRELESS));
+}
+
+static int
mt76u_set_endpoints(struct usb_interface *intf,
struct mt76_usb *usb)
{
@@ -200,12 +357,10 @@
if (usb_endpoint_is_bulk_in(ep_desc) &&
in_ep < __MT_EP_IN_MAX) {
usb->in_ep[in_ep] = usb_endpoint_num(ep_desc);
- usb->in_max_packet = usb_endpoint_maxp(ep_desc);
in_ep++;
} else if (usb_endpoint_is_bulk_out(ep_desc) &&
out_ep < __MT_EP_OUT_MAX) {
usb->out_ep[out_ep] = usb_endpoint_num(ep_desc);
- usb->out_max_packet = usb_endpoint_maxp(ep_desc);
out_ep++;
}
}
@@ -216,10 +371,9 @@
}
static int
-mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76u_buf *buf,
- int nsgs, int len, int sglen)
+mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
+ int nsgs, gfp_t gfp)
{
- struct urb *urb = buf->urb;
int i;
for (i = 0; i < nsgs; i++) {
@@ -227,13 +381,13 @@
void *data;
int offset;
- data = netdev_alloc_frag(len);
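+ /* rx buffers now come from a per-queue page fragment cache */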
+ data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
if (!data)
break;
page = virt_to_head_page(data);
offset = data - page_address(page);
- sg_set_page(&urb->sg[i], page, sglen, offset);
+ sg_set_page(&urb->sg[i], page, q->buf_size, offset);
}
if (i < nsgs) {
@@ -245,54 +399,83 @@
}
urb->num_sgs = max_t(int, i, urb->num_sgs);
- buf->len = urb->num_sgs * sglen,
+ urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
sg_init_marker(urb->sg, urb->num_sgs);
return i ? : -ENOMEM;
}
-int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
- int nsgs, int len, int sglen, gfp_t gfp)
+static int
+mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
+ struct urb *urb, int nsgs, gfp_t gfp)
{
- buf->urb = usb_alloc_urb(0, gfp);
- if (!buf->urb)
- return -ENOMEM;
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
- buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg),
- gfp);
- if (!buf->urb->sg)
- return -ENOMEM;
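+ /* only the main rx queue uses scatter-gather; the MCU
+ * response queue reads into a single page fragment instead
+ */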
+ if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
+ return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
- sg_init_table(buf->urb->sg, nsgs);
- buf->dev = dev;
+ urb->transfer_buffer_length = q->buf_size;
+ urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
- return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
+ return urb->transfer_buffer ? 0 : -ENOMEM;
}
-EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
-void mt76u_buf_free(struct mt76u_buf *buf)
+static int
+mt76u_urb_alloc(struct mt76_dev *dev, struct mt76_queue_entry *e,
+ int sg_max_size)
{
- struct urb *urb = buf->urb;
- struct scatterlist *sg;
+ unsigned int size = sizeof(struct urb);
+
+ if (dev->usb.sg_en)
+ size += sg_max_size * sizeof(struct scatterlist);
+
+ e->urb = kzalloc(size, GFP_KERNEL);
+ if (!e->urb)
+ return -ENOMEM;
+
+ usb_init_urb(e->urb);
+
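+ /* the scatterlist was allocated inline, right after the urb */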
+ if (dev->usb.sg_en && sg_max_size > 0)
+ e->urb->sg = (struct scatterlist *)(e->urb + 1);
+
+ return 0;
+}
+
+static int
+mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e)
+{
+ enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ int err, sg_size;
+
+ sg_size = qid == MT_RXQ_MAIN ? MT_RX_SG_MAX_SIZE : 0;
+ err = mt76u_urb_alloc(dev, e, sg_size);
+ if (err)
+ return err;
+
+ return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
+}
+
+static void mt76u_urb_free(struct urb *urb)
+{
int i;
- for (i = 0; i < urb->num_sgs; i++) {
- sg = &urb->sg[i];
- if (!sg)
- continue;
+ for (i = 0; i < urb->num_sgs; i++)
+ skb_free_frag(sg_virt(&urb->sg[i]));
- skb_free_frag(sg_virt(sg));
- }
- usb_free_urb(buf->urb);
+ if (urb->transfer_buffer)
+ skb_free_frag(urb->transfer_buffer);
+
+ usb_free_urb(urb);
}
-EXPORT_SYMBOL_GPL(mt76u_buf_free);
-int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index,
- struct mt76u_buf *buf, gfp_t gfp,
- usb_complete_t complete_fn, void *context)
+static void
+mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index,
+ struct urb *urb, usb_complete_t complete_fn,
+ void *context)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_interface *uintf = to_usb_interface(dev->dev);
+ struct usb_device *udev = interface_to_usbdev(uintf);
unsigned int pipe;
if (dir == USB_DIR_IN)
@@ -300,80 +483,115 @@
else
pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[index]);
- usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len,
- complete_fn, context);
-
- return usb_submit_urb(buf->urb, gfp);
+ urb->dev = udev;
+ urb->pipe = pipe;
+ urb->complete = complete_fn;
+ urb->context = context;
}
-EXPORT_SYMBOL_GPL(mt76u_submit_buf);
-static inline struct mt76u_buf
-*mt76u_get_next_rx_entry(struct mt76_queue *q)
+static struct urb *
+mt76u_get_next_rx_entry(struct mt76_queue *q)
{
- struct mt76u_buf *buf = NULL;
+ struct urb *urb = NULL;
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->queued > 0) {
- buf = &q->entry[q->head].ubuf;
- q->head = (q->head + 1) % q->ndesc;
+ urb = q->entry[q->tail].urb;
+ q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
}
spin_unlock_irqrestore(&q->lock, flags);
- return buf;
+ return urb;
}
-static int mt76u_get_rx_entry_len(u8 *data, u32 data_len)
+static int
+mt76u_get_rx_entry_len(struct mt76_dev *dev, u8 *data,
+ u32 data_len)
{
u16 dma_len, min_len;
dma_len = get_unaligned_le16(data);
- min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN +
- MT_FCE_INFO_LEN;
+ if (dev->drv->drv_flags & MT_DRV_RX_DMA_HDR)
+ return dma_len;
- if (data_len < min_len || WARN_ON(!dma_len) ||
- WARN_ON(dma_len + MT_DMA_HDR_LEN > data_len) ||
- WARN_ON(dma_len & 0x3))
+ min_len = MT_DMA_HDR_LEN + MT_RX_RXWI_LEN + MT_FCE_INFO_LEN;
+ if (data_len < min_len || !dma_len ||
+ dma_len + MT_DMA_HDR_LEN > data_len ||
+ (dma_len & 0x3))
return -EINVAL;
return dma_len;
}
-static int
-mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb)
+static struct sk_buff *
+mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
+ int len, int buf_size)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- u8 *data = sg_virt(&urb->sg[0]);
- int data_len, len, nsgs = 1;
+ int head_room, drv_flags = dev->drv->drv_flags;
struct sk_buff *skb;
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->state))
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
+ struct page *page;
+
+ /* slow path, not enough space for data and
+ * skb_shared_info
+ */
+ skb = alloc_skb(MT_SKB_HEAD_LEN, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
+ data += head_room + MT_SKB_HEAD_LEN;
+ page = virt_to_head_page(data);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, data - page_address(page),
+ len - MT_SKB_HEAD_LEN, buf_size);
+
+ return skb;
+ }
+
+ /* fast path */
+ skb = build_skb(data, buf_size);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, head_room);
+ __skb_put(skb, len);
+
+ return skb;
+}
+
+static int
+mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
+ int buf_size)
+{
+ u8 *data = urb->num_sgs ? sg_virt(&urb->sg[0]) : urb->transfer_buffer;
+ int data_len = urb->num_sgs ? urb->sg[0].length : urb->actual_length;
+ int len, nsgs = 1, head_room, drv_flags = dev->drv->drv_flags;
+ struct sk_buff *skb;
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
return 0;
- len = mt76u_get_rx_entry_len(data, urb->actual_length);
+ len = mt76u_get_rx_entry_len(dev, data, urb->actual_length);
if (len < 0)
return 0;
- skb = build_skb(data, q->buf_size);
+ head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
+ data_len = min_t(int, len, data_len - head_room);
+ skb = mt76u_build_rx_skb(dev, data, data_len, buf_size);
if (!skb)
return 0;
- data_len = min_t(int, len, urb->sg[0].length - MT_DMA_HDR_LEN);
- skb_reserve(skb, MT_DMA_HDR_LEN);
- if (skb->tail + data_len > skb->end) {
- dev_kfree_skb(skb);
- return 1;
- }
-
- __skb_put(skb, data_len);
len -= data_len;
-
- while (len > 0) {
+ while (len > 0 && nsgs < urb->num_sgs) {
data_len = min_t(int, len, urb->sg[nsgs].length);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
sg_page(&urb->sg[nsgs]),
- urb->sg[nsgs].offset,
- data_len, q->buf_size);
+ urb->sg[nsgs].offset, data_len,
+ buf_size);
len -= data_len;
nsgs++;
}
@@ -384,9 +602,11 @@
static void mt76u_complete_rx(struct urb *urb)
{
- struct mt76_dev *dev = urb->context;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+ struct mt76_queue *q = urb->context;
unsigned long flags;
+
+ trace_rx_urb(dev, urb);
switch (urb->status) {
case -ECONNRESET:
@@ -394,65 +614,82 @@
case -ENOENT:
return;
default:
- dev_err(dev->dev, "rx urb failed: %d\n", urb->status);
- /* fall through */
+ dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
+ urb->status);
+ fallthrough;
case 0:
break;
}
spin_lock_irqsave(&q->lock, flags);
- if (WARN_ONCE(q->entry[q->tail].ubuf.urb != urb, "rx urb mismatch"))
+ if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
goto out;
- q->tail = (q->tail + 1) % q->ndesc;
+ q->head = (q->head + 1) % q->ndesc;
q->queued++;
tasklet_schedule(&dev->usb.rx_tasklet);
out:
spin_unlock_irqrestore(&q->lock, flags);
}
-static void mt76u_rx_tasklet(unsigned long data)
+static int
+mt76u_submit_rx_buf(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ struct urb *urb)
{
- struct mt76_dev *dev = (struct mt76_dev *)data;
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int err, nsgs, buf_len = q->buf_size;
- struct mt76u_buf *buf;
+ int ep = qid == MT_RXQ_MAIN ? MT_EP_IN_PKT_RX : MT_EP_IN_CMD_RESP;
- rcu_read_lock();
+ mt76u_fill_bulk_urb(dev, USB_DIR_IN, ep, urb,
+ mt76u_complete_rx, &dev->q_rx[qid]);
+ trace_submit_urb(dev, urb);
+
+ return usb_submit_urb(urb, GFP_ATOMIC);
+}
+
+static void
+mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ int qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ struct urb *urb;
+ int err, count;
while (true) {
- buf = mt76u_get_next_rx_entry(q);
- if (!buf)
+ urb = mt76u_get_next_rx_entry(q);
+ if (!urb)
break;
- nsgs = mt76u_process_rx_entry(dev, buf->urb);
- if (nsgs > 0) {
- err = mt76u_fill_rx_sg(dev, buf, nsgs,
- buf_len,
- SKB_WITH_OVERHEAD(buf_len));
+ count = mt76u_process_rx_entry(dev, urb, q->buf_size);
+ if (count > 0) {
+ err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
if (err < 0)
break;
}
- mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
- buf, GFP_ATOMIC,
- mt76u_complete_rx, dev);
+ mt76u_submit_rx_buf(dev, qid, urb);
}
- mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+ if (qid == MT_RXQ_MAIN)
+ mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
+}
+static void mt76u_rx_tasklet(unsigned long data)
+{
+ struct mt76_dev *dev = (struct mt76_dev *)data;
+ int i;
+
+ rcu_read_lock();
+ mt76_for_each_q_rx(dev, i)
+ mt76u_process_rx_queue(dev, &dev->q_rx[i]);
rcu_read_unlock();
}
-int mt76u_submit_rx_buffers(struct mt76_dev *dev)
+static int
+mt76u_submit_rx_buffers(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
+ struct mt76_queue *q = &dev->q_rx[qid];
unsigned long flags;
int i, err = 0;
spin_lock_irqsave(&q->lock, flags);
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_PKT_RX,
- &q->entry[i].ubuf, GFP_ATOMIC,
- mt76u_complete_rx, dev);
+ err = mt76u_submit_rx_buf(dev, qid, q->entry[i].urb);
if (err < 0)
break;
}
@@ -462,12 +699,12 @@
return err;
}
-EXPORT_SYMBOL_GPL(mt76u_submit_rx_buffers);
-static int mt76u_alloc_rx(struct mt76_dev *dev)
+static int
+mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
- int i, err, nsgs;
+ struct mt76_queue *q = &dev->q_rx[qid];
+ int i, err;
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
@@ -476,120 +713,118 @@
if (!q->entry)
return -ENOMEM;
- if (mt76u_check_sg(dev)) {
- q->buf_size = MT_RX_BUF_SIZE;
- nsgs = MT_SG_MAX_SIZE;
- } else {
- q->buf_size = PAGE_SIZE;
- nsgs = 1;
- }
-
q->ndesc = MT_NUM_RX_ENTRIES;
+ q->buf_size = PAGE_SIZE;
+
for (i = 0; i < q->ndesc; i++) {
- err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
- nsgs, q->buf_size,
- SKB_WITH_OVERHEAD(q->buf_size),
- GFP_KERNEL);
+ err = mt76u_rx_urb_alloc(dev, q, &q->entry[i]);
if (err < 0)
return err;
}
- return mt76u_submit_rx_buffers(dev);
+ return mt76u_submit_rx_buffers(dev, qid);
+}
+
+int mt76u_alloc_mcu_queue(struct mt76_dev *dev)
+{
+ return mt76u_alloc_rx_queue(dev, MT_RXQ_MCU);
+}
+EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
+
+static void
+mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct page *page;
+ int i;
+
+ for (i = 0; i < q->ndesc; i++)
+ mt76u_urb_free(q->entry[i].urb);
+
+ if (!q->rx_page.va)
+ return;
+
+ page = virt_to_page(q->rx_page.va);
+ __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
+ memset(&q->rx_page, 0, sizeof(q->rx_page));
}
static void mt76u_free_rx(struct mt76_dev *dev)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int i;
- for (i = 0; i < q->ndesc; i++)
- mt76u_buf_free(&q->entry[i].ubuf);
+ mt76_for_each_q_rx(dev, i)
+ mt76u_free_rx_queue(dev, &dev->q_rx[i]);
}
-static void mt76u_stop_rx(struct mt76_dev *dev)
+void mt76u_stop_rx(struct mt76_dev *dev)
{
- struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
int i;
- for (i = 0; i < q->ndesc; i++)
- usb_kill_urb(q->entry[i].ubuf.urb);
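+ /* poison rather than kill the urbs so they cannot be
+ * resubmitted until mt76u_resume_rx() unpoisons them
+ */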
+ mt76_for_each_q_rx(dev, i) {
+ struct mt76_queue *q = &dev->q_rx[i];
+ int j;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_poison_urb(q->entry[j].urb);
+ }
+
+ tasklet_kill(&dev->usb.rx_tasklet);
}
+EXPORT_SYMBOL_GPL(mt76u_stop_rx);
-int mt76u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
+int mt76u_resume_rx(struct mt76_dev *dev)
{
- struct sk_buff *iter, *last = skb;
- u32 info, pad;
+ int i;
- /* Buffer layout:
- * | 4B | xfer len | pad | 4B |
- * | TXINFO | pkt/cmd | zero pad to 4B | zero |
- *
- * length field of TXINFO should be set to 'xfer len'.
- */
- info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) |
- FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
- put_unaligned_le32(info, skb_push(skb, sizeof(info)));
+ mt76_for_each_q_rx(dev, i) {
+ struct mt76_queue *q = &dev->q_rx[i];
+ int err, j;
- pad = round_up(skb->len, 4) + 4 - skb->len;
- skb_walk_frags(skb, iter) {
- last = iter;
- if (!iter->next) {
- skb->data_len += pad;
- skb->len += pad;
- break;
- }
+ for (j = 0; j < q->ndesc; j++)
+ usb_unpoison_urb(q->entry[j].urb);
+
+ err = mt76u_submit_rx_buffers(dev, i);
+ if (err < 0)
+ return err;
}
- if (unlikely(pad)) {
- if (__skb_pad(last, pad, true))
- return -ENOMEM;
- __skb_put(last, pad);
- }
return 0;
}
-EXPORT_SYMBOL_GPL(mt76u_skb_dma_info);
+EXPORT_SYMBOL_GPL(mt76u_resume_rx);
-static void mt76u_tx_tasklet(unsigned long data)
+static void mt76u_tx_worker(struct mt76_worker *w)
{
- struct mt76_dev *dev = (struct mt76_dev *)data;
- struct mt76u_buf *buf;
+ struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
+ struct mt76_queue_entry entry;
struct mt76_queue *q;
bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ q = dev->q_tx[i];
- spin_lock_bh(&q->lock);
- while (true) {
- buf = &q->entry[q->head].ubuf;
- if (!buf->done || !q->queued)
+ while (q->queued > 0) {
+ if (!q->entry[q->tail].done)
break;
- dev->drv->tx_complete_skb(dev, q,
- &q->entry[q->head],
- false);
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
- if (q->entry[q->head].schedule) {
- q->entry[q->head].schedule = false;
- q->swq_queued--;
- }
-
- q->head = (q->head + 1) % q->ndesc;
- q->queued--;
+ mt76_queue_tx_complete(dev, q, &entry);
}
- mt76_txq_schedule(dev, q);
- wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+
+ wake = q->stopped && q->queued < q->ndesc - 8;
+ if (wake)
+ q->stopped = false;
+
if (!q->queued)
wake_up(&dev->tx_wait);
- spin_unlock_bh(&q->lock);
+ mt76_txq_schedule(&dev->phy, i);
- if (!test_and_set_bit(MT76_READING_STATS, &dev->state))
- ieee80211_queue_delayed_work(dev->hw,
- &dev->usb.stat_work,
- msecs_to_jiffies(10));
-
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->wq, &dev->usb.stat_work);
if (wake)
ieee80211_wake_queue(dev->hw, i);
}
@@ -602,11 +837,11 @@
u8 update = 1;
u16 count = 0;
- usb = container_of(work, struct mt76_usb, stat_work.work);
+ usb = container_of(work, struct mt76_usb, stat_work);
dev = container_of(usb, struct mt76_dev, usb);
while (true) {
- if (test_bit(MT76_REMOVED, &dev->state))
+ if (test_bit(MT76_REMOVED, &dev->phy.state))
break;
if (!dev->drv->tx_status_data(dev, &update))
@@ -614,76 +849,73 @@
count++;
}
- if (count && test_bit(MT76_STATE_RUNNING, &dev->state))
- ieee80211_queue_delayed_work(dev->hw, &usb->stat_work,
- msecs_to_jiffies(10));
+ if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
+ queue_work(dev->wq, &usb->stat_work);
else
- clear_bit(MT76_READING_STATS, &dev->state);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
}
static void mt76u_complete_tx(struct urb *urb)
{
- struct mt76u_buf *buf = urb->context;
- struct mt76_dev *dev = buf->dev;
+ struct mt76_dev *dev = dev_get_drvdata(&urb->dev->dev);
+ struct mt76_queue_entry *e = urb->context;
if (mt76u_urb_error(urb))
dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
- buf->done = true;
+ e->done = true;
- tasklet_schedule(&dev->usb.tx_tasklet);
+ mt76_worker_schedule(&dev->tx_worker);
}
static int
-mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb)
+mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
+ struct urb *urb)
{
- int nsgs = 1 + skb_shinfo(skb)->nr_frags;
- struct sk_buff *iter;
+ urb->transfer_buffer_length = skb->len;
- skb_walk_frags(skb, iter)
- nsgs += 1 + skb_shinfo(iter)->nr_frags;
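+ /* without scatter-gather support the urb transfers straight
+ * out of the skb data buffer
+ */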
+ if (!dev->usb.sg_en) {
+ urb->transfer_buffer = skb->data;
+ return 0;
+ }
- memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE);
+ sg_init_table(urb->sg, MT_TX_SG_MAX_SIZE);
+ urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len);
+ if (!urb->num_sgs)
+ return -ENOMEM;
- nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs);
- sg_init_marker(urb->sg, nsgs);
- urb->num_sgs = nsgs;
-
- return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len);
+ return urb->num_sgs;
}
static int
-mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct usb_interface *intf = to_usb_interface(dev->dev);
- struct usb_device *udev = interface_to_usbdev(intf);
- u8 ep = q2ep(q->hw_idx);
- struct mt76u_buf *buf;
- u16 idx = q->tail;
- unsigned int pipe;
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_tx_info tx_info = {
+ .skb = skb,
+ };
+ u16 idx = q->head;
int err;
if (q->queued == q->ndesc)
return -ENOSPC;
- err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL);
+ skb->prev = skb->next = NULL;
+ err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
if (err < 0)
return err;
- buf = &q->entry[idx].ubuf;
- buf->done = false;
-
- err = mt76u_tx_build_sg(skb, buf->urb);
+ err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
if (err < 0)
return err;
- pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]);
- usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, skb->len,
- mt76u_complete_tx, buf);
+ mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
+ q->entry[idx].urb, mt76u_complete_tx,
+ &q->entry[idx]);
- q->tail = (q->tail + 1) % q->ndesc;
- q->entry[idx].skb = skb;
+ q->head = (q->head + 1) % q->ndesc;
+ q->entry[idx].skb = tx_info.skb;
q->queued++;
return idx;
@@ -691,15 +923,17 @@
static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct mt76u_buf *buf;
+ struct urb *urb;
int err;
- while (q->first != q->tail) {
- buf = &q->entry[q->first].ubuf;
- err = usb_submit_urb(buf->urb, GFP_ATOMIC);
+ while (q->first != q->head) {
+ urb = q->entry[q->first].urb;
+
+ trace_submit_urb(dev, urb);
+ err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
if (err == -ENODEV)
- set_bit(MT76_REMOVED, &dev->state);
+ set_bit(MT76_REMOVED, &dev->phy.state);
else
dev_err(dev->dev, "tx urb submit failed:%d\n",
err);
@@ -709,19 +943,44 @@
}
}
+static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
+{
+ if (mt76_chip(dev) == 0x7663) {
+ static const u8 lmac_queue_map[] = {
+ /* ac to lmac mapping */
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 4,
+ };
+
+ if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
+ return 1; /* BE */
+
+ return lmac_queue_map[ac];
+ }
+
+ return mt76_ac_to_hwq(ac);
+}
+
static int mt76u_alloc_tx(struct mt76_dev *dev)
{
- struct mt76u_buf *buf;
struct mt76_queue *q;
- size_t size;
- int i, j;
+ int i, j, err;
- size = MT_SG_MAX_SIZE * sizeof(struct scatterlist);
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ for (i = 0; i <= MT_TXQ_PSD; i++) {
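+ /* queues beyond the four ACs (e.g. PSD) alias the first AC queue */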
+ if (i >= IEEE80211_NUM_ACS) {
+ dev->q_tx[i] = dev->q_tx[0];
+ continue;
+ }
+
+ q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
+ if (!q)
+ return -ENOMEM;
+
spin_lock_init(&q->lock);
- INIT_LIST_HEAD(&q->swq);
- q->hw_idx = q2hwq(i);
+ q->hw_idx = mt76u_ac_to_hwq(dev, i);
+ dev->q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
@@ -731,16 +990,10 @@
q->ndesc = MT_NUM_TX_ENTRIES;
for (j = 0; j < q->ndesc; j++) {
- buf = &q->entry[j].ubuf;
- buf->dev = dev;
-
- buf->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!buf->urb)
- return -ENOMEM;
-
- buf->urb->sg = devm_kzalloc(dev->dev, size, GFP_KERNEL);
- if (!buf->urb->sg)
- return -ENOMEM;
+ err = mt76u_urb_alloc(dev, &q->entry[j],
+ MT_TX_SG_MAX_SIZE);
+ if (err < 0)
+ return err;
}
}
return 0;
@@ -748,48 +1001,74 @@
static void mt76u_free_tx(struct mt76_dev *dev)
{
- struct mt76_queue *q;
- int i, j;
+ int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
+ struct mt76_queue *q;
+ int j;
+
+ q = dev->q_tx[i];
+ if (!q)
+ continue;
+
for (j = 0; j < q->ndesc; j++)
- usb_free_urb(q->entry[j].ubuf.urb);
+ usb_free_urb(q->entry[j].urb);
}
}
-static void mt76u_stop_tx(struct mt76_dev *dev)
+void mt76u_stop_tx(struct mt76_dev *dev)
{
- struct mt76_queue *q;
- int i, j;
+ int ret;
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = &dev->q_tx[i];
- for (j = 0; j < q->ndesc; j++)
- usb_kill_urb(q->entry[j].ubuf.urb);
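+ /* give pending tx up to 200 ms (HZ / 5) to drain before
+ * forcing cleanup
+ */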
+ ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
+ HZ / 5);
+ if (!ret) {
+ struct mt76_queue_entry entry;
+ struct mt76_queue *q;
+ int i, j;
+
+ dev_err(dev->dev, "timed out waiting for pending tx\n");
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->q_tx[i];
+ if (!q)
+ continue;
+
+ for (j = 0; j < q->ndesc; j++)
+ usb_kill_urb(q->entry[j].urb);
+ }
+
+ mt76_worker_disable(&dev->tx_worker);
+
+ /* On device removal we might queue skbs, but mt76u_tx_kick()
+ * will fail to submit the urb; clean up those skbs manually.
+ */
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ q = dev->q_tx[i];
+ if (!q)
+ continue;
+
+ while (q->queued > 0) {
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
+ mt76_queue_tx_complete(dev, q, &entry);
+ }
+ }
+
+ mt76_worker_enable(&dev->tx_worker);
}
-}
-void mt76u_stop_queues(struct mt76_dev *dev)
-{
- tasklet_disable(&dev->usb.rx_tasklet);
- tasklet_disable(&dev->usb.tx_tasklet);
+ cancel_work_sync(&dev->usb.stat_work);
+ clear_bit(MT76_READING_STATS, &dev->phy.state);
- mt76u_stop_rx(dev);
- mt76u_stop_tx(dev);
+ mt76_tx_status_check(dev, NULL, true);
}
-EXPORT_SYMBOL_GPL(mt76u_stop_queues);
-
-void mt76u_stop_stat_wk(struct mt76_dev *dev)
-{
- cancel_delayed_work_sync(&dev->usb.stat_work);
- clear_bit(MT76_READING_STATS, &dev->state);
-}
-EXPORT_SYMBOL_GPL(mt76u_stop_stat_wk);
+EXPORT_SYMBOL_GPL(mt76u_stop_tx);
void mt76u_queues_deinit(struct mt76_dev *dev)
{
- mt76u_stop_queues(dev);
+ mt76u_stop_rx(dev);
+ mt76u_stop_tx(dev);
mt76u_free_rx(dev);
mt76u_free_tx(dev);
@@ -800,7 +1079,7 @@
{
int err;
- err = mt76u_alloc_rx(dev);
+ err = mt76u_alloc_rx_queue(dev, MT_RXQ_MAIN);
if (err < 0)
return err;
@@ -814,29 +1093,53 @@
};
int mt76u_init(struct mt76_dev *dev,
- struct usb_interface *intf)
+ struct usb_interface *intf, bool ext)
{
- static const struct mt76_bus_ops mt76u_ops = {
- .rr = mt76u_rr,
- .wr = mt76u_wr,
- .rmw = mt76u_rmw,
- .copy = mt76u_copy,
+ static struct mt76_bus_ops mt76u_ops = {
+ .read_copy = mt76u_read_copy_ext,
+ .wr_rp = mt76u_wr_rp,
+ .rd_rp = mt76u_rd_rp,
+ .type = MT76_BUS_USB,
};
+ struct usb_device *udev = interface_to_usbdev(intf);
struct mt76_usb *usb = &dev->usb;
+ int err = -ENOMEM;
+ mt76u_ops.rr = ext ? mt76u_rr_ext : mt76u_rr;
+ mt76u_ops.wr = ext ? mt76u_wr_ext : mt76u_wr;
+ mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
+ mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
+
+ dev->tx_worker.fn = mt76u_tx_worker;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
- tasklet_init(&usb->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
- INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data);
- skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]);
+ INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
- init_completion(&usb->mcu.cmpl);
- mutex_init(&usb->mcu.mutex);
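+ /* size the vendor-request scratch buffer from the ep0 max
+ * packet size, with a 32-byte floor
+ */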
+ usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
+ if (usb->data_len < 32)
+ usb->data_len = 32;
+
+ usb->data = devm_kmalloc(dev->dev, usb->data_len, GFP_KERNEL);
+ if (!usb->data)
+ goto error;
mutex_init(&usb->usb_ctrl_mtx);
dev->bus = &mt76u_ops;
dev->queue_ops = &usb_queue_ops;
- return mt76u_set_endpoints(intf, usb);
+ dev_set_drvdata(&udev->dev, dev);
+
+ usb->sg_en = mt76u_check_sg(dev);
+
+ err = mt76u_set_endpoints(intf, usb);
+ if (err < 0)
+ goto error;
+
+ return 0;
+
+error:
+ destroy_workqueue(dev->wq);
+
+ return err;
}
EXPORT_SYMBOL_GPL(mt76u_init);
--
Gitblit v1.6.2