From 8ac6c7a54ed1b98d142dce24b11c6de6a1e239a5 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Tue, 22 Oct 2024 10:36:11 +0000
Subject: [PATCH] Change the 4G dial-up method to QMI; quectel-CM must run in the background on the system
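
With QMI mode the data connection is brought up by the userspace tool
quectel-CM, which has to be kept running in the background on the target
system. As a minimal sketch (the APN is a placeholder and carrier-specific;
verify the options against your quectel-CM build), it could be started from
an init script:

    # placeholder APN; replace with the carrier's APN
    quectel-CM -s <apn> &

On the driver side, the patch reworks the ath10k copy engine (ce.c): ring
base addresses are programmed as 64-bit values split across lo/hi registers,
the 37-bit descriptor address mask is generalized to CE_DESC_ADDR_MASK,
completed-send handling gains a 64-bit descriptor variant selected through
ce_ops, interrupt enable/disable is exposed per copy engine and skips CEs in
polling mode, ring allocations use struct_size(), the deprecated
dma_zalloc_coherent() is replaced with dma_alloc_coherent(), and the license
boilerplate is replaced with an SPDX identifier.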
---
kernel/drivers/net/wireless/ath/ath10k/ce.c | 276 +++++++++++++++++++++++++++++++++++++-----------------
1 file changed, 189 insertions(+), 87 deletions(-)
diff --git a/kernel/drivers/net/wireless/ath/ath10k/ce.c b/kernel/drivers/net/wireless/ath/ath10k/ce.c
index 2276d60..c45c814 100644
--- a/kernel/drivers/net/wireless/ath/ath10k/ce.c
+++ b/kernel/drivers/net/wireless/ath/ath10k/ce.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "hif.h"
@@ -228,11 +217,31 @@
}
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
- u32 ce_ctrl_addr,
- unsigned int addr)
+ u32 ce_id,
+ u64 addr)
{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ u32 addr_lo = lower_32_bits(addr);
+
ath10k_ce_write32(ar, ce_ctrl_addr +
- ar->hw_ce_regs->sr_base_addr, addr);
+ ar->hw_ce_regs->sr_base_addr_lo, addr_lo);
+
+ if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
+ ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
+ addr);
+ }
+}
+
+static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr)
+{
+ u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
+
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
}
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
@@ -313,11 +322,36 @@
}
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
- u32 ce_ctrl_addr,
- u32 addr)
+ u32 ce_id,
+ u64 addr)
{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ u32 addr_lo = lower_32_bits(addr);
+
ath10k_ce_write32(ar, ce_ctrl_addr +
- ar->hw_ce_regs->dr_base_addr, addr);
+ ar->hw_ce_regs->dr_base_addr_lo, addr_lo);
+
+ if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
+ ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
+ addr);
+ }
+}
+
+static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr)
+{
+ u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
+ u32 reg_value;
+
+ reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr_hi);
+ reg_value &= ~CE_DESC_ADDR_HI_MASK;
+ reg_value |= addr_hi;
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr_hi, reg_value);
}
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
@@ -557,7 +591,7 @@
addr = (__le32 *)&sdesc.addr;
- flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+ flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
addr[0] = __cpu_to_le32(buffer);
addr[1] = __cpu_to_le32(flags);
if (flags & CE_SEND_FLAG_GATHER)
@@ -731,7 +765,7 @@
return -ENOSPC;
desc->addr = __cpu_to_le64(paddr);
- desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
+ desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);
desc->nbytes = 0;
@@ -1032,8 +1066,8 @@
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp)
+static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
@@ -1083,6 +1117,66 @@
src_ring->sw_index = sw_index;
return 0;
+}
+
+static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int read_index;
+ struct ce_desc_64 *desc;
+
+ if (src_ring->hw_index == sw_index) {
+ /*
+ * The SW completion index has caught up with the cached
+ * version of the HW completion index.
+ * Update the cached HW completion index to see whether
+ * the SW has really caught up to the HW, or if the cached
+ * value of the HW index has become stale.
+ */
+
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ if (read_index == 0xffffffff)
+ return -ENODEV;
+
+ read_index &= nentries_mask;
+ src_ring->hw_index = read_index;
+ }
+
+ if (ar->hw_params.rri_on_ddr)
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ else
+ read_index = src_ring->hw_index;
+
+ if (read_index == sw_index)
+ return -EIO;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+ desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+ sw_index);
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ return ce_state->ops->ce_completed_send_next_nolock(ce_state,
+ per_transfer_contextp);
}
EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
@@ -1205,29 +1299,24 @@
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
- spin_lock_bh(&ce->ce_lock);
-
- /* Clear the copy-complete interrupts that will be handled here. */
+ /*
+ * Clear before handling
+ *
+ * Misc CE interrupts are not being handled, but still need
+ * to be cleared.
+ *
+ * NOTE: When the last copy engine interrupt is cleared, the
+ * hardware will go to sleep. Once this happens, any access to
+ * the CE registers can cause a hardware fault.
+ */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
- wm_regs->cc_mask);
-
- spin_unlock_bh(&ce->ce_lock);
+ wm_regs->cc_mask | wm_regs->wm_mask);
if (ce_state->recv_cb)
ce_state->recv_cb(ce_state);
if (ce_state->send_cb)
ce_state->send_cb(ce_state);
-
- spin_lock_bh(&ce->ce_lock);
-
- /*
- * Misc CE interrupts are not being handled, but still need
- * to be cleared.
- */
- ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
-
- spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service);
@@ -1278,35 +1367,55 @@
ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
-int ath10k_ce_disable_interrupts(struct ath10k *ar)
+void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state;
+ u32 ctrl_addr;
+
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
+
+ ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+}
+EXPORT_SYMBOL(ath10k_ce_disable_interrupt);
+
+void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
int ce_id;
- for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
- u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
-
- ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
- ath10k_ce_error_intr_disable(ar, ctrl_addr);
- ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
- }
-
- return 0;
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_disable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
-void ath10k_ce_enable_interrupts(struct ath10k *ar)
+void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
- int ce_id;
struct ath10k_ce_pipe *ce_state;
- /* Skip the last copy engine, CE7 the diagnostic window, as that
- * uses polling and isn't initialized for interrupts.
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
+
+ ath10k_ce_per_engine_handler_adjust(ce_state);
+}
+EXPORT_SYMBOL(ath10k_ce_enable_interrupt);
+
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
+ /* Enable interrupts for the copy engines that
+ * are not using polling mode.
*/
- for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
- ce_state = &ce->ce_states[ce_id];
- ath10k_ce_per_engine_handler_adjust(ce_state);
- }
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_enable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
@@ -1336,7 +1445,7 @@
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ ath10k_ce_src_ring_base_addr_set(ar, ce_id,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
@@ -1375,7 +1484,7 @@
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
@@ -1416,10 +1525,8 @@
nentries = roundup_pow_of_two(nentries);
- src_ring = kzalloc(sizeof(*src_ring) +
- (nentries *
- sizeof(*src_ring->per_transfer_context)),
- GFP_KERNEL);
+ src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
if (src_ring == NULL)
return ERR_PTR(-ENOMEM);
@@ -1476,10 +1583,8 @@
nentries = roundup_pow_of_two(nentries);
- src_ring = kzalloc(sizeof(*src_ring) +
- (nentries *
- sizeof(*src_ring->per_transfer_context)),
- GFP_KERNEL);
+ src_ring = kzalloc(struct_size(src_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
if (!src_ring)
return ERR_PTR(-ENOMEM);
@@ -1534,10 +1639,8 @@
nentries = roundup_pow_of_two(attr->dest_nentries);
- dest_ring = kzalloc(sizeof(*dest_ring) +
- (nentries *
- sizeof(*dest_ring->per_transfer_context)),
- GFP_KERNEL);
+ dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
if (dest_ring == NULL)
return ERR_PTR(-ENOMEM);
@@ -1549,10 +1652,10 @@
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
- dma_zalloc_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
- CE_DESC_RING_ALIGN),
- &base_addr, GFP_KERNEL);
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(dest_ring);
return ERR_PTR(-ENOMEM);
@@ -1580,10 +1683,8 @@
nentries = roundup_pow_of_two(attr->dest_nentries);
- dest_ring = kzalloc(sizeof(*dest_ring) +
- (nentries *
- sizeof(*dest_ring->per_transfer_context)),
- GFP_KERNEL);
+ dest_ring = kzalloc(struct_size(dest_ring, per_transfer_context,
+ nentries), GFP_KERNEL);
if (!dest_ring)
return ERR_PTR(-ENOMEM);
@@ -1608,9 +1709,6 @@
/* Correctly initialize memory to 0 to prevent garbage
 * data from crashing the system during firmware download
*/
- memset(dest_ring->base_addr_owner_space_unaligned, 0,
- nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
-
dest_ring->base_addr_owner_space =
PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
@@ -1659,7 +1757,7 @@
{
u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
@@ -1669,7 +1767,7 @@
{
u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}
@@ -1759,7 +1857,7 @@
struct ath10k_ce_crash_data ce_data;
u32 addr, id;
- lockdep_assert_held(&ar->data_lock);
+ lockdep_assert_held(&ar->dump_mutex);
ath10k_err(ar, "Copy Engine register dump:\n");
@@ -1801,6 +1899,9 @@
.ce_extract_desc_data = ath10k_ce_extract_desc_data,
.ce_free_pipe = _ath10k_ce_free_pipe,
.ce_send_nolock = _ath10k_ce_send_nolock,
+ .ce_set_src_ring_base_addr_hi = NULL,
+ .ce_set_dest_ring_base_addr_hi = NULL,
+ .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
};
static const struct ath10k_ce_ops ce_64_ops = {
@@ -1813,6 +1914,9 @@
.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
.ce_free_pipe = _ath10k_ce_free_pipe_64,
.ce_send_nolock = _ath10k_ce_send_nolock_64,
+ .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
+ .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
+ .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
};
static void ath10k_ce_set_ops(struct ath10k *ar,
@@ -1908,7 +2012,7 @@
lower_32_bits(ce->paddr_rri));
ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
(upper_32_bits(ce->paddr_rri) &
- CE_DESC_FLAGS_GET_MASK));
+ CE_DESC_ADDR_HI_MASK));
for (i = 0; i < CE_COUNT; i++) {
ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
@@ -1917,8 +2021,6 @@
value |= ar->hw_ce_regs->upd->mask;
ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
}
-
- memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
}
EXPORT_SYMBOL(ath10k_ce_alloc_rri);
--
Gitblit v1.6.2