From 1543e317f1da31b75942316931e8f491a8920811 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Thu, 04 Jan 2024 10:08:02 +0000
Subject: [PATCH] disable FB
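
Update kernel/drivers/net/ethernet/amazon/ena/ena_eth_com.h: replace the
dual BSD/GPL license text with an SPDX tag, extend ena_com_rx_ctx with
l4_csum_checked and pkt_offset, rename ena_com_sq_empty_space() to
ena_com_free_q_entries(), add the LLQ tx-burst helpers
(ena_com_sq_have_enough_space(), is_llq_max_tx_burst_exists(),
ena_com_meta_desc_changed(), ena_com_is_doorbell_needed()), move
ena_com_tx_comp_req_id_get() into this header as an inline, and only
write the CQ head doorbell when cq_head_db_reg is set.

Illustrative sketch only, not part of this patch: the expected TX-path
calling pattern for the helpers below. The function name and the
io_sq/ena_tx_ctx/nb_hw_desc/num_descs parameters are hypothetical and
assumed to come from the caller's tx-ring context.

  /* Hypothetical caller, for illustration only. */
  static int example_xmit_step(struct ena_com_io_sq *io_sq,
                               struct ena_com_tx_ctx *ena_tx_ctx,
                               int *nb_hw_desc, u16 num_descs)
  {
          /* Back off if the SQ cannot hold this packet's descriptors. */
          if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_descs)))
                  return -ENOSPC;

          /* If this packet would overflow the current LLQ tx burst,
           * flush the burst by ringing the doorbell first.
           */
          if (ena_com_is_doorbell_needed(io_sq, ena_tx_ctx))
                  ena_com_write_sq_doorbell(io_sq);

          return ena_com_prepare_tx(io_sq, ena_tx_ctx, nb_hw_desc);
  }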
---
kernel/drivers/net/ethernet/amazon/ena/ena_eth_com.h | 178 ++++++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 134 insertions(+), 44 deletions(-)
diff --git a/kernel/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/kernel/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 2f76572..2c16c21 100644
--- a/kernel/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/kernel/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_ETH_COM_H_
@@ -67,11 +40,13 @@
enum ena_eth_io_l4_proto_index l4_proto;
bool l3_csum_err;
bool l4_csum_err;
+ u8 l4_csum_checked;
/* fragmented packet */
bool frag;
u32 hash;
u16 descs;
int max_bufs;
+ u8 pkt_offset;
};
int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
@@ -86,8 +61,6 @@
struct ena_com_buf *ena_buf,
u16 req_id);
-int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id);
-
bool ena_com_cq_empty(struct ena_com_io_cq *io_cq);
static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
@@ -96,7 +69,7 @@
writel(intr_reg->intr_control, io_cq->unmask_reg);
}
-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_q_entries(struct ena_com_io_sq *io_sq)
{
u16 tail, next_to_comp, cnt;
@@ -107,16 +80,87 @@
return io_sq->q_depth - 1 - cnt;
}
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+ u16 required_buffers)
+{
+ int temp;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+ return ena_com_free_q_entries(io_sq) >= required_buffers;
+
+ /* This calculation doesn't need to be 100% accurate. To reduce
+ * the calculation overhead, just subtract 2 lines from the free descs
+ * (one for the header line and one to compensate for the round-down
+ * division).
+ */
+ temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+ return ena_com_free_q_entries(io_sq) > temp;
+}
+
+static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ if (!ena_tx_ctx->meta_valid)
+ return false;
+
+ return !!memcmp(&io_sq->cached_tx_meta,
+ &ena_tx_ctx->ena_meta,
+ sizeof(struct ena_com_tx_meta));
+}
+
+static inline bool is_llq_max_tx_burst_exists(struct ena_com_io_sq *io_sq)
+{
+ return (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) &&
+ io_sq->llq_info.max_entries_in_tx_burst > 0;
+}
+
+static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
+ struct ena_com_tx_ctx *ena_tx_ctx)
+{
+ struct ena_com_llq_info *llq_info;
+ int descs_after_first_entry;
+ int num_entries_needed = 1;
+ u16 num_descs;
+
+ if (!is_llq_max_tx_burst_exists(io_sq))
+ return false;
+
+ llq_info = &io_sq->llq_info;
+ num_descs = ena_tx_ctx->num_bufs;
+
+ if (llq_info->disable_meta_caching ||
+ unlikely(ena_com_meta_desc_changed(io_sq, ena_tx_ctx)))
+ ++num_descs;
+
+ if (num_descs > llq_info->descs_num_before_header) {
+ descs_after_first_entry = num_descs - llq_info->descs_num_before_header;
+ num_entries_needed += DIV_ROUND_UP(descs_after_first_entry,
+ llq_info->descs_per_entry);
+ }
+
+ pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
+ num_descs, num_entries_needed);
+
+ return num_entries_needed > io_sq->entries_in_tx_burst_left;
+}
+
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
- u16 tail;
+ u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
+ u16 tail = io_sq->tail;
- tail = io_sq->tail;
-
- pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
+ pr_debug("Write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
writel(tail, io_sq->db_addr);
+
+ if (is_llq_max_tx_burst_exists(io_sq)) {
+ pr_debug("Reset available entries in tx burst for queue %d to %d\n",
+ io_sq->qid, max_entries_in_tx_burst);
+ io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
+ }
return 0;
}
@@ -126,15 +170,17 @@
u16 unreported_comp, head;
bool need_update;
- head = io_cq->head;
- unreported_comp = head - io_cq->last_head_update;
- need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
+ if (unlikely(io_cq->cq_head_db_reg)) {
+ head = io_cq->head;
+ unreported_comp = head - io_cq->last_head_update;
+ need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
- if (io_cq->cq_head_db_reg && need_update) {
- pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
- io_cq->qid, head);
- writel(head, io_cq->cq_head_db_reg);
- io_cq->last_head_update = head;
+ if (unlikely(need_update)) {
+ pr_debug("Write completion queue doorbell for queue %d: head: %d\n",
+ io_cq->qid, head);
+ writel(head, io_cq->cq_head_db_reg);
+ io_cq->last_head_update = head;
+ }
}
return 0;
@@ -159,4 +205,48 @@
io_sq->next_to_comp += elem;
}
+static inline void ena_com_cq_inc_head(struct ena_com_io_cq *io_cq)
+{
+ io_cq->head++;
+
+ /* Switch phase bit in case of wrap around */
+ if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
+ io_cq->phase ^= 1;
+}
+
+static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+ u16 *req_id)
+{
+ u8 expected_phase, cdesc_phase;
+ struct ena_eth_io_tx_cdesc *cdesc;
+ u16 masked_head;
+
+ masked_head = io_cq->head & (io_cq->q_depth - 1);
+ expected_phase = io_cq->phase;
+
+ cdesc = (struct ena_eth_io_tx_cdesc *)
+ ((uintptr_t)io_cq->cdesc_addr.virt_addr +
+ (masked_head * io_cq->cdesc_entry_size_in_bytes));
+
+ /* When the current completion descriptor's phase isn't the same as the
+ * expected phase, it means that the device has not yet updated
+ * this completion.
+ */
+ cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+ if (cdesc_phase != expected_phase)
+ return -EAGAIN;
+
+ dma_rmb();
+
+ *req_id = READ_ONCE(cdesc->req_id);
+ if (unlikely(*req_id >= io_cq->q_depth)) {
+ pr_err("Invalid req id %d\n", cdesc->req_id);
+ return -EINVAL;
+ }
+
+ ena_com_cq_inc_head(io_cq);
+
+ return 0;
+}
+
#endif /* ENA_ETH_COM_H_ */
--
Gitblit v1.6.2