| .. | .. |
| 1 | | -/* |
| 2 | | - * aQuantia Corporation Network Driver |
| 3 | | - * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved |
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 2 | +/* Atlantic Network Driver |
| 4 | 3 | * |
| 5 | | - * This program is free software; you can redistribute it and/or modify it |
| 6 | | - * under the terms and conditions of the GNU General Public License, |
| 7 | | - * version 2, as published by the Free Software Foundation. |
| 4 | + * Copyright (C) 2014-2019 aQuantia Corporation |
| 5 | + * Copyright (C) 2019-2020 Marvell International Ltd. |
| 8 | 6 | */ |
| 9 | 7 | |
| 10 | 8 | /* File aq_ring.c: Definition of functions for Rx/Tx rings. */ |
| .. | .. |
| 12 | 10 | #include "aq_ring.h" |
| 13 | 11 | #include "aq_nic.h" |
| 14 | 12 | #include "aq_hw.h" |
| 13 | +#include "aq_hw_utils.h" |
| 14 | +#include "aq_ptp.h" |
| 15 | 15 | |
| 16 | 16 | #include <linux/netdevice.h> |
| 17 | 17 | #include <linux/etherdevice.h> |
| 18 | + |
| 19 | +static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev) |
| 20 | +{ |
| 21 | + unsigned int len = PAGE_SIZE << rxpage->order; |
| 22 | + |
| 23 | + dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE); |
| 24 | + |
| 25 | + /* Drop the ref for being in the ring. */ |
| 26 | + __free_pages(rxpage->page, rxpage->order); |
| 27 | + rxpage->page = NULL; |
| 28 | +} |
| 29 | + |
| 30 | +static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order, |
| 31 | + struct device *dev) |
| 32 | +{ |
| 33 | + struct page *page; |
| 34 | + int ret = -ENOMEM; |
| 35 | + dma_addr_t daddr; |
| 36 | + |
| 37 | + page = dev_alloc_pages(order); |
| 38 | + if (unlikely(!page)) |
| 39 | + goto err_exit; |
| 40 | + |
| 41 | + daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order, |
| 42 | + DMA_FROM_DEVICE); |
| 43 | + |
| 44 | + if (unlikely(dma_mapping_error(dev, daddr))) |
| 45 | + goto free_page; |
| 46 | + |
| 47 | + rxpage->page = page; |
| 48 | + rxpage->daddr = daddr; |
| 49 | + rxpage->order = order; |
| 50 | + rxpage->pg_off = 0; |
| 51 | + |
| 52 | + return 0; |
| 53 | + |
| 54 | +free_page: |
| 55 | + __free_pages(page, order); |
| 56 | + |
| 57 | +err_exit: |
| 58 | + return ret; |
| 59 | +} |
| 60 | + |
| 61 | +static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf, |
| 62 | + int order) |
| 63 | +{ |
| 64 | + int ret; |
| 65 | + |
| 66 | + if (rxbuf->rxdata.page) { |
| 67 | + /* One means ring is the only user and can reuse */ |
| 68 | + if (page_ref_count(rxbuf->rxdata.page) > 1) { |
| 69 | + /* Try reuse buffer */ |
| 70 | + rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX; |
| 71 | + if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <= |
| 72 | + (PAGE_SIZE << order)) { |
| 73 | + u64_stats_update_begin(&self->stats.rx.syncp); |
| 74 | + self->stats.rx.pg_flips++; |
| 75 | + u64_stats_update_end(&self->stats.rx.syncp); |
| 76 | + } else { |
| 77 | + /* Buffer exhausted. We have other users and |
| 78 | + * should release this page and realloc |
| 79 | + */ |
| 80 | + aq_free_rxpage(&rxbuf->rxdata, |
| 81 | + aq_nic_get_dev(self->aq_nic)); |
| 82 | + u64_stats_update_begin(&self->stats.rx.syncp); |
| 83 | + self->stats.rx.pg_losts++; |
| 84 | + u64_stats_update_end(&self->stats.rx.syncp); |
| 85 | + } |
| 86 | + } else { |
| 87 | + rxbuf->rxdata.pg_off = 0; |
| 88 | + u64_stats_update_begin(&self->stats.rx.syncp); |
| 89 | + self->stats.rx.pg_reuses++; |
| 90 | + u64_stats_update_end(&self->stats.rx.syncp); |
| 91 | + } |
| 92 | + } |
| 93 | + |
| 94 | + if (!rxbuf->rxdata.page) { |
| 95 | + ret = aq_get_rxpage(&rxbuf->rxdata, order, |
| 96 | + aq_nic_get_dev(self->aq_nic)); |
| 97 | + if (ret) { |
| 98 | + u64_stats_update_begin(&self->stats.rx.syncp); |
| 99 | + self->stats.rx.alloc_fails++; |
| 100 | + u64_stats_update_end(&self->stats.rx.syncp); |
| 101 | + } |
| 102 | + return ret; |
| 103 | + } |
| 104 | + |
| 105 | + return 0; |
| 106 | +} |
| 18 | 107 | |
| 19 | 108 | static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self, |
| 20 | 109 | struct aq_nic_s *aq_nic) |
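
The hunk above replaces per-descriptor `alloc_pages()`/`dma_map_page()` with a small page-reuse scheme: a page is DMA-mapped once, handed out in `AQ_CFG_RX_FRAME_MAX` slices, and rewound only when the ring holds the sole reference. Below is a minimal userspace model of that reuse decision; the types and helpers are stand-ins for illustration, not the kernel API (the real code uses `page_ref_count()`, `dev_alloc_pages()` and `dma_map_page()`).

```c
/* Userspace model of the page-reuse policy in aq_get_rxpages() above.
 * All names here are stand-ins; only the branch structure mirrors the
 * driver.
 */
#include <stddef.h>
#include <stdio.h>

#define FRAME_MAX 2048          /* stand-in for AQ_CFG_RX_FRAME_MAX */
#define PAGE_SZ   4096          /* stand-in for PAGE_SIZE           */

struct model_rxpage {
	int refs;               /* page_ref_count() stand-in */
	size_t pg_off;          /* next free slice offset    */
	unsigned int order;     /* page allocation order     */
};

enum reuse_result { PG_FLIP, PG_LOST, PG_REUSE };

/* Mirrors the three branches of aq_get_rxpages(): advance to the next
 * slice while other users still hold the page, drop the page once the
 * slices are exhausted, or rewind to offset 0 when the ring is the
 * sole owner.
 */
static enum reuse_result model_reuse(struct model_rxpage *p)
{
	if (p->refs > 1) {
		p->pg_off += FRAME_MAX;
		if (p->pg_off + FRAME_MAX <= ((size_t)PAGE_SZ << p->order))
			return PG_FLIP;   /* stats.rx.pg_flips++  */
		return PG_LOST;           /* stats.rx.pg_losts++  */
	}
	p->pg_off = 0;
	return PG_REUSE;                  /* stats.rx.pg_reuses++ */
}

int main(void)
{
	struct model_rxpage pg = { .refs = 2, .pg_off = 0, .order = 0 };

	printf("%d\n", model_reuse(&pg)); /* PG_FLIP: slice 2048..4095 is free   */
	printf("%d\n", model_reuse(&pg)); /* PG_LOST: page exhausted, realloc    */
	return 0;
}
```
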
| .. | .. |
| 29 | 118 | goto err_exit; |
| 30 | 119 | } |
| 31 | 120 | self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic), |
| 32 | | - self->size * self->dx_size, |
| 33 | | - &self->dx_ring_pa, GFP_KERNEL); |
| 121 | + self->size * self->dx_size, |
| 122 | + &self->dx_ring_pa, GFP_KERNEL); |
| 34 | 123 | if (!self->dx_ring) { |
| 35 | 124 | err = -ENOMEM; |
| 36 | 125 | goto err_exit; |
| .. | .. |
| 41 | 130 | aq_ring_free(self); |
| 42 | 131 | self = NULL; |
| 43 | 132 | } |
| 133 | + |
| 44 | 134 | return self; |
| 45 | 135 | } |
| 46 | 136 | |
| .. | .. |
| 67 | 157 | aq_ring_free(self); |
| 68 | 158 | self = NULL; |
| 69 | 159 | } |
| 160 | + |
| 70 | 161 | return self; |
| 71 | 162 | } |
| 72 | 163 | |
| .. | .. |
| 81 | 172 | self->idx = idx; |
| 82 | 173 | self->size = aq_nic_cfg->rxds; |
| 83 | 174 | self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size; |
| 175 | + self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + |
| 176 | + (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1; |
| 177 | + |
| 178 | + if (aq_nic_cfg->rxpageorder > self->page_order) |
| 179 | + self->page_order = aq_nic_cfg->rxpageorder; |
| 84 | 180 | |
| 85 | 181 | self = aq_ring_alloc(self, aq_nic); |
| 86 | 182 | if (!self) { |
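
`self->page_order` above is the smallest allocation order whose page block covers one receive frame: `fls(ceil(FRAME_MAX / PAGE_SIZE)) - 1`, where the configured `rxpageorder` can only raise it. A worked version of that arithmetic, assuming 4 KiB pages and power-of-two frame sizes (16384 below is a made-up value for illustration):

```c
/* Worked example of the page_order computation from the hunk above. */
#include <stdio.h>

static unsigned int fls_u32(unsigned int x)   /* kernel fls(): 1-based */
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int page_order(unsigned int frame_max, unsigned int page_size)
{
	/* Integer ceil of frame_max / page_size, then highest bit - 1. */
	return fls_u32(frame_max / page_size +
		       (frame_max % page_size ? 1 : 0)) - 1;
}

int main(void)
{
	printf("%u\n", page_order(2048, 4096));  /* 0: one 4 KiB page  */
	printf("%u\n", page_order(16384, 4096)); /* 2: a 4-page block  */
	return 0;
}
```
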
| .. | .. |
| 93 | 189 | aq_ring_free(self); |
| 94 | 190 | self = NULL; |
| 95 | 191 | } |
| 192 | + |
| 96 | 193 | return self; |
| 97 | 194 | } |
| 98 | 195 | |
| 99 | | -int aq_ring_init(struct aq_ring_s *self) |
| 196 | +struct aq_ring_s * |
| 197 | +aq_ring_hwts_rx_alloc(struct aq_ring_s *self, struct aq_nic_s *aq_nic, |
| 198 | + unsigned int idx, unsigned int size, unsigned int dx_size) |
| 199 | +{ |
| 200 | + struct device *dev = aq_nic_get_dev(aq_nic); |
| 201 | + size_t sz = size * dx_size + AQ_CFG_RXDS_DEF; |
| 202 | + |
| 203 | + memset(self, 0, sizeof(*self)); |
| 204 | + |
| 205 | + self->aq_nic = aq_nic; |
| 206 | + self->idx = idx; |
| 207 | + self->size = size; |
| 208 | + self->dx_size = dx_size; |
| 209 | + |
| 210 | + self->dx_ring = dma_alloc_coherent(dev, sz, &self->dx_ring_pa, |
| 211 | + GFP_KERNEL); |
| 212 | + if (!self->dx_ring) { |
| 213 | + aq_ring_free(self); |
| 214 | + return NULL; |
| 215 | + } |
| 216 | + |
| 217 | + return self; |
| 218 | +} |
| 219 | + |
| 220 | +int aq_ring_init(struct aq_ring_s *self, const enum atl_ring_type ring_type) |
| 100 | 221 | { |
| 101 | 222 | self->hw_head = 0; |
| 102 | 223 | self->sw_head = 0; |
| 103 | 224 | self->sw_tail = 0; |
| 225 | + self->ring_type = ring_type; |
| 226 | + |
| 227 | + if (self->ring_type == ATL_RING_RX) |
| 228 | + u64_stats_init(&self->stats.rx.syncp); |
| 229 | + else |
| 230 | + u64_stats_init(&self->stats.tx.syncp); |
| 231 | + |
| 104 | 232 | return 0; |
| 105 | 233 | } |
| 106 | 234 | |
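
`aq_ring_init()` now takes the ring type so it can initialize the matching `u64_stats_sync`, and every counter bump in this patch is wrapped in `u64_stats_update_begin()/end()` so 32-bit readers never observe a torn 64-bit value. A hedged userspace model of the writer side follows; on 64-bit kernels the real helpers compile to nothing, and the explicit seqcount below only models the 32-bit case (names are stand-ins):

```c
/* Minimal single-threaded model of the u64_stats writer protocol. */
#include <stdint.h>
#include <stdio.h>

struct model_sync { uint32_t seq; };   /* u64_stats_sync stand-in */
struct model_rx_stats {
	struct model_sync syncp;
	uint64_t packets;
	uint64_t bytes;
};

static void model_update_begin(struct model_sync *s)
{
	s->seq++;                                  /* odd: write in flight */
	__atomic_thread_fence(__ATOMIC_SEQ_CST);   /* full barrier, kept simple */
}

static void model_update_end(struct model_sync *s)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
	s->seq++;                                  /* even: snapshot consistent */
}

int main(void)
{
	struct model_rx_stats st = { { 0 }, 0, 0 };

	model_update_begin(&st.syncp);   /* mirrors the rx_clean hot path */
	st.packets++;
	st.bytes += 1514;
	model_update_end(&st.syncp);

	printf("%llu %llu\n", (unsigned long long)st.packets,
	       (unsigned long long)st.bytes);
	return 0;
}
```
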
| .. | .. |
| 122 | 250 | { |
| 123 | 251 | struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); |
| 124 | 252 | |
| 125 | | - if (__netif_subqueue_stopped(ndev, ring->idx)) { |
| 126 | | - netif_wake_subqueue(ndev, ring->idx); |
| 253 | + if (__netif_subqueue_stopped(ndev, |
| 254 | + AQ_NIC_RING2QMAP(ring->aq_nic, |
| 255 | + ring->idx))) { |
| 256 | + netif_wake_subqueue(ndev, |
| 257 | + AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx)); |
| 258 | + u64_stats_update_begin(&ring->stats.tx.syncp); |
| 127 | 259 | ring->stats.tx.queue_restarts++; |
| 260 | + u64_stats_update_end(&ring->stats.tx.syncp); |
| 128 | 261 | } |
| 129 | 262 | } |
| 130 | 263 | |
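
`ring->idx` is no longer used directly as the netdev queue index; wake and stop now go through `AQ_NIC_RING2QMAP()`. The PTP rings added elsewhere in this series appear to be why ring index and queue index can diverge, though the hunk itself does not show the mapping. A toy illustration of the indirection; the modulo rule is a made-up stand-in, not the driver's actual `AQ_NIC_RING2QMAP()` definition:

```c
/* Hypothetical ring-to-queue fold, for illustration only. */
#include <stdio.h>

#define MODEL_TX_QUEUES 4

static unsigned int model_ring2qmap(unsigned int ring_idx)
{
	return ring_idx % MODEL_TX_QUEUES;  /* extra rings fold back */
}

int main(void)
{
	printf("%u\n", model_ring2qmap(2)); /* regular ring: queue 2 */
	printf("%u\n", model_ring2qmap(5)); /* extra ring:   queue 1 */
	return 0;
}
```
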
| .. | .. |
| 132 | 265 | { |
| 133 | 266 | struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); |
| 134 | 267 | |
| 135 | | - if (!__netif_subqueue_stopped(ndev, ring->idx)) |
| 136 | | - netif_stop_subqueue(ndev, ring->idx); |
| 268 | + if (!__netif_subqueue_stopped(ndev, |
| 269 | + AQ_NIC_RING2QMAP(ring->aq_nic, |
| 270 | + ring->idx))) |
| 271 | + netif_stop_subqueue(ndev, |
| 272 | + AQ_NIC_RING2QMAP(ring->aq_nic, ring->idx)); |
| 137 | 273 | } |
| 138 | 274 | |
| 139 | 275 | bool aq_ring_tx_clean(struct aq_ring_s *self) |
| .. | .. |
| 163 | 299 | } |
| 164 | 300 | |
| 165 | 301 | if (unlikely(buff->is_eop)) { |
| 166 | | - ++self->stats.rx.packets; |
| 302 | + u64_stats_update_begin(&self->stats.tx.syncp); |
| 303 | + ++self->stats.tx.packets; |
| 167 | 304 | self->stats.tx.bytes += buff->skb->len; |
| 305 | + u64_stats_update_end(&self->stats.tx.syncp); |
| 168 | 306 | |
| 169 | 307 | dev_kfree_skb_any(buff->skb); |
| 170 | 308 | } |
| .. | .. |
| 184 | 322 | return; |
| 185 | 323 | |
| 186 | 324 | if (unlikely(buff->is_cso_err)) { |
| 325 | + u64_stats_update_begin(&self->stats.rx.syncp); |
| 187 | 326 | ++self->stats.rx.errors; |
| 327 | + u64_stats_update_end(&self->stats.rx.syncp); |
| 188 | 328 | skb->ip_summed = CHECKSUM_NONE; |
| 189 | 329 | return; |
| 190 | 330 | } |
| .. | .. |
| 206 | 346 | { |
| 207 | 347 | struct net_device *ndev = aq_nic_get_ndev(self->aq_nic); |
| 208 | 348 | int err = 0; |
| 209 | | - bool is_rsc_completed = true; |
| 210 | 349 | |
| 211 | 350 | for (; (self->sw_head != self->hw_head) && budget; |
| 212 | 351 | self->sw_head = aq_ring_next_dx(self, self->sw_head), |
| 213 | 352 | --budget, ++(*work_done)) { |
| 214 | 353 | struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; |
| 354 | + bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self); |
| 355 | + struct aq_ring_buff_s *buff_ = NULL; |
| 215 | 356 | struct sk_buff *skb = NULL; |
| 216 | 357 | unsigned int next_ = 0U; |
| 217 | 358 | unsigned int i = 0U; |
| 218 | | - struct aq_ring_buff_s *buff_ = NULL; |
| 219 | | - |
| 220 | | - if (buff->is_error) { |
| 221 | | - __free_pages(buff->page, 0); |
| 222 | | - continue; |
| 223 | | - } |
| 359 | + u16 hdr_len; |
| 224 | 360 | |
| 225 | 361 | if (buff->is_cleaned) |
| 226 | 362 | continue; |
| 227 | 363 | |
| 228 | 364 | if (!buff->is_eop) { |
| 229 | | - for (next_ = buff->next, |
| 230 | | - buff_ = &self->buff_ring[next_]; true; |
| 231 | | - next_ = buff_->next, |
| 232 | | - buff_ = &self->buff_ring[next_]) { |
| 365 | + unsigned int frag_cnt = 0U; |
| 366 | + buff_ = buff; |
| 367 | + do { |
| 368 | + bool is_rsc_completed = true; |
| 369 | + |
| 370 | + if (buff_->next >= self->size) { |
| 371 | + err = -EIO; |
| 372 | + goto err_exit; |
| 373 | + } |
| 374 | + |
| 375 | + frag_cnt++; |
| 376 | + next_ = buff_->next, |
| 377 | + buff_ = &self->buff_ring[next_]; |
| 233 | 378 | is_rsc_completed = |
| 234 | 379 | aq_ring_dx_in_range(self->sw_head, |
| 235 | 380 | next_, |
| 236 | 381 | self->hw_head); |
| 237 | 382 | |
| 238 | | - if (unlikely(!is_rsc_completed)) { |
| 239 | | - is_rsc_completed = false; |
| 240 | | - break; |
| 383 | + if (unlikely(!is_rsc_completed) || |
| 384 | + frag_cnt > MAX_SKB_FRAGS) { |
| 385 | + err = 0; |
| 386 | + goto err_exit; |
| 241 | 387 | } |
| 242 | 388 | |
| 243 | | - if (buff_->is_eop) |
| 244 | | - break; |
| 245 | | - } |
| 389 | + buff->is_error |= buff_->is_error; |
| 390 | + buff->is_cso_err |= buff_->is_cso_err; |
| 246 | 391 | |
| 247 | | - if (!is_rsc_completed) { |
| 248 | | - err = 0; |
| 249 | | - goto err_exit; |
| 392 | + } while (!buff_->is_eop); |
| 393 | + |
| 394 | + if (buff->is_error || |
| 395 | + (buff->is_lro && buff->is_cso_err)) { |
| 396 | + buff_ = buff; |
| 397 | + do { |
| 398 | + if (buff_->next >= self->size) { |
| 399 | + err = -EIO; |
| 400 | + goto err_exit; |
| 401 | + } |
| 402 | + next_ = buff_->next, |
| 403 | + buff_ = &self->buff_ring[next_]; |
| 404 | + |
| 405 | + buff_->is_cleaned = true; |
| 406 | + } while (!buff_->is_eop); |
| 407 | + |
| 408 | + u64_stats_update_begin(&self->stats.rx.syncp); |
| 409 | + ++self->stats.rx.errors; |
| 410 | + u64_stats_update_end(&self->stats.rx.syncp); |
| 411 | + continue; |
| 250 | 412 | } |
| 251 | 413 | } |
| 252 | 414 | |
| 253 | | - /* for single fragment packets use build_skb() */ |
| 254 | | - if (buff->is_eop && |
| 255 | | - buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) { |
| 256 | | - skb = build_skb(page_address(buff->page), |
| 415 | + if (buff->is_error) { |
| 416 | + u64_stats_update_begin(&self->stats.rx.syncp); |
| 417 | + ++self->stats.rx.errors; |
| 418 | + u64_stats_update_end(&self->stats.rx.syncp); |
| 419 | + continue; |
| 420 | + } |
| 421 | + |
| 422 | + dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), |
| 423 | + buff->rxdata.daddr, |
| 424 | + buff->rxdata.pg_off, |
| 425 | + buff->len, DMA_FROM_DEVICE); |
| 426 | + |
| 427 | + skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE); |
| 428 | + if (unlikely(!skb)) { |
| 429 | + u64_stats_update_begin(&self->stats.rx.syncp); |
| 430 | + self->stats.rx.skb_alloc_fails++; |
| 431 | + u64_stats_update_end(&self->stats.rx.syncp); |
| 432 | + err = -ENOMEM; |
| 433 | + goto err_exit; |
| 434 | + } |
| 435 | + if (is_ptp_ring) |
| 436 | + buff->len -= |
| 437 | + aq_ptp_extract_ts(self->aq_nic, skb, |
| 438 | + aq_buf_vaddr(&buff->rxdata), |
| 439 | + buff->len); |
| 440 | + |
| 441 | + hdr_len = buff->len; |
| 442 | + if (hdr_len > AQ_CFG_RX_HDR_SIZE) |
| 443 | + hdr_len = eth_get_headlen(skb->dev, |
| 444 | + aq_buf_vaddr(&buff->rxdata), |
| 445 | + AQ_CFG_RX_HDR_SIZE); |
| 446 | + |
| 447 | + memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata), |
| 448 | + ALIGN(hdr_len, sizeof(long))); |
| 449 | + |
| 450 | + if (buff->len - hdr_len > 0) { |
| 451 | + skb_add_rx_frag(skb, i++, buff->rxdata.page, |
| 452 | + buff->rxdata.pg_off + hdr_len, |
| 453 | + buff->len - hdr_len, |
| 257 | 454 | AQ_CFG_RX_FRAME_MAX); |
| 258 | | - if (unlikely(!skb)) { |
| 259 | | - err = -ENOMEM; |
| 260 | | - goto err_exit; |
| 261 | | - } |
| 262 | | - |
| 263 | | - skb_put(skb, buff->len); |
| 264 | | - } else { |
| 265 | | - skb = netdev_alloc_skb(ndev, ETH_HLEN); |
| 266 | | - if (unlikely(!skb)) { |
| 267 | | - err = -ENOMEM; |
| 268 | | - goto err_exit; |
| 269 | | - } |
| 270 | | - skb_put(skb, ETH_HLEN); |
| 271 | | - memcpy(skb->data, page_address(buff->page), ETH_HLEN); |
| 272 | | - |
| 273 | | - skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN, |
| 274 | | - buff->len - ETH_HLEN, |
| 275 | | - SKB_TRUESIZE(buff->len - ETH_HLEN)); |
| 276 | | - |
| 277 | | - if (!buff->is_eop) { |
| 278 | | - for (i = 1U, next_ = buff->next, |
| 279 | | - buff_ = &self->buff_ring[next_]; |
| 280 | | - true; next_ = buff_->next, |
| 281 | | - buff_ = &self->buff_ring[next_], ++i) { |
| 282 | | - skb_add_rx_frag(skb, i, |
| 283 | | - buff_->page, 0, |
| 284 | | - buff_->len, |
| 285 | | - SKB_TRUESIZE(buff->len - |
| 286 | | - ETH_HLEN)); |
| 287 | | - buff_->is_cleaned = 1; |
| 288 | | - |
| 289 | | - if (buff_->is_eop) |
| 290 | | - break; |
| 291 | | - } |
| 292 | | - } |
| 455 | + page_ref_inc(buff->rxdata.page); |
| 293 | 456 | } |
| 457 | + |
| 458 | + if (!buff->is_eop) { |
| 459 | + buff_ = buff; |
| 460 | + do { |
| 461 | + next_ = buff_->next; |
| 462 | + buff_ = &self->buff_ring[next_]; |
| 463 | + |
| 464 | + dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic), |
| 465 | + buff_->rxdata.daddr, |
| 466 | + buff_->rxdata.pg_off, |
| 467 | + buff_->len, |
| 468 | + DMA_FROM_DEVICE); |
| 469 | + skb_add_rx_frag(skb, i++, |
| 470 | + buff_->rxdata.page, |
| 471 | + buff_->rxdata.pg_off, |
| 472 | + buff_->len, |
| 473 | + AQ_CFG_RX_FRAME_MAX); |
| 474 | + page_ref_inc(buff_->rxdata.page); |
| 475 | + buff_->is_cleaned = 1; |
| 476 | + |
| 477 | + buff->is_ip_cso &= buff_->is_ip_cso; |
| 478 | + buff->is_udp_cso &= buff_->is_udp_cso; |
| 479 | + buff->is_tcp_cso &= buff_->is_tcp_cso; |
| 480 | + buff->is_cso_err |= buff_->is_cso_err; |
| 481 | + |
| 482 | + } while (!buff_->is_eop); |
| 483 | + } |
| 484 | + |
| 485 | + if (buff->is_vlan) |
| 486 | + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), |
| 487 | + buff->vlan_rx_tag); |
| 294 | 488 | |
| 295 | 489 | skb->protocol = eth_type_trans(skb, ndev); |
| 296 | 490 | |
| .. | .. |
| 299 | 493 | skb_set_hash(skb, buff->rss_hash, |
| 300 | 494 | buff->is_hash_l4 ? PKT_HASH_TYPE_L4 : |
| 301 | 495 | PKT_HASH_TYPE_NONE); |
| 496 | + /* Send all PTP traffic to 0 queue */ |
| 497 | + skb_record_rx_queue(skb, |
| 498 | + is_ptp_ring ? 0 |
| 499 | + : AQ_NIC_RING2QMAP(self->aq_nic, |
| 500 | + self->idx)); |
| 302 | 501 | |
| 303 | | - skb_record_rx_queue(skb, self->idx); |
| 304 | | - |
| 502 | + u64_stats_update_begin(&self->stats.rx.syncp); |
| 305 | 503 | ++self->stats.rx.packets; |
| 306 | 504 | self->stats.rx.bytes += skb->len; |
| 505 | + u64_stats_update_end(&self->stats.rx.syncp); |
| 307 | 506 | |
| 308 | 507 | napi_gro_receive(napi, skb); |
| 309 | 508 | } |
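
The rewritten receive path above never attaches the whole DMA page to the skb head. It allocates a small `napi_alloc_skb()` buffer, copies only the packet headers into the linear area (sized by `eth_get_headlen()`, capped at `AQ_CFG_RX_HDR_SIZE`), and references the rest of the page zero-copy via `skb_add_rx_frag()`, taking an extra page reference so the ring can keep its own. A userspace model of that header split; sizes and names are stand-ins, not the kernel API:

```c
/* Model of the header-split receive: copy headers into a small linear
 * buffer, point at the page remainder instead of copying it. In the
 * kernel this is napi_alloc_skb() + eth_get_headlen() + memcpy() +
 * skb_add_rx_frag() with a page_ref_inc().
 */
#include <stdio.h>
#include <string.h>

#define HDR_MAX 256                      /* AQ_CFG_RX_HDR_SIZE stand-in */

struct model_skb {
	unsigned char linear[HDR_MAX];   /* small skb head             */
	size_t linear_len;
	const unsigned char *frag;       /* points into the rx page    */
	size_t frag_len;
};

static void model_rx(struct model_skb *skb,
		     const unsigned char *page_buf, size_t pkt_len,
		     size_t parsed_hdr_len)
{
	size_t hdr = pkt_len;

	if (hdr > HDR_MAX)               /* mirrors the hdr_len clamp  */
		hdr = parsed_hdr_len;    /* eth_get_headlen() result   */

	memcpy(skb->linear, page_buf, hdr);
	skb->linear_len = hdr;
	skb->frag = page_buf + hdr;      /* zero-copy remainder        */
	skb->frag_len = pkt_len - hdr;   /* page refcount bumped here  */
}

int main(void)
{
	unsigned char page_buf[2048] = { 0 };
	struct model_skb skb = { { 0 }, 0, NULL, 0 };

	model_rx(&skb, page_buf, 1514, 54); /* 54-byte TCP/IP headers  */
	printf("linear=%zu frag=%zu\n", skb.linear_len, skb.frag_len);
	return 0;
}
```

This split keeps protocol headers in cache-hot linear memory for the stack's parsing while large payloads stay in the original page, which is what makes the ring's page-reuse accounting shown earlier worthwhile.
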
| .. | .. |
| 312 | 511 | return err; |
| 313 | 512 | } |
| 314 | 513 | |
| 514 | +void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic) |
| 515 | +{ |
| 516 | +#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) |
| 517 | + while (self->sw_head != self->hw_head) { |
| 518 | + u64 ns; |
| 519 | + |
| 520 | + aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw, |
| 521 | + self->dx_ring + |
| 522 | + (self->sw_head * self->dx_size), |
| 523 | + self->dx_size, &ns); |
| 524 | + aq_ptp_tx_hwtstamp(aq_nic, ns); |
| 525 | + |
| 526 | + self->sw_head = aq_ring_next_dx(self, self->sw_head); |
| 527 | + } |
| 528 | +#endif |
| 529 | +} |
| 530 | + |
| 315 | 531 | int aq_ring_rx_fill(struct aq_ring_s *self) |
| 316 | 532 | { |
| 317 | | - unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE + |
| 318 | | - (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1; |
| 533 | + unsigned int page_order = self->page_order; |
| 319 | 534 | struct aq_ring_buff_s *buff = NULL; |
| 320 | 535 | int err = 0; |
| 321 | 536 | int i = 0; |
| 537 | + |
| 538 | + if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES, |
| 539 | + self->size / 2)) |
| 540 | + return err; |
| 541 | + |
| 322 | 541 | |
| 323 | 542 | for (i = aq_ring_avail_dx(self); i--; |
| 324 | 543 | self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) { |
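
The new early return in `aq_ring_rx_fill()` above batches refills: nothing happens until at least `min(AQ_CFG_RX_REFILL_THRES, size/2)` descriptors are free, so page allocations come in bursts rather than one per consumed slot. The same gate as plain arithmetic; the threshold value 32 is a made-up stand-in, since the hunk does not show what `AQ_CFG_RX_REFILL_THRES` expands to:

```c
/* The refill gate from aq_ring_rx_fill(), modelled in isolation. */
#include <stdio.h>

static int should_refill(unsigned int avail, unsigned int ring_size,
			 unsigned int thres)
{
	unsigned int gate = thres < ring_size / 2 ? thres : ring_size / 2;

	return avail >= gate;            /* mirrors the early return */
}

int main(void)
{
	printf("%d\n", should_refill(10, 1024, 32)); /* 0: wait for more  */
	printf("%d\n", should_refill(40, 1024, 32)); /* 1: batch refill   */
	printf("%d\n", should_refill(10, 16, 32));   /* 1: gate drops to 8 */
	return 0;
}
```
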
| .. | .. |
| 327 | 546 | buff->flags = 0U; |
| 328 | 547 | buff->len = AQ_CFG_RX_FRAME_MAX; |
| 329 | 548 | |
| 330 | | - buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order); |
| 331 | | - if (!buff->page) { |
| 332 | | - err = -ENOMEM; |
| 549 | + err = aq_get_rxpages(self, buff, page_order); |
| 550 | + if (err) |
| 333 | 551 | goto err_exit; |
| 334 | | - } |
| 335 | 552 | |
| 336 | | - buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic), |
| 337 | | - buff->page, 0, |
| 338 | | - AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); |
| 339 | | - |
| 340 | | - if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) { |
| 341 | | - err = -ENOMEM; |
| 342 | | - goto err_exit; |
| 343 | | - } |
| 344 | | - |
| 553 | + buff->pa = aq_buf_daddr(&buff->rxdata); |
| 345 | 554 | buff = NULL; |
| 346 | 555 | } |
| 347 | 556 | |
| 348 | 557 | err_exit: |
| 349 | | - if (err < 0) { |
| 350 | | - if (buff && buff->page) |
| 351 | | - __free_pages(buff->page, 0); |
| 352 | | - } |
| 353 | | - |
| 354 | 558 | return err; |
| 355 | 559 | } |
| 356 | 560 | |
| 357 | 561 | void aq_ring_rx_deinit(struct aq_ring_s *self) |
| 358 | 562 | { |
| 359 | 563 | if (!self) |
| 360 | | - goto err_exit; |
| 564 | + return; |
| 361 | 565 | |
| 362 | 566 | for (; self->sw_head != self->sw_tail; |
| 363 | 567 | self->sw_head = aq_ring_next_dx(self, self->sw_head)) { |
| 364 | 568 | struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; |
| 365 | 569 | |
| 366 | | - dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa, |
| 367 | | - AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE); |
| 368 | | - |
| 369 | | - __free_pages(buff->page, 0); |
| 570 | + aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic)); |
| 370 | 571 | } |
| 371 | | - |
| 372 | | -err_exit:; |
| 373 | 572 | } |
| 374 | 573 | |
| 375 | 574 | void aq_ring_free(struct aq_ring_s *self) |
| 376 | 575 | { |
| 377 | 576 | if (!self) |
| 378 | | - goto err_exit; |
| 577 | + return; |
| 379 | 578 | |
| 380 | 579 | kfree(self->buff_ring); |
| 381 | 580 | |
| .. | .. |
| 383 | 582 | dma_free_coherent(aq_nic_get_dev(self->aq_nic), |
| 384 | 583 | self->size * self->dx_size, self->dx_ring, |
| 385 | 584 | self->dx_ring_pa); |
| 585 | +} |
| 386 | 586 | |
| 387 | | -err_exit:; |
| 587 | +unsigned int aq_ring_fill_stats_data(struct aq_ring_s *self, u64 *data) |
| 588 | +{ |
| 589 | + unsigned int count; |
| 590 | + unsigned int start; |
| 591 | + |
| 592 | + if (self->ring_type == ATL_RING_RX) { |
| 593 | + /* This data should mimic aq_ethtool_queue_rx_stat_names structure */ |
| 594 | + do { |
| 595 | + count = 0; |
| 596 | + start = u64_stats_fetch_begin_irq(&self->stats.rx.syncp); |
| 597 | + data[count] = self->stats.rx.packets; |
| 598 | + data[++count] = self->stats.rx.jumbo_packets; |
| 599 | + data[++count] = self->stats.rx.lro_packets; |
| 600 | + data[++count] = self->stats.rx.errors; |
| 601 | + data[++count] = self->stats.rx.alloc_fails; |
| 602 | + data[++count] = self->stats.rx.skb_alloc_fails; |
| 603 | + data[++count] = self->stats.rx.polls; |
| 604 | + } while (u64_stats_fetch_retry_irq(&self->stats.rx.syncp, start)); |
| 605 | + } else { |
| 606 | + /* This data should mimic aq_ethtool_queue_tx_stat_names structure */ |
| 607 | + do { |
| 608 | + count = 0; |
| 609 | + start = u64_stats_fetch_begin_irq(&self->stats.tx.syncp); |
| 610 | + data[count] = self->stats.tx.packets; |
| 611 | + data[++count] = self->stats.tx.queue_restarts; |
| 612 | + } while (u64_stats_fetch_retry_irq(&self->stats.tx.syncp, start)); |
| 613 | + } |
| 614 | + |
| 615 | + return ++count; |
| 388 | 616 | } |
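
`aq_ring_fill_stats_data()` is the reader side of the syncp protocol introduced throughout this patch: it retries the whole copy-out until the sequence count is unchanged, so ethtool sees an atomic snapshot of the 64-bit counters even on 32-bit hosts. A sketch of that retry loop in the same userspace model as the writer sketch shown earlier (the seqcount helpers stand in for `u64_stats_fetch_begin_irq()`/`u64_stats_fetch_retry_irq()`):

```c
/* Model of the reader-side retry loop in aq_ring_fill_stats_data(). */
#include <stdint.h>
#include <stdio.h>

struct model_sync { uint32_t seq; };

static uint32_t model_fetch_begin(const struct model_sync *s)
{
	uint32_t seq;

	do {                              /* spin while a write is live */
		seq = __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE);
	} while (seq & 1);
	return seq;
}

static int model_fetch_retry(const struct model_sync *s, uint32_t start)
{
	return __atomic_load_n(&s->seq, __ATOMIC_ACQUIRE) != start;
}

int main(void)
{
	struct model_sync syncp = { 0 };
	uint64_t packets = 12345, snapshot;
	uint32_t start;

	do {
		start = model_fetch_begin(&syncp);
		snapshot = packets;       /* copy out, like data[] above */
	} while (model_fetch_retry(&syncp, start));

	printf("%llu\n", (unsigned long long)snapshot);
	return 0;
}
```
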
|---|