| .. | .. |
|---|
| 1 | 1 | /* Copyright 2008 - 2016 Freescale Semiconductor Inc. |
|---|
| 2 | + * Copyright 2020 NXP |
|---|
| 2 | 3 | * |
|---|
| 3 | 4 | * Redistribution and use in source and binary forms, with or without |
|---|
| 4 | 5 | * modification, are permitted provided that the following conditions are met: |
|---|
| .. | .. |
|---|
| 51 | 52 | #include <linux/percpu.h> |
|---|
| 52 | 53 | #include <linux/dma-mapping.h> |
|---|
| 53 | 54 | #include <linux/sort.h> |
|---|
| 55 | +#include <linux/phy_fixed.h> |
|---|
| 54 | 56 | #include <soc/fsl/bman.h> |
|---|
| 55 | 57 | #include <soc/fsl/qman.h> |
|---|
| 56 | | - |
|---|
| 57 | 58 | #include "fman.h" |
|---|
| 58 | 59 | #include "fman_port.h" |
|---|
| 59 | 60 | #include "mac.h" |
|---|
| .. | .. |
|---|
| 86 | 87 | |
|---|
| 87 | 88 | #define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ |
|---|
| 88 | 89 | NETIF_MSG_LINK | NETIF_MSG_IFUP | \ |
|---|
| 89 | | - NETIF_MSG_IFDOWN) |
|---|
| 90 | + NETIF_MSG_IFDOWN | NETIF_MSG_HW) |
|---|
| 90 | 91 | |
|---|
| 91 | 92 | #define DPAA_INGRESS_CS_THRESHOLD 0x10000000 |
|---|
| 92 | 93 | /* Ingress congestion threshold on FMan ports |
|---|
| .. | .. |
|---|
| 123 | 124 | #define FSL_QMAN_MAX_OAL 127 |
|---|
| 124 | 125 | |
|---|
| 125 | 126 | /* Default alignment for start of data in an Rx FD */ |
|---|
| 127 | +#ifdef CONFIG_DPAA_ERRATUM_A050385 |
|---|
| 128 | +/* aligning data start to 64 avoids DMA transaction splits, unless the buffer |
|---|
| 129 | + * is crossing a 4k page boundary |
|---|
| 130 | + */ |
|---|
| 131 | +#define DPAA_FD_DATA_ALIGNMENT (fman_has_errata_a050385() ? 64 : 16) |
|---|
| 132 | +/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary |
|---|
| 133 | + * crossings; also, all SG fragments except the last must have a size multiple |
|---|
| 134 | + * of 256 to avoid DMA transaction splits |
|---|
| 135 | + */ |
|---|
| 136 | +#define DPAA_A050385_ALIGN 256 |
|---|
| 137 | +#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \ |
|---|
| 138 | + DPAA_A050385_ALIGN : 16) |
|---|
| 139 | +#else |
|---|
| 126 | 140 | #define DPAA_FD_DATA_ALIGNMENT 16 |
|---|
| 141 | +#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT |
|---|
| 142 | +#endif |
|---|
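For a sense of how these runtime-selected alignments play out, here is a small illustrative sketch (the helper name and the 90-byte input are hypothetical; `ALIGN()` and `fman_has_errata_a050385()` are the existing kernel/FMan helpers already used by the macros above):

```c
/* Illustration only: applying the runtime-selected alignments.
 * A 90-byte headroom, say, rounds up to 128 for Tx (64-byte alignment)
 * and to 256 for Rx on affected SoCs; elsewhere both round up to 96.
 */
static u16 example_aligned_headroom(u16 raw, bool rx)
{
	return ALIGN(raw, rx ? DPAA_FD_RX_DATA_ALIGNMENT :
			       DPAA_FD_DATA_ALIGNMENT);
}
```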
| 127 | 143 | |
|---|
| 128 | 144 | /* The DPAA requires 256 bytes reserved and mapped for the SGT */ |
|---|
| 129 | 145 | #define DPAA_SGT_SIZE 256 |
|---|
| .. | .. |
|---|
| 158 | 174 | #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) |
|---|
| 159 | 175 | #define DPAA_TIME_STAMP_SIZE 8 |
|---|
| 160 | 176 | #define DPAA_HASH_RESULTS_SIZE 8 |
|---|
| 161 | | -#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \ |
|---|
| 177 | +#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \ |
|---|
| 178 | + + DPAA_HASH_RESULTS_SIZE) |
|---|
| 179 | +#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \ |
|---|
| 162 | 180 | dpaa_rx_extra_headroom) |
|---|
| 181 | +#ifdef CONFIG_DPAA_ERRATUM_A050385 |
|---|
| 182 | +#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE) |
|---|
| 183 | +#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \ |
|---|
| 184 | + DPAA_RX_PRIV_DATA_A050385_SIZE : \ |
|---|
| 185 | + DPAA_RX_PRIV_DATA_DEFAULT_SIZE) |
|---|
| 186 | +#else |
|---|
| 187 | +#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE |
|---|
| 188 | +#endif |
|---|
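The erratum-mode private data size is picked so that private data plus the hardware annotations fill exactly one 256-byte chunk, which keeps the resulting Rx headroom naturally aligned. A minimal sanity-check sketch (not driver code, just restating the identity the macros encode):

```c
/* Holds by construction: priv data (256 - HWA) + HWA is one aligned chunk,
 * so the ALIGN() applied later in dpaa_get_headroom() leaves it at 256.
 */
static u16 example_rx_headroom_a050385(void)
{
	u16 priv = DPAA_A050385_ALIGN - DPAA_HWA_SIZE; /* DPAA_RX_PRIV_DATA_A050385_SIZE */

	return ALIGN(priv + DPAA_HWA_SIZE, DPAA_A050385_ALIGN); /* == 256 */
}
```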
| 163 | 189 | |
|---|
| 164 | 190 | #define DPAA_ETH_PCD_RXQ_NUM 128 |
|---|
| 165 | 191 | |
|---|
| .. | .. |
|---|
| 178 | 204 | /* All the dpa bps in use at any moment */ |
|---|
| 179 | 205 | static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS]; |
|---|
| 180 | 206 | |
|---|
| 181 | | -/* The raw buffer size must be cacheline aligned */ |
|---|
| 182 | 207 | #define DPAA_BP_RAW_SIZE 4096 |
|---|
| 183 | | -/* When using more than one buffer pool, the raw sizes are as follows: |
|---|
| 184 | | - * 1 bp: 4KB |
|---|
| 185 | | - * 2 bp: 2KB, 4KB |
|---|
| 186 | | - * 3 bp: 1KB, 2KB, 4KB |
|---|
| 187 | | - * 4 bp: 1KB, 2KB, 4KB, 8KB |
|---|
| 188 | | - */ |
|---|
| 189 | | -static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt) |
|---|
| 190 | | -{ |
|---|
| 191 | | - size_t res = DPAA_BP_RAW_SIZE / 4; |
|---|
| 192 | | - u8 i; |
|---|
| 193 | 208 | |
|---|
| 194 | | - for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++) |
|---|
| 195 | | - res *= 2; |
|---|
| 196 | | - return res; |
|---|
| 197 | | -} |
|---|
| 198 | | - |
|---|
| 199 | | -/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is |
|---|
| 200 | | - * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that, |
|---|
| 201 | | - * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us |
|---|
| 202 | | - * half-page-aligned buffers, so we reserve some more space for start-of-buffer |
|---|
| 203 | | - * alignment. |
|---|
| 204 | | - */ |
|---|
| 205 | | -#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES) |
|---|
| 209 | +#ifdef CONFIG_DPAA_ERRATUM_A050385 |
|---|
| 210 | +#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \ |
|---|
| 211 | + ~(DPAA_A050385_ALIGN - 1)) |
|---|
| 212 | +#else |
|---|
| 213 | +#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size) |
|---|
| 214 | +#endif |
|---|
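A sketch of the resulting usable Rx buffer size (the exact figure depends on `sizeof(struct skb_shared_info)` in the running kernel, so no number is asserted; the helper is hypothetical):

```c
static size_t example_usable_rx_size(void)
{
	size_t usable = SKB_WITH_OVERHEAD(DPAA_BP_RAW_SIZE);

	/* under the erratum, round down to a 256-byte multiple so that
	 * full-size SG fragments keep the required size alignment
	 */
	if (fman_has_errata_a050385())
		usable &= ~(size_t)(DPAA_A050385_ALIGN - 1);

	return usable;
}
```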
| 206 | 215 | |
|---|
| 207 | 216 | static int dpaa_max_frm; |
|---|
| 208 | 217 | |
|---|
| .. | .. |
|---|
| 255 | 264 | net_dev->features |= net_dev->hw_features; |
|---|
| 256 | 265 | net_dev->vlan_features = net_dev->features; |
|---|
| 257 | 266 | |
|---|
| 258 | | - memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); |
|---|
| 259 | | - memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); |
|---|
| 267 | + if (is_valid_ether_addr(mac_addr)) { |
|---|
| 268 | + memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len); |
|---|
| 269 | + memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); |
|---|
| 270 | + } else { |
|---|
| 271 | + eth_hw_addr_random(net_dev); |
|---|
| 272 | + err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac, |
|---|
| 273 | + (enet_addr_t *)net_dev->dev_addr); |
|---|
| 274 | + if (err) { |
|---|
| 275 | + dev_err(dev, "Failed to set random MAC address\n"); |
|---|
| 276 | + return -EINVAL; |
|---|
| 277 | + } |
|---|
| 278 | + dev_info(dev, "Using random MAC address: %pM\n", |
|---|
| 279 | + net_dev->dev_addr); |
|---|
| 280 | + } |
|---|
| 260 | 281 | |
|---|
| 261 | 282 | net_dev->ethtool_ops = &dpaa_ethtool_ops; |
|---|
| 262 | 283 | |
|---|
| .. | .. |
|---|
| 288 | 309 | /* Allow the Fman (Tx) port to process in-flight frames before we |
|---|
| 289 | 310 | * try switching it off. |
|---|
| 290 | 311 | */ |
|---|
| 291 | | - usleep_range(5000, 10000); |
|---|
| 312 | + msleep(200); |
|---|
| 292 | 313 | |
|---|
| 293 | 314 | err = mac_dev->stop(mac_dev); |
|---|
| 294 | 315 | if (err < 0) |
|---|
| .. | .. |
|---|
| 305 | 326 | phy_disconnect(net_dev->phydev); |
|---|
| 306 | 327 | net_dev->phydev = NULL; |
|---|
| 307 | 328 | |
|---|
| 329 | + msleep(200); |
|---|
| 330 | + |
|---|
| 308 | 331 | return err; |
|---|
| 309 | 332 | } |
|---|
| 310 | 333 | |
|---|
| 311 | | -static void dpaa_tx_timeout(struct net_device *net_dev) |
|---|
| 334 | +static void dpaa_tx_timeout(struct net_device *net_dev, unsigned int txqueue) |
|---|
| 312 | 335 | { |
|---|
| 313 | 336 | struct dpaa_percpu_priv *percpu_priv; |
|---|
| 314 | 337 | const struct dpaa_priv *priv; |
|---|
| .. | .. |
|---|
| 485 | 508 | static bool dpaa_bpid2pool_use(int bpid) |
|---|
| 486 | 509 | { |
|---|
| 487 | 510 | if (dpaa_bpid2pool(bpid)) { |
|---|
| 488 | | - atomic_inc(&dpaa_bp_array[bpid]->refs); |
|---|
| 511 | + refcount_inc(&dpaa_bp_array[bpid]->refs); |
|---|
| 489 | 512 | return true; |
|---|
| 490 | 513 | } |
|---|
| 491 | 514 | |
|---|
| .. | .. |
|---|
| 496 | 519 | static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp) |
|---|
| 497 | 520 | { |
|---|
| 498 | 521 | dpaa_bp_array[bpid] = dpaa_bp; |
|---|
| 499 | | - atomic_set(&dpaa_bp->refs, 1); |
|---|
| 522 | + refcount_set(&dpaa_bp->refs, 1); |
|---|
| 500 | 523 | } |
|---|
| 501 | 524 | |
|---|
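Switching from `atomic_t` to `refcount_t` buys saturation and underflow warnings from the refcount API without changing the lifetime scheme. As a reminder, the usual pattern looks like this (generic sketch with a hypothetical object, not the `dpaa_bp` code itself):

```c
#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
	refcount_t refs;
};

static void obj_init(struct obj *o) { refcount_set(&o->refs, 1); }
static void obj_get(struct obj *o)  { refcount_inc(&o->refs); }

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refs))
		kfree(o); /* last reference dropped */
}
```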
| 502 | 525 | static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp) |
|---|
| .. | .. |
|---|
| 584 | 607 | if (!bp) |
|---|
| 585 | 608 | return; |
|---|
| 586 | 609 | |
|---|
| 587 | | - if (!atomic_dec_and_test(&bp->refs)) |
|---|
| 610 | + if (!refcount_dec_and_test(&bp->refs)) |
|---|
| 588 | 611 | return; |
|---|
| 589 | 612 | |
|---|
| 590 | 613 | if (bp->free_buf_cb) |
|---|
| .. | .. |
|---|
| 596 | 619 | |
|---|
| 597 | 620 | static void dpaa_bps_free(struct dpaa_priv *priv) |
|---|
| 598 | 621 | { |
|---|
| 599 | | - int i; |
|---|
| 600 | | - |
|---|
| 601 | | - for (i = 0; i < DPAA_BPS_NUM; i++) |
|---|
| 602 | | - dpaa_bp_free(priv->dpaa_bps[i]); |
|---|
| 622 | + dpaa_bp_free(priv->dpaa_bp); |
|---|
| 603 | 623 | } |
|---|
| 604 | 624 | |
|---|
| 605 | 625 | /* Use multiple WQs for FQ assignment: |
|---|
| .. | .. |
|---|
| 773 | 793 | qman_release_pool(rx_pool_channel); |
|---|
| 774 | 794 | } |
|---|
| 775 | 795 | |
|---|
| 776 | | -static void dpaa_eth_add_channel(u16 channel) |
|---|
| 796 | +static void dpaa_eth_add_channel(u16 channel, struct device *dev) |
|---|
| 777 | 797 | { |
|---|
| 778 | 798 | u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel); |
|---|
| 779 | 799 | const cpumask_t *cpus = qman_affine_cpus(); |
|---|
| 780 | 800 | struct qman_portal *portal; |
|---|
| 781 | 801 | int cpu; |
|---|
| 782 | 802 | |
|---|
| 783 | | - for_each_cpu(cpu, cpus) { |
|---|
| 803 | + for_each_cpu_and(cpu, cpus, cpu_online_mask) { |
|---|
| 784 | 804 | portal = qman_get_affine_portal(cpu); |
|---|
| 785 | 805 | qman_p_static_dequeue_add(portal, pool); |
|---|
| 806 | + qman_start_using_portal(portal, dev); |
|---|
| 786 | 807 | } |
|---|
| 787 | 808 | } |
|---|
| 788 | 809 | |
|---|
| .. | .. |
|---|
| 896 | 917 | u16 channels[NR_CPUS]; |
|---|
| 897 | 918 | struct dpaa_fq *fq; |
|---|
| 898 | 919 | |
|---|
| 899 | | - for_each_cpu(cpu, affine_cpus) |
|---|
| 920 | + for_each_cpu_and(cpu, affine_cpus, cpu_online_mask) |
|---|
| 900 | 921 | channels[num_portals++] = qman_affine_channel(cpu); |
|---|
| 901 | 922 | |
|---|
| 902 | 923 | if (num_portals == 0) |
|---|
| 903 | 924 | dev_err(priv->net_dev->dev.parent, |
|---|
| 904 | | - "No Qman software (affine) channels found"); |
|---|
| 925 | + "No Qman software (affine) channels found\n"); |
|---|
| 905 | 926 | |
|---|
| 906 | 927 | /* Initialize each FQ in the list */ |
|---|
| 907 | 928 | list_for_each_entry(fq, &priv->dpaa_fq_list, list) { |
|---|
| .. | .. |
|---|
| 929 | 950 | break; |
|---|
| 930 | 951 | case FQ_TYPE_TX_CONF_MQ: |
|---|
| 931 | 952 | priv->conf_fqs[conf_cnt++] = &fq->fq_base; |
|---|
| 932 | | - /* fall through */ |
|---|
| 953 | + fallthrough; |
|---|
| 933 | 954 | case FQ_TYPE_TX_CONFIRM: |
|---|
| 934 | 955 | dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq); |
|---|
| 935 | 956 | break; |
|---|
| .. | .. |
|---|
| 1197 | 1218 | return err; |
|---|
| 1198 | 1219 | } |
|---|
| 1199 | 1220 | |
|---|
| 1200 | | -static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps, |
|---|
| 1201 | | - size_t count, struct dpaa_fq *errq, |
|---|
| 1221 | +static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp, |
|---|
| 1222 | + struct dpaa_fq *errq, |
|---|
| 1202 | 1223 | struct dpaa_fq *defq, struct dpaa_fq *pcdq, |
|---|
| 1203 | 1224 | struct dpaa_buffer_layout *buf_layout) |
|---|
| 1204 | 1225 | { |
|---|
| 1205 | 1226 | struct fman_buffer_prefix_content buf_prefix_content; |
|---|
| 1206 | 1227 | struct fman_port_rx_params *rx_p; |
|---|
| 1207 | 1228 | struct fman_port_params params; |
|---|
| 1208 | | - int i, err; |
|---|
| 1229 | + int err; |
|---|
| 1209 | 1230 | |
|---|
| 1210 | 1231 | memset(¶ms, 0, sizeof(params)); |
|---|
| 1211 | 1232 | memset(&buf_prefix_content, 0, sizeof(buf_prefix_content)); |
|---|
| .. | .. |
|---|
| 1214 | 1235 | buf_prefix_content.pass_prs_result = true; |
|---|
| 1215 | 1236 | buf_prefix_content.pass_hash_result = true; |
|---|
| 1216 | 1237 | buf_prefix_content.pass_time_stamp = true; |
|---|
| 1217 | | - buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT; |
|---|
| 1238 | + buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT; |
|---|
| 1218 | 1239 | |
|---|
| 1219 | 1240 | rx_p = ¶ms.specific_params.rx_params; |
|---|
| 1220 | 1241 | rx_p->err_fqid = errq->fqid; |
|---|
| .. | .. |
|---|
| 1224 | 1245 | rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM; |
|---|
| 1225 | 1246 | } |
|---|
| 1226 | 1247 | |
|---|
| 1227 | | - count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count); |
|---|
| 1228 | | - rx_p->ext_buf_pools.num_of_pools_used = (u8)count; |
|---|
| 1229 | | - for (i = 0; i < count; i++) { |
|---|
| 1230 | | - rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid; |
|---|
| 1231 | | - rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size; |
|---|
| 1232 | | - } |
|---|
| 1248 | + rx_p->ext_buf_pools.num_of_pools_used = 1; |
|---|
| 1249 | + rx_p->ext_buf_pools.ext_buf_pool[0].id = bp->bpid; |
|---|
| 1250 | + rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size; |
|---|
| 1233 | 1251 | |
|---|
| 1234 | 1252 | err = fman_port_config(port, ¶ms); |
|---|
| 1235 | 1253 | if (err) { |
|---|
| .. | .. |
|---|
| 1252 | 1270 | } |
|---|
| 1253 | 1271 | |
|---|
| 1254 | 1272 | static int dpaa_eth_init_ports(struct mac_device *mac_dev, |
|---|
| 1255 | | - struct dpaa_bp **bps, size_t count, |
|---|
| 1273 | + struct dpaa_bp *bp, |
|---|
| 1256 | 1274 | struct fm_port_fqs *port_fqs, |
|---|
| 1257 | 1275 | struct dpaa_buffer_layout *buf_layout, |
|---|
| 1258 | 1276 | struct device *dev) |
|---|
| .. | .. |
|---|
| 1266 | 1284 | if (err) |
|---|
| 1267 | 1285 | return err; |
|---|
| 1268 | 1286 | |
|---|
| 1269 | | - err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq, |
|---|
| 1287 | + err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq, |
|---|
| 1270 | 1288 | port_fqs->rx_defq, port_fqs->rx_pcdq, |
|---|
| 1271 | 1289 | &buf_layout[RX]); |
|---|
| 1272 | 1290 | |
|---|
| .. | .. |
|---|
| 1280 | 1298 | |
|---|
| 1281 | 1299 | err = bman_release(dpaa_bp->pool, bmb, cnt); |
|---|
| 1282 | 1300 | /* Should never occur, address anyway to avoid leaking the buffers */ |
|---|
| 1283 | | - if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb) |
|---|
| 1301 | + if (WARN_ON(err) && dpaa_bp->free_buf_cb) |
|---|
| 1284 | 1302 | while (cnt-- > 0) |
|---|
| 1285 | 1303 | dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]); |
|---|
| 1286 | 1304 | |
|---|
| .. | .. |
|---|
| 1335 | 1353 | vaddr = phys_to_virt(qm_fd_addr(fd)); |
|---|
| 1336 | 1354 | sgt = vaddr + qm_fd_get_offset(fd); |
|---|
| 1337 | 1355 | |
|---|
| 1338 | | - dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size, |
|---|
| 1339 | | - DMA_FROM_DEVICE); |
|---|
| 1356 | + dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd), |
|---|
| 1357 | + DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); |
|---|
| 1340 | 1358 | |
|---|
| 1341 | 1359 | dpaa_release_sgt_members(sgt); |
|---|
| 1342 | 1360 | |
|---|
| 1343 | | - addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size, |
|---|
| 1344 | | - DMA_FROM_DEVICE); |
|---|
| 1345 | | - if (dma_mapping_error(dpaa_bp->dev, addr)) { |
|---|
| 1346 | | - dev_err(dpaa_bp->dev, "DMA mapping failed"); |
|---|
| 1361 | + addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, |
|---|
| 1362 | + virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE, |
|---|
| 1363 | + DMA_FROM_DEVICE); |
|---|
| 1364 | + if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) { |
|---|
| 1365 | + netdev_err(net_dev, "DMA mapping failed\n"); |
|---|
| 1347 | 1366 | return; |
|---|
| 1348 | 1367 | } |
|---|
| 1349 | 1368 | bm_buffer_set64(&bmb, addr); |
|---|
| .. | .. |
|---|
| 1396 | 1415 | static int dpaa_enable_tx_csum(struct dpaa_priv *priv, |
|---|
| 1397 | 1416 | struct sk_buff *skb, |
|---|
| 1398 | 1417 | struct qm_fd *fd, |
|---|
| 1399 | | - char *parse_results) |
|---|
| 1418 | + void *parse_results) |
|---|
| 1400 | 1419 | { |
|---|
| 1401 | 1420 | struct fman_prs_result *parse_result; |
|---|
| 1402 | 1421 | u16 ethertype = ntohs(skb->protocol); |
|---|
| .. | .. |
|---|
| 1488 | 1507 | |
|---|
| 1489 | 1508 | static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp) |
|---|
| 1490 | 1509 | { |
|---|
| 1491 | | - struct device *dev = dpaa_bp->dev; |
|---|
| 1510 | + struct net_device *net_dev = dpaa_bp->priv->net_dev; |
|---|
| 1492 | 1511 | struct bm_buffer bmb[8]; |
|---|
| 1493 | 1512 | dma_addr_t addr; |
|---|
| 1494 | | - void *new_buf; |
|---|
| 1513 | + struct page *p; |
|---|
| 1495 | 1514 | u8 i; |
|---|
| 1496 | 1515 | |
|---|
| 1497 | 1516 | for (i = 0; i < 8; i++) { |
|---|
| 1498 | | - new_buf = netdev_alloc_frag(dpaa_bp->raw_size); |
|---|
| 1499 | | - if (unlikely(!new_buf)) { |
|---|
| 1500 | | - dev_err(dev, "netdev_alloc_frag() failed, size %zu\n", |
|---|
| 1501 | | - dpaa_bp->raw_size); |
|---|
| 1517 | + p = dev_alloc_pages(0); |
|---|
| 1518 | + if (unlikely(!p)) { |
|---|
| 1519 | + netdev_err(net_dev, "dev_alloc_pages() failed\n"); |
|---|
| 1502 | 1520 | goto release_previous_buffs; |
|---|
| 1503 | 1521 | } |
|---|
| 1504 | | - new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES); |
|---|
| 1505 | 1522 | |
|---|
| 1506 | | - addr = dma_map_single(dev, new_buf, |
|---|
| 1507 | | - dpaa_bp->size, DMA_FROM_DEVICE); |
|---|
| 1508 | | - if (unlikely(dma_mapping_error(dev, addr))) { |
|---|
| 1509 | | - dev_err(dpaa_bp->dev, "DMA map failed"); |
|---|
| 1523 | + addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0, |
|---|
| 1524 | + DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); |
|---|
| 1525 | + if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev, |
|---|
| 1526 | + addr))) { |
|---|
| 1527 | + netdev_err(net_dev, "DMA map failed\n"); |
|---|
| 1510 | 1528 | goto release_previous_buffs; |
|---|
| 1511 | 1529 | } |
|---|
| 1512 | 1530 | |
|---|
| .. | .. |
|---|
| 1581 | 1599 | { |
|---|
| 1582 | 1600 | struct dpaa_bp *dpaa_bp; |
|---|
| 1583 | 1601 | int *countptr; |
|---|
| 1584 | | - int res, i; |
|---|
| 1602 | + int res; |
|---|
| 1585 | 1603 | |
|---|
| 1586 | | - for (i = 0; i < DPAA_BPS_NUM; i++) { |
|---|
| 1587 | | - dpaa_bp = priv->dpaa_bps[i]; |
|---|
| 1588 | | - if (!dpaa_bp) |
|---|
| 1589 | | - return -EINVAL; |
|---|
| 1590 | | - countptr = this_cpu_ptr(dpaa_bp->percpu_count); |
|---|
| 1591 | | - res = dpaa_eth_refill_bpool(dpaa_bp, countptr); |
|---|
| 1592 | | - if (res) |
|---|
| 1593 | | - return res; |
|---|
| 1594 | | - } |
|---|
| 1604 | + dpaa_bp = priv->dpaa_bp; |
|---|
| 1605 | + if (!dpaa_bp) |
|---|
| 1606 | + return -EINVAL; |
|---|
| 1607 | + countptr = this_cpu_ptr(dpaa_bp->percpu_count); |
|---|
| 1608 | + res = dpaa_eth_refill_bpool(dpaa_bp, countptr); |
|---|
| 1609 | + if (res) |
|---|
| 1610 | + return res; |
|---|
| 1611 | + |
|---|
| 1595 | 1612 | return 0; |
|---|
| 1596 | 1613 | } |
|---|
| 1597 | 1614 | |
|---|
| .. | .. |
|---|
| 1614 | 1631 | struct device *dev = priv->net_dev->dev.parent; |
|---|
| 1615 | 1632 | struct skb_shared_hwtstamps shhwtstamps; |
|---|
| 1616 | 1633 | dma_addr_t addr = qm_fd_addr(fd); |
|---|
| 1634 | + void *vaddr = phys_to_virt(addr); |
|---|
| 1617 | 1635 | const struct qm_sg_entry *sgt; |
|---|
| 1618 | | - struct sk_buff **skbh, *skb; |
|---|
| 1619 | | - int nr_frags, i; |
|---|
| 1636 | + struct sk_buff *skb; |
|---|
| 1620 | 1637 | u64 ns; |
|---|
| 1621 | | - |
|---|
| 1622 | | - skbh = (struct sk_buff **)phys_to_virt(addr); |
|---|
| 1623 | | - skb = *skbh; |
|---|
| 1638 | + int i; |
|---|
| 1624 | 1639 | |
|---|
| 1625 | 1640 | if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { |
|---|
| 1626 | | - nr_frags = skb_shinfo(skb)->nr_frags; |
|---|
| 1627 | | - dma_unmap_single(dev, addr, |
|---|
| 1628 | | - qm_fd_get_offset(fd) + DPAA_SGT_SIZE, |
|---|
| 1629 | | - dma_dir); |
|---|
| 1641 | + dma_unmap_page(priv->tx_dma_dev, addr, |
|---|
| 1642 | + qm_fd_get_offset(fd) + DPAA_SGT_SIZE, |
|---|
| 1643 | + dma_dir); |
|---|
| 1630 | 1644 | |
|---|
| 1631 | 1645 | /* The sgt buffer has been allocated with netdev_alloc_frag(), |
|---|
| 1632 | 1646 | * it's from lowmem. |
|---|
| 1633 | 1647 | */ |
|---|
| 1634 | | - sgt = phys_to_virt(addr + qm_fd_get_offset(fd)); |
|---|
| 1648 | + sgt = vaddr + qm_fd_get_offset(fd); |
|---|
| 1635 | 1649 | |
|---|
| 1636 | 1650 | /* sgt[0] is from lowmem, was dma_map_single()-ed */ |
|---|
| 1637 | | - dma_unmap_single(dev, qm_sg_addr(&sgt[0]), |
|---|
| 1651 | + dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]), |
|---|
| 1638 | 1652 | qm_sg_entry_get_len(&sgt[0]), dma_dir); |
|---|
| 1639 | 1653 | |
|---|
| 1640 | 1654 | /* remaining pages were mapped with skb_frag_dma_map() */ |
|---|
| 1641 | | - for (i = 1; i <= nr_frags; i++) { |
|---|
| 1655 | + for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) && |
|---|
| 1656 | + !qm_sg_entry_is_final(&sgt[i - 1]); i++) { |
|---|
| 1642 | 1657 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); |
|---|
| 1643 | 1658 | |
|---|
| 1644 | | - dma_unmap_page(dev, qm_sg_addr(&sgt[i]), |
|---|
| 1659 | + dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]), |
|---|
| 1645 | 1660 | qm_sg_entry_get_len(&sgt[i]), dma_dir); |
|---|
| 1646 | 1661 | } |
|---|
| 1647 | 1662 | } else { |
|---|
| 1648 | | - dma_unmap_single(dev, addr, |
|---|
| 1649 | | - skb_tail_pointer(skb) - (u8 *)skbh, dma_dir); |
|---|
| 1663 | + dma_unmap_single(priv->tx_dma_dev, addr, |
|---|
| 1664 | + priv->tx_headroom + qm_fd_get_length(fd), |
|---|
| 1665 | + dma_dir); |
|---|
| 1650 | 1666 | } |
|---|
| 1667 | + |
|---|
| 1668 | + skb = *(struct sk_buff **)vaddr; |
|---|
| 1651 | 1669 | |
|---|
| 1652 | 1670 | /* DMA unmapping is required before accessing the HW provided info */ |
|---|
| 1653 | 1671 | if (ts && priv->tx_tstamp && |
|---|
| 1654 | 1672 | skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { |
|---|
| 1655 | 1673 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
|---|
| 1656 | 1674 | |
|---|
| 1657 | | - if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh, |
|---|
| 1675 | + if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr, |
|---|
| 1658 | 1676 | &ns)) { |
|---|
| 1659 | 1677 | shhwtstamps.hwtstamp = ns_to_ktime(ns); |
|---|
| 1660 | 1678 | skb_tstamp_tx(skb, &shhwtstamps); |
|---|
| .. | .. |
|---|
| 1664 | 1682 | } |
|---|
| 1665 | 1683 | |
|---|
| 1666 | 1684 | if (qm_fd_get_format(fd) == qm_fd_sg) |
|---|
| 1667 | | - /* Free the page frag that we allocated on Tx */ |
|---|
| 1668 | | - skb_free_frag(phys_to_virt(addr)); |
|---|
| 1685 | + /* Free the page that we allocated on Tx for the SGT */ |
|---|
| 1686 | + free_pages((unsigned long)vaddr, 0); |
|---|
| 1669 | 1687 | |
|---|
| 1670 | 1688 | return skb; |
|---|
| 1671 | 1689 | } |
|---|
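The unmap loop above no longer trusts `skb_shinfo(skb)->nr_frags` (the skb pointer is only safe to read after the unmap) and instead walks the in-memory SG table until its final bit. A condensed sketch of that delimiting rule (hypothetical helper, assumes a well-formed table with the final bit set):

```c
static int example_sgt_nr_entries(const struct qm_sg_entry *sgt)
{
	int i = 0;

	/* entries up to the one carrying the Final bit belong to the frame */
	while (i < DPAA_SGT_MAX_ENTRIES - 1 && !qm_sg_entry_is_final(&sgt[i]))
		i++;

	return i + 1; /* include the final entry itself */
}
```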
| .. | .. |
|---|
| 1686 | 1704 | */ |
|---|
| 1687 | 1705 | return CHECKSUM_NONE; |
|---|
| 1688 | 1706 | } |
|---|
| 1707 | + |
|---|
| 1708 | +#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a))) |
|---|
| 1689 | 1709 | |
|---|
| 1690 | 1710 | /* Build a linear skb around the received buffer. |
|---|
| 1691 | 1711 | * We are guaranteed there is enough room at the end of the data buffer to |
|---|
| .. | .. |
|---|
| 1709 | 1729 | |
|---|
| 1710 | 1730 | skb = build_skb(vaddr, dpaa_bp->size + |
|---|
| 1711 | 1731 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); |
|---|
| 1712 | | - if (unlikely(!skb)) { |
|---|
| 1713 | | - WARN_ONCE(1, "Build skb failure on Rx\n"); |
|---|
| 1732 | + if (WARN_ONCE(!skb, "Build skb failure on Rx\n")) |
|---|
| 1714 | 1733 | goto free_buffer; |
|---|
| 1715 | | - } |
|---|
| 1716 | 1734 | WARN_ON(fd_off != priv->rx_headroom); |
|---|
| 1717 | 1735 | skb_reserve(skb, fd_off); |
|---|
| 1718 | 1736 | skb_put(skb, qm_fd_get_length(fd)); |
|---|
| .. | .. |
|---|
| 1722 | 1740 | return skb; |
|---|
| 1723 | 1741 | |
|---|
| 1724 | 1742 | free_buffer: |
|---|
| 1725 | | - skb_free_frag(vaddr); |
|---|
| 1743 | + free_pages((unsigned long)vaddr, 0); |
|---|
| 1726 | 1744 | return NULL; |
|---|
| 1727 | 1745 | } |
|---|
| 1728 | 1746 | |
|---|
| .. | .. |
|---|
| 1746 | 1764 | int page_offset; |
|---|
| 1747 | 1765 | unsigned int sz; |
|---|
| 1748 | 1766 | int *count_ptr; |
|---|
| 1749 | | - int i; |
|---|
| 1767 | + int i, j; |
|---|
| 1750 | 1768 | |
|---|
| 1751 | 1769 | vaddr = phys_to_virt(addr); |
|---|
| 1752 | 1770 | WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); |
|---|
| .. | .. |
|---|
| 1760 | 1778 | |
|---|
| 1761 | 1779 | sg_addr = qm_sg_addr(&sgt[i]); |
|---|
| 1762 | 1780 | sg_vaddr = phys_to_virt(sg_addr); |
|---|
| 1763 | | - WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, |
|---|
| 1764 | | - SMP_CACHE_BYTES)); |
|---|
| 1781 | + WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES)); |
|---|
| 1782 | + |
|---|
| 1783 | + dma_unmap_page(priv->rx_dma_dev, sg_addr, |
|---|
| 1784 | + DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); |
|---|
| 1765 | 1785 | |
|---|
| 1766 | 1786 | /* We may use multiple Rx pools */ |
|---|
| 1767 | 1787 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); |
|---|
| 1768 | 1788 | if (!dpaa_bp) |
|---|
| 1769 | 1789 | goto free_buffers; |
|---|
| 1770 | 1790 | |
|---|
| 1771 | | - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
|---|
| 1772 | | - dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size, |
|---|
| 1773 | | - DMA_FROM_DEVICE); |
|---|
| 1774 | 1791 | if (!skb) { |
|---|
| 1775 | 1792 | sz = dpaa_bp->size + |
|---|
| 1776 | 1793 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); |
|---|
| 1777 | 1794 | skb = build_skb(sg_vaddr, sz); |
|---|
| 1778 | | - if (WARN_ON(unlikely(!skb))) |
|---|
| 1795 | + if (WARN_ON(!skb)) |
|---|
| 1779 | 1796 | goto free_buffers; |
|---|
| 1780 | 1797 | |
|---|
| 1781 | 1798 | skb->ip_summed = rx_csum_offload(priv, fd); |
|---|
| .. | .. |
|---|
| 1813 | 1830 | skb_add_rx_frag(skb, i - 1, head_page, frag_off, |
|---|
| 1814 | 1831 | frag_len, dpaa_bp->size); |
|---|
| 1815 | 1832 | } |
|---|
| 1833 | + |
|---|
| 1816 | 1834 | /* Update the pool count for the current {cpu x bpool} */ |
|---|
| 1835 | + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
|---|
| 1817 | 1836 | (*count_ptr)--; |
|---|
| 1818 | 1837 | |
|---|
| 1819 | 1838 | if (qm_sg_entry_is_final(&sgt[i])) |
|---|
| .. | .. |
|---|
| 1822 | 1841 | WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); |
|---|
| 1823 | 1842 | |
|---|
| 1824 | 1843 | /* free the SG table buffer */ |
|---|
| 1825 | | - skb_free_frag(vaddr); |
|---|
| 1844 | + free_pages((unsigned long)vaddr, 0); |
|---|
| 1826 | 1845 | |
|---|
| 1827 | 1846 | return skb; |
|---|
| 1828 | 1847 | |
|---|
| 1829 | 1848 | free_buffers: |
|---|
| 1830 | | - /* compensate sw bpool counter changes */ |
|---|
| 1831 | | - for (i--; i >= 0; i--) { |
|---|
| 1832 | | - dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); |
|---|
| 1833 | | - if (dpaa_bp) { |
|---|
| 1834 | | - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
|---|
| 1835 | | - (*count_ptr)++; |
|---|
| 1836 | | - } |
|---|
| 1837 | | - } |
|---|
| 1838 | 1849 | /* free all the SG entries */ |
|---|
| 1839 | | - for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) { |
|---|
| 1840 | | - sg_addr = qm_sg_addr(&sgt[i]); |
|---|
| 1850 | + for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) { |
|---|
| 1851 | + sg_addr = qm_sg_addr(&sgt[j]); |
|---|
| 1841 | 1852 | sg_vaddr = phys_to_virt(sg_addr); |
|---|
| 1842 | | - skb_free_frag(sg_vaddr); |
|---|
| 1843 | | - dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); |
|---|
| 1844 | | - if (dpaa_bp) { |
|---|
| 1845 | | - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
|---|
| 1846 | | - (*count_ptr)--; |
|---|
| 1853 | +		/* all pages 0..i were unmapped */
|---|
| 1854 | + if (j > i) |
|---|
| 1855 | + dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]), |
|---|
| 1856 | + DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); |
|---|
| 1857 | + free_pages((unsigned long)sg_vaddr, 0); |
|---|
| 1858 | + /* counters 0..i-1 were decremented */ |
|---|
| 1859 | + if (j >= i) { |
|---|
| 1860 | + dpaa_bp = dpaa_bpid2pool(sgt[j].bpid); |
|---|
| 1861 | + if (dpaa_bp) { |
|---|
| 1862 | + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); |
|---|
| 1863 | + (*count_ptr)--; |
|---|
| 1864 | + } |
|---|
| 1847 | 1865 | } |
|---|
| 1848 | 1866 | |
|---|
| 1849 | | - if (qm_sg_entry_is_final(&sgt[i])) |
|---|
| 1867 | + if (qm_sg_entry_is_final(&sgt[j])) |
|---|
| 1850 | 1868 | break; |
|---|
| 1851 | 1869 | } |
|---|
| 1852 | 1870 | /* free the SGT fragment */ |
|---|
| 1853 | | - skb_free_frag(vaddr); |
|---|
| 1871 | + free_pages((unsigned long)vaddr, 0); |
|---|
| 1854 | 1872 | |
|---|
| 1855 | 1873 | return NULL; |
|---|
| 1856 | 1874 | } |
|---|
| .. | .. |
|---|
| 1860 | 1878 | int *offset) |
|---|
| 1861 | 1879 | { |
|---|
| 1862 | 1880 | struct net_device *net_dev = priv->net_dev; |
|---|
| 1863 | | - struct device *dev = net_dev->dev.parent; |
|---|
| 1864 | 1881 | enum dma_data_direction dma_dir; |
|---|
| 1865 | | - unsigned char *buffer_start; |
|---|
| 1882 | + unsigned char *buff_start; |
|---|
| 1866 | 1883 | struct sk_buff **skbh; |
|---|
| 1867 | 1884 | dma_addr_t addr; |
|---|
| 1868 | 1885 | int err; |
|---|
| .. | .. |
|---|
| 1871 | 1888 | * available, so just use that for offset. |
|---|
| 1872 | 1889 | */ |
|---|
| 1873 | 1890 | fd->bpid = FSL_DPAA_BPID_INV; |
|---|
| 1874 | | - buffer_start = skb->data - priv->tx_headroom; |
|---|
| 1891 | + buff_start = skb->data - priv->tx_headroom; |
|---|
| 1875 | 1892 | dma_dir = DMA_TO_DEVICE; |
|---|
| 1876 | 1893 | |
|---|
| 1877 | | - skbh = (struct sk_buff **)buffer_start; |
|---|
| 1894 | + skbh = (struct sk_buff **)buff_start; |
|---|
| 1878 | 1895 | *skbh = skb; |
|---|
| 1879 | 1896 | |
|---|
| 1880 | 1897 | /* Enable L3/L4 hardware checksum computation. |
|---|
| .. | .. |
|---|
| 1883 | 1900 | * need to write into the skb. |
|---|
| 1884 | 1901 | */ |
|---|
| 1885 | 1902 | err = dpaa_enable_tx_csum(priv, skb, fd, |
|---|
| 1886 | | - ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE); |
|---|
| 1903 | + buff_start + DPAA_TX_PRIV_DATA_SIZE); |
|---|
| 1887 | 1904 | if (unlikely(err < 0)) { |
|---|
| 1888 | 1905 | if (net_ratelimit()) |
|---|
| 1889 | 1906 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", |
|---|
| .. | .. |
|---|
| 1896 | 1913 | fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); |
|---|
| 1897 | 1914 | |
|---|
| 1898 | 1915 | /* Map the entire buffer size that may be seen by FMan, but no more */ |
|---|
| 1899 | | - addr = dma_map_single(dev, skbh, |
|---|
| 1900 | | - skb_tail_pointer(skb) - buffer_start, dma_dir); |
|---|
| 1901 | | - if (unlikely(dma_mapping_error(dev, addr))) { |
|---|
| 1916 | + addr = dma_map_single(priv->tx_dma_dev, buff_start, |
|---|
| 1917 | + priv->tx_headroom + skb->len, dma_dir); |
|---|
| 1918 | + if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { |
|---|
| 1902 | 1919 | if (net_ratelimit()) |
|---|
| 1903 | 1920 | netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); |
|---|
| 1904 | 1921 | return -EINVAL; |
|---|
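Both Tx paths rely on the same convention: a pointer to the skb is stored at the very start of the mapped buffer (inside the Tx private data area), so the confirmation path can recover it from the frame descriptor address alone. A minimal sketch of that round trip (helper names are illustrative):

```c
/* On transmit: stash the skb pointer at the start of the headroom. */
static void example_store_skb(void *buff_start, struct sk_buff *skb)
{
	*(struct sk_buff **)buff_start = skb;
}

/* On Tx confirmation: recover it from the FD's buffer address, as
 * dpaa_cleanup_tx_fd() does after unmapping.
 */
static struct sk_buff *example_recover_skb(const struct qm_fd *fd)
{
	void *vaddr = phys_to_virt(qm_fd_addr(fd));

	return *(struct sk_buff **)vaddr;
}
```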
| .. | .. |
|---|
| 1914 | 1931 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; |
|---|
| 1915 | 1932 | const int nr_frags = skb_shinfo(skb)->nr_frags; |
|---|
| 1916 | 1933 | struct net_device *net_dev = priv->net_dev; |
|---|
| 1917 | | - struct device *dev = net_dev->dev.parent; |
|---|
| 1918 | 1934 | struct qm_sg_entry *sgt; |
|---|
| 1919 | 1935 | struct sk_buff **skbh; |
|---|
| 1920 | | - int i, j, err, sz; |
|---|
| 1921 | | - void *buffer_start; |
|---|
| 1936 | + void *buff_start; |
|---|
| 1922 | 1937 | skb_frag_t *frag; |
|---|
| 1923 | 1938 | dma_addr_t addr; |
|---|
| 1924 | 1939 | size_t frag_len; |
|---|
| 1925 | | - void *sgt_buf; |
|---|
| 1940 | + struct page *p; |
|---|
| 1941 | + int i, j, err; |
|---|
| 1926 | 1942 | |
|---|
| 1927 | | - /* get a page frag to store the SGTable */ |
|---|
| 1928 | | - sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE); |
|---|
| 1929 | | - sgt_buf = netdev_alloc_frag(sz); |
|---|
| 1930 | | - if (unlikely(!sgt_buf)) { |
|---|
| 1931 | | - netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", |
|---|
| 1932 | | - sz); |
|---|
| 1943 | + /* get a page to store the SGTable */ |
|---|
| 1944 | + p = dev_alloc_pages(0); |
|---|
| 1945 | + if (unlikely(!p)) { |
|---|
| 1946 | + netdev_err(net_dev, "dev_alloc_pages() failed\n"); |
|---|
| 1933 | 1947 | return -ENOMEM; |
|---|
| 1934 | 1948 | } |
|---|
| 1949 | + buff_start = page_address(p); |
|---|
| 1935 | 1950 | |
|---|
| 1936 | 1951 | /* Enable L3/L4 hardware checksum computation. |
|---|
| 1937 | 1952 | * |
|---|
| .. | .. |
|---|
| 1939 | 1954 | * need to write into the skb. |
|---|
| 1940 | 1955 | */ |
|---|
| 1941 | 1956 | err = dpaa_enable_tx_csum(priv, skb, fd, |
|---|
| 1942 | | - sgt_buf + DPAA_TX_PRIV_DATA_SIZE); |
|---|
| 1957 | + buff_start + DPAA_TX_PRIV_DATA_SIZE); |
|---|
| 1943 | 1958 | if (unlikely(err < 0)) { |
|---|
| 1944 | 1959 | if (net_ratelimit()) |
|---|
| 1945 | 1960 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", |
|---|
| .. | .. |
|---|
| 1948 | 1963 | } |
|---|
| 1949 | 1964 | |
|---|
| 1950 | 1965 | /* SGT[0] is used by the linear part */ |
|---|
| 1951 | | - sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); |
|---|
| 1966 | + sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom); |
|---|
| 1952 | 1967 | frag_len = skb_headlen(skb); |
|---|
| 1953 | 1968 | qm_sg_entry_set_len(&sgt[0], frag_len); |
|---|
| 1954 | 1969 | sgt[0].bpid = FSL_DPAA_BPID_INV; |
|---|
| 1955 | 1970 | sgt[0].offset = 0; |
|---|
| 1956 | | - addr = dma_map_single(dev, skb->data, |
|---|
| 1971 | + addr = dma_map_single(priv->tx_dma_dev, skb->data, |
|---|
| 1957 | 1972 | skb_headlen(skb), dma_dir); |
|---|
| 1958 | | - if (unlikely(dma_mapping_error(dev, addr))) { |
|---|
| 1959 | | - dev_err(dev, "DMA mapping failed"); |
|---|
| 1973 | + if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { |
|---|
| 1974 | + netdev_err(priv->net_dev, "DMA mapping failed\n"); |
|---|
| 1960 | 1975 | err = -EINVAL; |
|---|
| 1961 | 1976 | goto sg0_map_failed; |
|---|
| 1962 | 1977 | } |
|---|
| .. | .. |
|---|
| 1965 | 1980 | /* populate the rest of SGT entries */ |
|---|
| 1966 | 1981 | for (i = 0; i < nr_frags; i++) { |
|---|
| 1967 | 1982 | frag = &skb_shinfo(skb)->frags[i]; |
|---|
| 1968 | | - frag_len = frag->size; |
|---|
| 1983 | + frag_len = skb_frag_size(frag); |
|---|
| 1969 | 1984 | WARN_ON(!skb_frag_page(frag)); |
|---|
| 1970 | | - addr = skb_frag_dma_map(dev, frag, 0, |
|---|
| 1985 | + addr = skb_frag_dma_map(priv->tx_dma_dev, frag, 0, |
|---|
| 1971 | 1986 | frag_len, dma_dir); |
|---|
| 1972 | | - if (unlikely(dma_mapping_error(dev, addr))) { |
|---|
| 1973 | | - dev_err(dev, "DMA mapping failed"); |
|---|
| 1987 | + if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { |
|---|
| 1988 | + netdev_err(priv->net_dev, "DMA mapping failed\n"); |
|---|
| 1974 | 1989 | err = -EINVAL; |
|---|
| 1975 | 1990 | goto sg_map_failed; |
|---|
| 1976 | 1991 | } |
|---|
| .. | .. |
|---|
| 1986 | 2001 | /* Set the final bit in the last used entry of the SGT */ |
|---|
| 1987 | 2002 | qm_sg_entry_set_f(&sgt[nr_frags], frag_len); |
|---|
| 1988 | 2003 | |
|---|
| 2004 | + /* set fd offset to priv->tx_headroom */ |
|---|
| 1989 | 2005 | qm_fd_set_sg(fd, priv->tx_headroom, skb->len); |
|---|
| 1990 | 2006 | |
|---|
| 1991 | 2007 | /* DMA map the SGT page */ |
|---|
| 1992 | | - buffer_start = (void *)sgt - priv->tx_headroom; |
|---|
| 1993 | | - skbh = (struct sk_buff **)buffer_start; |
|---|
| 2008 | + skbh = (struct sk_buff **)buff_start; |
|---|
| 1994 | 2009 | *skbh = skb; |
|---|
| 1995 | 2010 | |
|---|
| 1996 | | - addr = dma_map_single(dev, buffer_start, |
|---|
| 1997 | | - priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); |
|---|
| 1998 | | - if (unlikely(dma_mapping_error(dev, addr))) { |
|---|
| 1999 | | - dev_err(dev, "DMA mapping failed"); |
|---|
| 2011 | + addr = dma_map_page(priv->tx_dma_dev, p, 0, |
|---|
| 2012 | + priv->tx_headroom + DPAA_SGT_SIZE, dma_dir); |
|---|
| 2013 | + if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) { |
|---|
| 2014 | + netdev_err(priv->net_dev, "DMA mapping failed\n"); |
|---|
| 2000 | 2015 | err = -EINVAL; |
|---|
| 2001 | 2016 | goto sgt_map_failed; |
|---|
| 2002 | 2017 | } |
|---|
| .. | .. |
|---|
| 2010 | 2025 | sgt_map_failed: |
|---|
| 2011 | 2026 | sg_map_failed: |
|---|
| 2012 | 2027 | for (j = 0; j < i; j++) |
|---|
| 2013 | | - dma_unmap_page(dev, qm_sg_addr(&sgt[j]), |
|---|
| 2028 | + dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[j]), |
|---|
| 2014 | 2029 | qm_sg_entry_get_len(&sgt[j]), dma_dir); |
|---|
| 2015 | 2030 | sg0_map_failed: |
|---|
| 2016 | 2031 | csum_failed: |
|---|
| 2017 | | - skb_free_frag(sgt_buf); |
|---|
| 2032 | + free_pages((unsigned long)buff_start, 0); |
|---|
| 2018 | 2033 | |
|---|
| 2019 | 2034 | return err; |
|---|
| 2020 | 2035 | } |
|---|
| .. | .. |
|---|
| 2050 | 2065 | |
|---|
| 2051 | 2066 | return 0; |
|---|
| 2052 | 2067 | } |
|---|
| 2068 | + |
|---|
| 2069 | +#ifdef CONFIG_DPAA_ERRATUM_A050385 |
|---|
| 2070 | +static int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s) |
|---|
| 2071 | +{ |
|---|
| 2072 | + struct dpaa_priv *priv = netdev_priv(net_dev); |
|---|
| 2073 | + struct sk_buff *new_skb, *skb = *s; |
|---|
| 2074 | + unsigned char *start, i; |
|---|
| 2075 | + |
|---|
| 2076 | + /* check linear buffer alignment */ |
|---|
| 2077 | + if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN)) |
|---|
| 2078 | + goto workaround; |
|---|
| 2079 | + |
|---|
| 2080 | + /* linear buffers just need to have an aligned start */ |
|---|
| 2081 | + if (!skb_is_nonlinear(skb)) |
|---|
| 2082 | + return 0; |
|---|
| 2083 | + |
|---|
| 2084 | + /* linear data size for nonlinear skbs needs to be aligned */ |
|---|
| 2085 | + if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN)) |
|---|
| 2086 | + goto workaround; |
|---|
| 2087 | + |
|---|
| 2088 | + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
|---|
| 2089 | + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
|---|
| 2090 | + |
|---|
| 2091 | + /* all fragments need to have aligned start addresses */ |
|---|
| 2092 | + if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN)) |
|---|
| 2093 | + goto workaround; |
|---|
| 2094 | + |
|---|
| 2095 | + /* all but last fragment need to have aligned sizes */ |
|---|
| 2096 | + if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) && |
|---|
| 2097 | + (i < skb_shinfo(skb)->nr_frags - 1)) |
|---|
| 2098 | + goto workaround; |
|---|
| 2099 | + } |
|---|
| 2100 | + |
|---|
| 2101 | + return 0; |
|---|
| 2102 | + |
|---|
| 2103 | +workaround: |
|---|
| 2104 | + /* copy all the skb content into a new linear buffer */ |
|---|
| 2105 | + new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 + |
|---|
| 2106 | + priv->tx_headroom); |
|---|
| 2107 | + if (!new_skb) |
|---|
| 2108 | + return -ENOMEM; |
|---|
| 2109 | + |
|---|
| 2110 | + /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */ |
|---|
| 2111 | + skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD); |
|---|
| 2112 | + |
|---|
| 2113 | + /* Workaround for DPAA_A050385 requires data start to be aligned */ |
|---|
| 2114 | + start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN); |
|---|
| 2115 | + if (start - new_skb->data) |
|---|
| 2116 | + skb_reserve(new_skb, start - new_skb->data); |
|---|
| 2117 | + |
|---|
| 2118 | + skb_put(new_skb, skb->len); |
|---|
| 2119 | + skb_copy_bits(skb, 0, new_skb->data, skb->len); |
|---|
| 2120 | + skb_copy_header(new_skb, skb); |
|---|
| 2121 | + new_skb->dev = skb->dev; |
|---|
| 2122 | + |
|---|
| 2123 | + /* Copy relevant timestamp info from the old skb to the new */ |
|---|
| 2124 | + if (priv->tx_tstamp) { |
|---|
| 2125 | + skb_shinfo(new_skb)->tx_flags = skb_shinfo(skb)->tx_flags; |
|---|
| 2126 | + skb_shinfo(new_skb)->hwtstamps = skb_shinfo(skb)->hwtstamps; |
|---|
| 2127 | + skb_shinfo(new_skb)->tskey = skb_shinfo(skb)->tskey; |
|---|
| 2128 | + if (skb->sk) |
|---|
| 2129 | + skb_set_owner_w(new_skb, skb->sk); |
|---|
| 2130 | + } |
|---|
| 2131 | + |
|---|
| 2132 | + /* We move the headroom when we align it so we have to reset the |
|---|
| 2133 | + * network and transport header offsets relative to the new data |
|---|
| 2134 | + * pointer. The checksum offload relies on these offsets. |
|---|
| 2135 | + */ |
|---|
| 2136 | + skb_set_network_header(new_skb, skb_network_offset(skb)); |
|---|
| 2137 | + skb_set_transport_header(new_skb, skb_transport_offset(skb)); |
|---|
| 2138 | + |
|---|
| 2139 | + dev_kfree_skb(skb); |
|---|
| 2140 | + *s = new_skb; |
|---|
| 2141 | + |
|---|
| 2142 | + return 0; |
|---|
| 2143 | +} |
|---|
| 2144 | +#endif |
|---|
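The copy fallback above allocates enough room for the frame, the worst-case realignment of the data start and the Tx headroom in front of it; a sketch of that budget (hypothetical helper, mirroring the `netdev_alloc_skb()` length used in the workaround):

```c
static unsigned int example_a050385_copy_len(const struct sk_buff *skb,
					     unsigned int tx_headroom)
{
	/* frame + up to 255 bytes lost to PTR_ALIGN() + reserved headroom */
	return skb->len + DPAA_A050385_ALIGN - 1 + tx_headroom;
}
```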
| 2053 | 2145 | |
|---|
| 2054 | 2146 | static netdev_tx_t |
|---|
| 2055 | 2147 | dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) |
|---|
| .. | .. |
|---|
| 2096 | 2188 | |
|---|
| 2097 | 2189 | nonlinear = skb_is_nonlinear(skb); |
|---|
| 2098 | 2190 | } |
|---|
| 2191 | + |
|---|
| 2192 | +#ifdef CONFIG_DPAA_ERRATUM_A050385 |
|---|
| 2193 | + if (unlikely(fman_has_errata_a050385())) { |
|---|
| 2194 | + if (dpaa_a050385_wa(net_dev, &skb)) |
|---|
| 2195 | + goto enomem; |
|---|
| 2196 | + nonlinear = skb_is_nonlinear(skb); |
|---|
| 2197 | + } |
|---|
| 2198 | +#endif |
|---|
| 2099 | 2199 | |
|---|
| 2100 | 2200 | if (nonlinear) { |
|---|
| 2101 | 2201 | /* Just create a S/G fd based on the skb */ |
|---|
| .. | .. |
|---|
| 2181 | 2281 | if (cleaned < budget) { |
|---|
| 2182 | 2282 | napi_complete_done(napi, cleaned); |
|---|
| 2183 | 2283 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); |
|---|
| 2184 | | - |
|---|
| 2185 | 2284 | } else if (np->down) { |
|---|
| 2186 | 2285 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); |
|---|
| 2187 | 2286 | } |
|---|
| .. | .. |
|---|
| 2312 | 2411 | return qman_cb_dqrr_consume; |
|---|
| 2313 | 2412 | } |
|---|
| 2314 | 2413 | |
|---|
| 2315 | | - dpaa_bp = dpaa_bpid2pool(fd->bpid); |
|---|
| 2316 | | - if (!dpaa_bp) |
|---|
| 2317 | | - return qman_cb_dqrr_consume; |
|---|
| 2318 | | - |
|---|
| 2319 | | - dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE); |
|---|
| 2414 | + dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, |
|---|
| 2415 | + DMA_FROM_DEVICE); |
|---|
| 2320 | 2416 | |
|---|
| 2321 | 2417 | /* prefetch the first 64 bytes of the frame or the SGT start */ |
|---|
| 2322 | 2418 | vaddr = phys_to_virt(addr); |
|---|
| .. | .. |
|---|
| 2455 | 2551 | struct dpaa_percpu_priv *percpu_priv; |
|---|
| 2456 | 2552 | int i; |
|---|
| 2457 | 2553 | |
|---|
| 2458 | | - for_each_possible_cpu(i) { |
|---|
| 2554 | + for_each_online_cpu(i) { |
|---|
| 2459 | 2555 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); |
|---|
| 2460 | 2556 | |
|---|
| 2461 | 2557 | percpu_priv->np.down = 0; |
|---|
| .. | .. |
|---|
| 2468 | 2564 | struct dpaa_percpu_priv *percpu_priv; |
|---|
| 2469 | 2565 | int i; |
|---|
| 2470 | 2566 | |
|---|
| 2471 | | - for_each_possible_cpu(i) { |
|---|
| 2567 | + for_each_online_cpu(i) { |
|---|
| 2472 | 2568 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); |
|---|
| 2473 | 2569 | |
|---|
| 2474 | 2570 | percpu_priv->np.down = 1; |
|---|
| .. | .. |
|---|
| 2486 | 2582 | mac_dev->adjust_link(mac_dev); |
|---|
| 2487 | 2583 | } |
|---|
| 2488 | 2584 | |
|---|
| 2585 | +/* The Aquantia PHYs are capable of performing rate adaptation */ |
|---|
| 2586 | +#define PHY_VEND_AQUANTIA 0x03a1b400 |
|---|
| 2587 | + |
|---|
| 2489 | 2588 | static int dpaa_phy_init(struct net_device *net_dev) |
|---|
| 2490 | 2589 | { |
|---|
| 2590 | + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; |
|---|
| 2491 | 2591 | struct mac_device *mac_dev; |
|---|
| 2492 | 2592 | struct phy_device *phy_dev; |
|---|
| 2493 | 2593 | struct dpaa_priv *priv; |
|---|
| .. | .. |
|---|
| 2503 | 2603 | return -ENODEV; |
|---|
| 2504 | 2604 | } |
|---|
| 2505 | 2605 | |
|---|
| 2506 | | - /* Remove any features not supported by the controller */ |
|---|
| 2507 | | - phy_dev->supported &= mac_dev->if_support; |
|---|
| 2508 | | - phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); |
|---|
| 2509 | | - phy_dev->advertising = phy_dev->supported; |
|---|
| 2606 | + /* Unless the PHY is capable of rate adaptation */ |
|---|
| 2607 | + if (mac_dev->phy_if != PHY_INTERFACE_MODE_XGMII || |
|---|
| 2608 | + ((phy_dev->drv->phy_id & GENMASK(31, 10)) != PHY_VEND_AQUANTIA)) { |
|---|
| 2609 | + /* remove any features not supported by the controller */ |
|---|
| 2610 | + ethtool_convert_legacy_u32_to_link_mode(mask, |
|---|
| 2611 | + mac_dev->if_support); |
|---|
| 2612 | + linkmode_and(phy_dev->supported, phy_dev->supported, mask); |
|---|
| 2613 | + } |
|---|
| 2614 | + |
|---|
| 2615 | + phy_support_asym_pause(phy_dev); |
|---|
| 2510 | 2616 | |
|---|
| 2511 | 2617 | mac_dev->phy_dev = phy_dev; |
|---|
| 2512 | 2618 | net_dev->phydev = phy_dev; |
|---|
| .. | .. |
|---|
| 2627 | 2733 | .ndo_stop = dpaa_eth_stop, |
|---|
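The vendor test masks off the low ten bits of the PHY ID so that any Aquantia model/revision matches (the interface-mode half of the condition is handled separately above). A small sketch of that comparison, with a made-up example ID:

```c
static bool example_is_aquantia(u32 phy_id)
{
	/* keep the OUI bits, drop model/revision */
	return (phy_id & GENMASK(31, 10)) == PHY_VEND_AQUANTIA;
}

/* example_is_aquantia(0x03a1b4a2) -> true; any other OUI -> false */
```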
| 2628 | 2734 | .ndo_tx_timeout = dpaa_tx_timeout, |
|---|
| 2629 | 2735 | .ndo_get_stats64 = dpaa_get_stats64, |
|---|
| 2736 | + .ndo_change_carrier = fixed_phy_change_carrier, |
|---|
| 2630 | 2737 | .ndo_set_mac_address = dpaa_set_mac_address, |
|---|
| 2631 | 2738 | .ndo_validate_addr = eth_validate_addr, |
|---|
| 2632 | 2739 | .ndo_set_rx_mode = dpaa_set_rx_mode, |
|---|
| .. | .. |
|---|
| 2668 | 2775 | { |
|---|
| 2669 | 2776 | dma_addr_t addr = bm_buf_addr(bmb); |
|---|
| 2670 | 2777 | |
|---|
| 2671 | | - dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE); |
|---|
| 2778 | + dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE, |
|---|
| 2779 | + DMA_FROM_DEVICE); |
|---|
| 2672 | 2780 | |
|---|
| 2673 | 2781 | skb_free_frag(phys_to_virt(addr)); |
|---|
| 2674 | 2782 | } |
|---|
| .. | .. |
|---|
| 2745 | 2853 | return err; |
|---|
| 2746 | 2854 | } |
|---|
| 2747 | 2855 | |
|---|
| 2748 | | -static const struct of_device_id dpaa_match[]; |
|---|
| 2749 | | - |
|---|
| 2750 | | -static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) |
|---|
| 2856 | +static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl, |
|---|
| 2857 | + enum port_type port) |
|---|
| 2751 | 2858 | { |
|---|
| 2752 | 2859 | u16 headroom; |
|---|
| 2753 | 2860 | |
|---|
| .. | .. |
|---|
| 2761 | 2868 | * |
|---|
| 2762 | 2869 | * Also make sure the headroom is a multiple of data_align bytes |
|---|
| 2763 | 2870 | */ |
|---|
| 2764 | | - headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + |
|---|
| 2765 | | - DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); |
|---|
| 2871 | + headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE); |
|---|
| 2766 | 2872 | |
|---|
| 2767 | | - return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); |
|---|
| 2873 | + if (port == RX) |
|---|
| 2874 | + return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT); |
|---|
| 2875 | + else |
|---|
| 2876 | + return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); |
|---|
| 2768 | 2877 | } |
|---|
| 2769 | 2878 | |
|---|
| 2770 | 2879 | static int dpaa_eth_probe(struct platform_device *pdev) |
|---|
| 2771 | 2880 | { |
|---|
| 2772 | | - struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL}; |
|---|
| 2773 | 2881 | struct net_device *net_dev = NULL; |
|---|
| 2882 | + struct dpaa_bp *dpaa_bp = NULL; |
|---|
| 2774 | 2883 | struct dpaa_fq *dpaa_fq, *tmp; |
|---|
| 2775 | 2884 | struct dpaa_priv *priv = NULL; |
|---|
| 2776 | 2885 | struct fm_port_fqs port_fqs; |
|---|
| 2777 | 2886 | struct mac_device *mac_dev; |
|---|
| 2778 | | - int err = 0, i, channel; |
|---|
| 2887 | + int err = 0, channel; |
|---|
| 2779 | 2888 | struct device *dev; |
|---|
| 2780 | 2889 | |
|---|
| 2781 | | - /* device used for DMA mapping */ |
|---|
| 2782 | | - dev = pdev->dev.parent; |
|---|
| 2783 | | - err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); |
|---|
| 2784 | | - if (err) { |
|---|
| 2785 | | - dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); |
|---|
| 2786 | | - return err; |
|---|
| 2890 | + dev = &pdev->dev; |
|---|
| 2891 | + |
|---|
| 2892 | + err = bman_is_probed(); |
|---|
| 2893 | + if (!err) |
|---|
| 2894 | + return -EPROBE_DEFER; |
|---|
| 2895 | + if (err < 0) { |
|---|
| 2896 | + dev_err(dev, "failing probe due to bman probe error\n"); |
|---|
| 2897 | + return -ENODEV; |
|---|
| 2898 | + } |
|---|
| 2899 | + err = qman_is_probed(); |
|---|
| 2900 | + if (!err) |
|---|
| 2901 | + return -EPROBE_DEFER; |
|---|
| 2902 | + if (err < 0) { |
|---|
| 2903 | + dev_err(dev, "failing probe due to qman probe error\n"); |
|---|
| 2904 | + return -ENODEV; |
|---|
| 2905 | + } |
|---|
| 2906 | + err = bman_portals_probed(); |
|---|
| 2907 | + if (!err) |
|---|
| 2908 | + return -EPROBE_DEFER; |
|---|
| 2909 | + if (err < 0) { |
|---|
| 2910 | + dev_err(dev, |
|---|
| 2911 | + "failing probe due to bman portals probe error\n"); |
|---|
| 2912 | + return -ENODEV; |
|---|
| 2913 | + } |
|---|
| 2914 | + err = qman_portals_probed(); |
|---|
| 2915 | + if (!err) |
|---|
| 2916 | + return -EPROBE_DEFER; |
|---|
| 2917 | + if (err < 0) { |
|---|
| 2918 | + dev_err(dev, |
|---|
| 2919 | + "failing probe due to qman portals probe error\n"); |
|---|
| 2920 | + return -ENODEV; |
|---|
| 2787 | 2921 | } |
|---|
| 2788 | 2922 | |
|---|
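`bman_is_probed()`, `qman_is_probed()` and the portal variants all return a tri-state: 0 means the dependency has not probed yet (defer), negative means its probe failed for good, positive means it is ready. The four checks above repeat that pattern; condensed into one hypothetical helper it reads:

```c
static int example_check_probed(int (*is_probed)(void), struct device *dev,
				const char *what)
{
	int err = is_probed();

	if (!err)
		return -EPROBE_DEFER;	/* dependency not probed yet */
	if (err < 0) {
		dev_err(dev, "failing probe due to %s probe error\n", what);
		return -ENODEV;		/* dependency probe failed */
	}
	return 0;			/* dependency ready */
}
```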
| 2789 | 2923 | /* Allocate this early, so we can store relevant information in |
|---|
| .. | .. |
|---|
| 2796 | 2930 | } |
|---|
| 2797 | 2931 | |
|---|
| 2798 | 2932 | /* Do this here, so we can be verbose early */ |
|---|
| 2799 | | - SET_NETDEV_DEV(net_dev, dev); |
|---|
| 2933 | + SET_NETDEV_DEV(net_dev, dev->parent); |
|---|
| 2800 | 2934 | dev_set_drvdata(dev, net_dev); |
|---|
| 2801 | 2935 | |
|---|
| 2802 | 2936 | priv = netdev_priv(net_dev); |
|---|
| .. | .. |
|---|
| 2806 | 2940 | |
|---|
| 2807 | 2941 | mac_dev = dpaa_mac_dev_get(pdev); |
|---|
| 2808 | 2942 | if (IS_ERR(mac_dev)) { |
|---|
| 2809 | | - dev_err(dev, "dpaa_mac_dev_get() failed\n"); |
|---|
| 2943 | + netdev_err(net_dev, "dpaa_mac_dev_get() failed\n"); |
|---|
| 2810 | 2944 | err = PTR_ERR(mac_dev); |
|---|
| 2945 | + goto free_netdev; |
|---|
| 2946 | + } |
|---|
| 2947 | + |
|---|
| 2948 | + /* Devices used for DMA mapping */ |
|---|
| 2949 | + priv->rx_dma_dev = fman_port_get_device(mac_dev->port[RX]); |
|---|
| 2950 | + priv->tx_dma_dev = fman_port_get_device(mac_dev->port[TX]); |
|---|
| 2951 | + err = dma_coerce_mask_and_coherent(priv->rx_dma_dev, DMA_BIT_MASK(40)); |
|---|
| 2952 | + if (!err) |
|---|
| 2953 | + err = dma_coerce_mask_and_coherent(priv->tx_dma_dev, |
|---|
| 2954 | + DMA_BIT_MASK(40)); |
|---|
| 2955 | + if (err) { |
|---|
| 2956 | + netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n"); |
|---|
| 2811 | 2957 | goto free_netdev; |
|---|
| 2812 | 2958 | } |
|---|
| 2813 | 2959 | |
|---|
| .. | .. |
|---|
| 2827 | 2973 | priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ |
|---|
| 2828 | 2974 | |
|---|
| 2829 | 2975 | /* bp init */ |
|---|
| 2830 | | - for (i = 0; i < DPAA_BPS_NUM; i++) { |
|---|
| 2831 | | - dpaa_bps[i] = dpaa_bp_alloc(dev); |
|---|
| 2832 | | - if (IS_ERR(dpaa_bps[i])) { |
|---|
| 2833 | | - err = PTR_ERR(dpaa_bps[i]); |
|---|
| 2834 | | - goto free_dpaa_bps; |
|---|
| 2835 | | - } |
|---|
| 2836 | | - /* the raw size of the buffers used for reception */ |
|---|
| 2837 | | - dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM); |
|---|
| 2838 | | - /* avoid runtime computations by keeping the usable size here */ |
|---|
| 2839 | | - dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size); |
|---|
| 2840 | | - dpaa_bps[i]->dev = dev; |
|---|
| 2841 | | - |
|---|
| 2842 | | - err = dpaa_bp_alloc_pool(dpaa_bps[i]); |
|---|
| 2843 | | - if (err < 0) |
|---|
| 2844 | | - goto free_dpaa_bps; |
|---|
| 2845 | | - priv->dpaa_bps[i] = dpaa_bps[i]; |
|---|
| 2976 | + dpaa_bp = dpaa_bp_alloc(dev); |
|---|
| 2977 | + if (IS_ERR(dpaa_bp)) { |
|---|
| 2978 | + err = PTR_ERR(dpaa_bp); |
|---|
| 2979 | + goto free_dpaa_bps; |
|---|
| 2846 | 2980 | } |
|---|
| 2981 | + /* the raw size of the buffers used for reception */ |
|---|
| 2982 | + dpaa_bp->raw_size = DPAA_BP_RAW_SIZE; |
|---|
| 2983 | + /* avoid runtime computations by keeping the usable size here */ |
|---|
| 2984 | + dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size); |
|---|
| 2985 | + dpaa_bp->priv = priv; |
|---|
| 2986 | + |
|---|
| 2987 | + err = dpaa_bp_alloc_pool(dpaa_bp); |
|---|
| 2988 | + if (err < 0) |
|---|
| 2989 | + goto free_dpaa_bps; |
|---|
| 2990 | + priv->dpaa_bp = dpaa_bp; |
|---|
| 2847 | 2991 | |
|---|
| 2848 | 2992 | INIT_LIST_HEAD(&priv->dpaa_fq_list); |
|---|
| 2849 | 2993 | |
|---|
| .. | .. |
|---|
| 2869 | 3013 | /* Walk the CPUs with affine portals |
|---|
| 2870 | 3014 | * and add this pool channel to each's dequeue mask. |
|---|
| 2871 | 3015 | */ |
|---|
| 2872 | | - dpaa_eth_add_channel(priv->channel); |
|---|
| 3016 | + dpaa_eth_add_channel(priv->channel, &pdev->dev); |
|---|
| 2873 | 3017 | |
|---|
| 2874 | 3018 | dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); |
|---|
| 2875 | 3019 | |
|---|
| .. | .. |
|---|
| 2897 | 3041 | goto free_dpaa_fqs; |
|---|
| 2898 | 3042 | } |
|---|
| 2899 | 3043 | |
|---|
| 2900 | | - priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]); |
|---|
| 2901 | | - priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); |
|---|
| 3044 | + priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX); |
|---|
| 3045 | + priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX); |
|---|
| 2902 | 3046 | |
|---|
| 2903 | 3047 | /* All real interfaces need their ports initialized */ |
|---|
| 2904 | | - err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, |
|---|
| 3048 | + err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs, |
|---|
| 2905 | 3049 | &priv->buf_layout[0], dev); |
|---|
| 2906 | 3050 | if (err) |
|---|
| 2907 | 3051 | goto free_dpaa_fqs; |
|---|
| .. | .. |
|---|
| 2960 | 3104 | struct device *dev; |
|---|
| 2961 | 3105 | int err; |
|---|
| 2962 | 3106 | |
|---|
| 2963 | | - dev = pdev->dev.parent; |
|---|
| 3107 | + dev = &pdev->dev; |
|---|
| 2964 | 3108 | net_dev = dev_get_drvdata(dev); |
|---|
| 2965 | 3109 | |
|---|
| 2966 | 3110 | priv = netdev_priv(net_dev); |
|---|