@@ -600,7 +600,6 @@
 	/* disable the queue */
 	wr32(IGC_TXDCTL(reg_idx), 0);
 	wrfl();
-	mdelay(10);
 
 	wr32(IGC_TDLEN(reg_idx),
 	     ring->count * sizeof(union igc_adv_tx_desc));
---|
@@ -898,25 +897,118 @@
 	return netdev_mc_count(netdev);
 }
 
-static __le32 igc_tx_launchtime(struct igc_adapter *adapter, ktime_t txtime)
+static __le32 igc_tx_launchtime(struct igc_ring *ring, ktime_t txtime,
+				bool *first_flag, bool *insert_empty)
 {
+	struct igc_adapter *adapter = netdev_priv(ring->netdev);
 	ktime_t cycle_time = adapter->cycle_time;
 	ktime_t base_time = adapter->base_time;
-	u32 launchtime;
+	ktime_t now = ktime_get_clocktai();
+	ktime_t baset_est, end_of_cycle;
+	s32 launchtime;
+	s64 n;
 
-	/* FIXME: when using ETF together with taprio, we may have a
-	 * case where 'delta' is larger than the cycle_time, this may
-	 * cause problems if we don't read the current value of
-	 * IGC_BASET, as the value writen into the launchtime
-	 * descriptor field may be misinterpreted.
+	n = div64_s64(ktime_sub_ns(now, base_time), cycle_time);
+
+	baset_est = ktime_add_ns(base_time, cycle_time * (n));
+	end_of_cycle = ktime_add_ns(baset_est, cycle_time);
+
+	if (ktime_compare(txtime, end_of_cycle) >= 0) {
+		if (baset_est != ring->last_ff_cycle) {
+			*first_flag = true;
+			ring->last_ff_cycle = baset_est;
+
+			if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0)
+				*insert_empty = true;
+		}
+	}
+
+	/* There is a window at the end of the cycle in which packets
+	 * may not honor their launchtime. A window of 5 usec is chosen
+	 * to allow for the software updating the tail pointer and the
+	 * packets being DMA'd into the packet buffer.
 	 */
-	div_s64_rem(ktime_sub_ns(txtime, base_time), cycle_time, &launchtime);
+	if ((ktime_sub_ns(end_of_cycle, now) < 5 * NSEC_PER_USEC))
+		netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n",
+			    txtime);
+
+	ring->last_tx_cycle = end_of_cycle;
+
+	launchtime = ktime_sub_ns(txtime, baset_est);
+	if (launchtime > 0)
+		div_s64_rem(launchtime, cycle_time, &launchtime);
+	else
+		launchtime = 0;
 
 	return cpu_to_le32(launchtime);
 }
 
+static int igc_init_empty_frame(struct igc_ring *ring,
+				struct igc_tx_buffer *buffer,
+				struct sk_buff *skb)
+{
+	unsigned int size;
+	dma_addr_t dma;
+
+	size = skb_headlen(skb);
+
+	dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE);
+	if (dma_mapping_error(ring->dev, dma)) {
+		netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
+		return -ENOMEM;
+	}
+
+	buffer->skb = skb;
+	buffer->protocol = 0;
+	buffer->bytecount = skb->len;
+	buffer->gso_segs = 1;
+	buffer->time_stamp = jiffies;
+	dma_unmap_len_set(buffer, len, skb->len);
+	dma_unmap_addr_set(buffer, dma, dma);
+
+	return 0;
+}
+
+static int igc_init_tx_empty_descriptor(struct igc_ring *ring,
+					struct sk_buff *skb,
+					struct igc_tx_buffer *first)
+{
+	union igc_adv_tx_desc *desc;
+	u32 cmd_type, olinfo_status;
+	int err;
+
+	if (!igc_desc_unused(ring))
+		return -EBUSY;
+
+	err = igc_init_empty_frame(ring, first, skb);
+	if (err)
+		return err;
+
+	cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+		   IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
+		   first->bytecount;
+	olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
+
+	desc = IGC_TX_DESC(ring, ring->next_to_use);
+	desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+	desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+	desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma));
+
+	netdev_tx_sent_queue(txring_txq(ring), skb->len);
+
+	first->next_to_watch = desc;
+
+	ring->next_to_use++;
+	if (ring->next_to_use == ring->count)
+		ring->next_to_use = 0;
+
+	return 0;
+}
+
+#define IGC_EMPTY_FRAME_SIZE 60
+
 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
-			    struct igc_tx_buffer *first,
+			    __le32 launch_time, bool first_flag,
 			    u32 vlan_macip_lens, u32 type_tucmd,
 			    u32 mss_l4len_idx)
 {
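The cycle arithmetic above is easy to sanity-check outside the kernel. Below is a minimal user-space sketch of the same computation, with plain int64_t nanosecond counts standing in for ktime_t and C division standing in for div64_s64()/div_s64_rem(); the per-ring first_flag/insert_empty bookkeeping needs driver state and is reduced here to a "txtime falls past the current cycle" predicate. All constants in main() are made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the kernel's ktime_t helpers: all values
 * are signed nanosecond counts, as in the driver.
 */
static int64_t tx_launchtime(int64_t now, int64_t base_time,
			     int64_t cycle_time, int64_t txtime,
			     bool *past_end_of_cycle)
{
	int64_t n = (now - base_time) / cycle_time;	/* div64_s64() */
	int64_t baset_est = base_time + cycle_time * n;
	int64_t end_of_cycle = baset_est + cycle_time;
	int64_t launchtime = txtime - baset_est;

	*past_end_of_cycle = txtime >= end_of_cycle;

	/* div_s64_rem(): keep only the offset into the current cycle */
	return launchtime > 0 ? launchtime % cycle_time : 0;
}

int main(void)
{
	int64_t base = 0, cycle = 1000000;	/* 1 ms cycle */
	int64_t now = 12345678;			/* inside cycle n = 12 */
	bool past;

	/* txtime in the next cycle: the offset wraps, and the driver
	 * would take the first-flag / empty-frame path
	 */
	printf("launchtime = %lld, past = %d\n",
	       (long long)tx_launchtime(now, base, cycle, 13100000, &past),
	       past);	/* prints: launchtime = 100000, past = 1 */
	return 0;
}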
---|
@@ -935,35 +1027,17 @@
 	if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
+	if (first_flag)
+		mss_l4len_idx |= IGC_ADVTXD_TSN_CNTX_FIRST;
+
 	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
 	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
-	/* We assume there is always a valid Tx time available. Invalid times
-	 * should have been handled by the upper layers.
-	 */
-	if (tx_ring->launchtime_enable) {
-		struct igc_adapter *adapter = netdev_priv(tx_ring->netdev);
-		ktime_t txtime = first->skb->tstamp;
-
-		first->skb->tstamp = ktime_set(0, 0);
-		context_desc->launch_time = igc_tx_launchtime(adapter,
-							      txtime);
-	} else {
-		context_desc->launch_time = 0;
-	}
+	context_desc->launch_time = launch_time;
 }
 
-static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
-	unsigned int offset = 0;
-
-	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
-	return offset == skb_checksum_start_offset(skb);
-}
-
-static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
+static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first,
+			__le32 launch_time, bool first_flag)
 {
 	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
---|
@@ -985,10 +1059,7 @@
 		break;
 	case offsetof(struct sctphdr, checksum):
 		/* validate that this is actually an SCTP request */
-		if ((first->protocol == htons(ETH_P_IP) &&
-		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
-		    (first->protocol == htons(ETH_P_IPV6) &&
-		     igc_ipv6_csum_is_sctp(skb))) {
+		if (skb_csum_is_sctp(skb)) {
 			type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
 			break;
 		}
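The switch above keys the hardware L4 checksum type off the offset of the checksum field within the transport header, and the open-coded SCTP protocol walk collapses into the generic skb_csum_is_sctp() helper (which, in mainline, just tests a flag the SCTP stack sets on the skb when it requests CRC32c offload). A self-contained sketch of the offset-dispatch technique, with local header layouts standing in for the kernel's; only the field offsets matter here:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel headers: field offsets match the
 * on-wire layouts (tcp check at 16, udp check at 6, sctp checksum at 8).
 */
struct tcphdr  { uint16_t source, dest; uint32_t seq, ack_seq;
		 uint16_t flags, window, check, urg_ptr; };
struct udphdr  { uint16_t source, dest, len, check; };
struct sctphdr { uint16_t source, dest; uint32_t vtag, checksum; };

/* Dispatch on skb->csum_offset, as igc_tx_csum() does above */
static const char *l4_type_from_csum_offset(size_t csum_offset)
{
	switch (csum_offset) {
	case offsetof(struct tcphdr, check):
		return "TCP";
	case offsetof(struct udphdr, check):
		return "UDP";
	case offsetof(struct sctphdr, checksum):
		return "SCTP (CRC32c)";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", l4_type_from_csum_offset(offsetof(struct sctphdr, checksum)));
	return 0;	/* prints: SCTP (CRC32c) */
}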
---|
@@ -1006,7 +1077,8 @@
 	vlan_macip_lens |= skb_network_offset(skb) << IGC_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
+	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+			vlan_macip_lens, type_tucmd, 0);
 }
 
 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
---|
@@ -1230,6 +1302,7 @@
 
 static int igc_tso(struct igc_ring *tx_ring,
 		   struct igc_tx_buffer *first,
+		   __le32 launch_time, bool first_flag,
 		   u8 *hdr_len)
 {
 	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
---|
@@ -1316,8 +1389,8 @@
 	vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT;
 	vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK;
 
-	igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
-			type_tucmd, mss_l4len_idx);
+	igc_tx_ctxtdesc(tx_ring, launch_time, first_flag,
+			vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
 	return 1;
 }
---|
@@ -1325,11 +1398,14 @@
 static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
 				       struct igc_ring *tx_ring)
 {
+	bool first_flag = false, insert_empty = false;
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 	__be16 protocol = vlan_get_protocol(skb);
 	struct igc_tx_buffer *first;
+	__le32 launch_time = 0;
 	u32 tx_flags = 0;
 	unsigned short f;
+	ktime_t txtime;
 	u8 hdr_len = 0;
 	int tso = 0;
 
---|
@@ -1343,11 +1419,40 @@
 		count += TXD_USE_COUNT(skb_frag_size(
 						&skb_shinfo(skb)->frags[f]));
 
-	if (igc_maybe_stop_tx(tx_ring, count + 3)) {
+	if (igc_maybe_stop_tx(tx_ring, count + 5)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
 
+	if (!tx_ring->launchtime_enable)
+		goto done;
+
+	txtime = skb->tstamp;
+	skb->tstamp = ktime_set(0, 0);
+	launch_time = igc_tx_launchtime(tx_ring, txtime, &first_flag, &insert_empty);
+
+	if (insert_empty) {
+		struct igc_tx_buffer *empty_info;
+		struct sk_buff *empty;
+		void *data;
+
+		empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+		empty = alloc_skb(IGC_EMPTY_FRAME_SIZE, GFP_ATOMIC);
+		if (!empty)
+			goto done;
+
+		data = skb_put(empty, IGC_EMPTY_FRAME_SIZE);
+		memset(data, 0, IGC_EMPTY_FRAME_SIZE);
+
+		igc_tx_ctxtdesc(tx_ring, 0, false, 0, 0, 0);
+
+		if (igc_init_tx_empty_descriptor(tx_ring,
+						 empty,
+						 empty_info) < 0)
+			dev_kfree_skb_any(empty);
+	}
+
+done:
 	/* record the location of the first descriptor for this packet */
 	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
 	first->skb = skb;
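The descriptor reserve grows from count + 3 to count + 5 because a cycle boundary can now consume two extra ring slots ahead of the real packet: one context descriptor plus one data descriptor for the injected empty frame. The stop/wake decision compares that budget against the ring's unused slots. The following user-space sketch restates the circular-ring occupancy arithmetic; the formula mirrors the driver's igc_desc_unused() helper, which is not shown in this diff, so treat it as an assumption, and all numbers are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Circular-ring free-slot count: one slot is always kept empty so
 * that next_to_use == next_to_clean means "empty", never "full".
 */
static uint16_t desc_unused(uint16_t ring_count, uint16_t next_to_use,
			    uint16_t next_to_clean)
{
	return ((next_to_clean > next_to_use) ? 0 : ring_count) +
	       next_to_clean - next_to_use - 1;
}

int main(void)
{
	uint16_t ring_count = 256, ntu = 250, ntc = 10;
	/* mirror the driver's new reserve: data descriptors for the
	 * frame's head and fragments, plus headroom that now also
	 * covers the context + data descriptors of an injected empty
	 * frame
	 */
	uint16_t data_descs = 4, budget = data_descs + 5;
	uint16_t unused = desc_unused(ring_count, ntu, ntc);

	printf("unused=%u, stop queue=%s\n", unused,
	       unused < budget ? "yes" : "no");	/* unused=15, no */
	return 0;
}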
---|
@@ -1361,9 +1466,10 @@
 		 * the other timer registers before skipping the
 		 * timestamping request.
 		 */
-		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
-		    !test_and_set_bit_lock(__IGC_PTP_TX_IN_PROGRESS,
-					   &adapter->state)) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&adapter->ptp_tx_lock, flags);
+		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON && !adapter->ptp_tx_skb) {
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 			tx_flags |= IGC_TX_FLAGS_TSTAMP;
 
---|
@@ -1372,17 +1478,19 @@
 		} else {
 			adapter->tx_hwtstamp_skipped++;
 		}
+
+		spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
 	}
 
 	/* record initial flags and protocol */
 	first->tx_flags = tx_flags;
 	first->protocol = protocol;
 
-	tso = igc_tso(tx_ring, first, &hdr_len);
+	tso = igc_tso(tx_ring, first, launch_time, first_flag, &hdr_len);
 	if (tso < 0)
 		goto out_drop;
 	else if (!tso)
-		igc_tx_csum(tx_ring, first);
+		igc_tx_csum(tx_ring, first, launch_time, first_flag);
 
 	igc_tx_map(tx_ring, first, hdr_len);
 
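The hardware has a single Tx timestamp slot, so at most one in-flight timestamp request may exist at a time. The old code claimed the slot with an atomic state bit; the new code serializes on adapter->ptp_tx_lock and treats a non-NULL adapter->ptp_tx_skb as "slot busy". A minimal user-space analogue of that claim/release protocol, with a pthread mutex standing in for spin_lock_irqsave() and void pointers for sk_buffs; names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static pthread_mutex_t ptp_tx_lock = PTHREAD_MUTEX_INITIALIZER;
static void *ptp_tx_skb;	/* non-NULL means the slot is in use */
static unsigned long tx_hwtstamp_skipped;

/* Claim the single timestamp slot for @skb; on failure the caller
 * just sends the packet without a hardware timestamp.
 */
static bool ptp_tx_claim(void *skb)
{
	bool claimed = false;

	pthread_mutex_lock(&ptp_tx_lock);
	if (!ptp_tx_skb) {
		ptp_tx_skb = skb;
		claimed = true;
	} else {
		tx_hwtstamp_skipped++;
	}
	pthread_mutex_unlock(&ptp_tx_lock);

	return claimed;
}

/* Called once the timestamp has been read back from the hardware */
static void ptp_tx_release(void)
{
	pthread_mutex_lock(&ptp_tx_lock);
	ptp_tx_skb = NULL;
	pthread_mutex_unlock(&ptp_tx_lock);
}

int main(void)
{
	int a = 1, b = 2;

	printf("first claim: %d\n", ptp_tx_claim(&a));	/* 1 */
	printf("second claim: %d\n", ptp_tx_claim(&b));	/* 0: slot busy */
	ptp_tx_release();
	printf("after release: %d\n", ptp_tx_claim(&b));	/* 1 */
	return 0;
}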
---|
@@ -1463,14 +1571,36 @@
 			le32_to_cpu(rx_desc->wb.upper.status_error));
 }
 
+/* Mapping HW RSS Type to enum pkt_hash_types */
+static const enum pkt_hash_types igc_rss_type_table[IGC_RSS_TYPE_MAX_TABLE] = {
+	[IGC_RSS_TYPE_NO_HASH]		= PKT_HASH_TYPE_L2,
+	[IGC_RSS_TYPE_HASH_TCP_IPV4]	= PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_IPV4]	= PKT_HASH_TYPE_L3,
+	[IGC_RSS_TYPE_HASH_TCP_IPV6]	= PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_IPV6_EX]	= PKT_HASH_TYPE_L3,
+	[IGC_RSS_TYPE_HASH_IPV6]	= PKT_HASH_TYPE_L3,
+	[IGC_RSS_TYPE_HASH_TCP_IPV6_EX] = PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_UDP_IPV4]	= PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_UDP_IPV6]	= PKT_HASH_TYPE_L4,
+	[IGC_RSS_TYPE_HASH_UDP_IPV6_EX] = PKT_HASH_TYPE_L4,
+	[10] = PKT_HASH_TYPE_NONE, /* RSS Type above 9 "Reserved" by HW  */
+	[11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask   */
+	[12] = PKT_HASH_TYPE_NONE, /* to handle future HW revisions      */
+	[13] = PKT_HASH_TYPE_NONE,
+	[14] = PKT_HASH_TYPE_NONE,
+	[15] = PKT_HASH_TYPE_NONE,
+};
+
 static inline void igc_rx_hash(struct igc_ring *ring,
 			       union igc_adv_rx_desc *rx_desc,
 			       struct sk_buff *skb)
 {
-	if (ring->netdev->features & NETIF_F_RXHASH)
-		skb_set_hash(skb,
-			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
-			     PKT_HASH_TYPE_L3);
+	if (ring->netdev->features & NETIF_F_RXHASH) {
+		u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+		u32 rss_type = igc_rss_type(rx_desc);
+
+		skb_set_hash(skb, rss_hash, igc_rss_type_table[rss_type]);
+	}
}
 
 /**
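The table converts the 4-bit RSS-type field from the Rx descriptor write-back into the hash level reported to the stack, so TCP and UDP flows get an L4 hash usable for flow steering while plain IP traffic gets an L3 one. A self-contained sketch of the same lookup, with local stand-ins for the kernel's enum pkt_hash_types values and the type encoding taken from the table above:

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel's enum pkt_hash_types (same order:
 * NONE=0, L2=1, L3=2, L4=3)
 */
enum pkt_hash_types { HASH_NONE, HASH_L2, HASH_L3, HASH_L4 };

/* Index = 4-bit RSS type from the Rx descriptor; types 0-9 are
 * defined by the hardware, 10-15 are reserved.
 */
static const enum pkt_hash_types rss_type_table[16] = {
	[0] = HASH_L2,			/* no hash computed */
	[1] = HASH_L4,			/* TCP/IPv4 */
	[2] = HASH_L3,			/* IPv4 */
	[3] = HASH_L4,			/* TCP/IPv6 */
	[4] = HASH_L3,			/* IPv6 w/ extensions */
	[5] = HASH_L3,			/* IPv6 */
	[6] = HASH_L4,			/* TCP/IPv6 w/ extensions */
	[7] = HASH_L4,			/* UDP/IPv4 */
	[8] = HASH_L4,			/* UDP/IPv6 */
	[9] = HASH_L4,			/* UDP/IPv6 w/ extensions */
	/* [10..15] implicitly HASH_NONE: reserved by hardware */
};

int main(void)
{
	/* e.g. the descriptor reports RSS type 7 (UDP/IPv4) */
	uint32_t rss_type = 7 & 0xF;	/* the field is 4 bits wide */

	printf("hash level = %d\n", rss_type_table[rss_type]);	/* 3 = L4 */
	return 0;
}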
---|
@@ -4756,9 +4886,10 @@
 		return false;
 
 	for (n = 0; n < qopt->num_entries; n++) {
-		const struct tc_taprio_sched_entry *e;
+		const struct tc_taprio_sched_entry *e, *prev;
 		int i;
 
+		prev = n ? &qopt->entries[n - 1] : NULL;
 		e = &qopt->entries[n];
 
 		/* i225 only supports "global" frame preemption
---|
@@ -4767,13 +4898,18 @@
 		if (e->command != TC_TAPRIO_CMD_SET_GATES)
 			return false;
 
-		for (i = 0; i < adapter->num_tx_queues; i++) {
-			if (e->gate_mask & BIT(i))
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			if (e->gate_mask & BIT(i)) {
 				queue_uses[i]++;
 
-			if (queue_uses[i] > 1)
-				return false;
-		}
+				/* There are limitations: A single queue cannot
+				 * be opened and closed multiple times per cycle
+				 * unless the gate stays open. Check for it.
+				 */
+				if (queue_uses[i] > 1 &&
+				    !(prev->gate_mask & BIT(i)))
+					return false;
+			}
 	}
 
 	return true;
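The loosened rule: a queue may now hold its gate open across consecutive entries, but it still may not close and then reopen within one cycle. The same validation over a bare array of gate masks, as a user-space sketch with no taprio structures; the queue count is illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4

/* Reject schedules where any queue's gate opens, closes, and opens
 * again within a single cycle; staying open across consecutive
 * entries is allowed.
 */
static bool gcl_is_valid(const uint32_t *gate_masks, size_t num_entries)
{
	int queue_uses[NUM_QUEUES] = { 0 };

	for (size_t n = 0; n < num_entries; n++) {
		uint32_t prev = n ? gate_masks[n - 1] : 0;

		for (int i = 0; i < NUM_QUEUES; i++)
			if (gate_masks[n] & (1u << i)) {
				queue_uses[i]++;
				/* reopening is only OK if the gate was
				 * already open in the previous entry
				 */
				if (queue_uses[i] > 1 &&
				    !(prev & (1u << i)))
					return false;
			}
	}
	return true;
}

int main(void)
{
	uint32_t ok[]  = { 0x1, 0x3, 0x2 };	/* queue 0 stays open */
	uint32_t bad[] = { 0x1, 0x2, 0x1 };	/* queue 0 reopens */

	printf("ok:  %d\n", gcl_is_valid(ok, 3));	/* 1 */
	printf("bad: %d\n", gcl_is_valid(bad, 3));	/* 0 */
	return 0;
}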
---|
@@ -4798,13 +4934,18 @@
 static int igc_save_qbv_schedule(struct igc_adapter *adapter,
 				 struct tc_taprio_qopt_offload *qopt)
 {
+	bool queue_configured[IGC_MAX_TX_QUEUES] = { };
 	u32 start_time = 0, end_time = 0;
 	size_t n;
+	int i;
 
 	if (!qopt->enable) {
 		adapter->base_time = 0;
 		return 0;
 	}
+
+	if (qopt->base_time < 0)
+		return -ERANGE;
 
 	if (adapter->base_time)
 		return -EALREADY;
---|
@@ -4815,14 +4956,25 @@
 	adapter->cycle_time = qopt->cycle_time;
 	adapter->base_time = qopt->base_time;
 
-	/* FIXME: be a little smarter about cases when the gate for a
-	 * queue stays open for more than one entry.
-	 */
 	for (n = 0; n < qopt->num_entries; n++) {
 		struct tc_taprio_sched_entry *e = &qopt->entries[n];
-		int i;
 
 		end_time += e->interval;
+
+		/* If any of the conditions below are true, we need to manually
+		 * control the end time of the cycle.
+		 * 1. Qbv users can specify a cycle time that is not equal
+		 *    to the total GCL intervals. Hence, recalculation is
+		 *    necessary here to exclude the time interval that
+		 *    exceeds the cycle time.
+		 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2,
+		 *    once the end of the list is reached, it will switch
+		 *    to the END_OF_CYCLE state and leave the gates in the
+		 *    same state until the next cycle is started.
+		 */
+		if (end_time > adapter->cycle_time ||
+		    n + 1 == qopt->num_entries)
+			end_time = adapter->cycle_time;
 
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			struct igc_ring *ring = adapter->tx_ring[i];
---|
@@ -4830,11 +4982,30 @@
 			if (!(e->gate_mask & BIT(i)))
 				continue;
 
-			ring->start_time = start_time;
+			/* Check whether a queue stays open for more than one
+			 * entry. If so, keep the start and advance the end
+			 * time.
+			 */
+			if (!queue_configured[i])
+				ring->start_time = start_time;
 			ring->end_time = end_time;
+
+			queue_configured[i] = true;
 		}
 
 		start_time += e->interval;
+	}
+
+	/* Check whether a queue gets configured.
+	 * If not, set the start and end time to be end time.
+	 */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		if (!queue_configured[i]) {
+			struct igc_ring *ring = adapter->tx_ring[i];
+
+			ring->start_time = end_time;
+			ring->end_time = end_time;
+		}
 	}
 
 	return 0;
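Taken together: per-queue windows can now span consecutive entries, end times are clamped to the configured cycle time (and to the end of the list, per 802.1Q END_OF_CYCLE semantics), and queues absent from the GCL are parked at end_time with a zero-width window. A self-contained sketch of that window computation, using plain arrays instead of the taprio and ring structures; all values are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_QUEUES 4

struct window { uint32_t start, end; };

/* Compute each queue's [start, end) transmission window within one
 * Qbv cycle, mirroring the clamping and stay-open handling above.
 */
static void save_schedule(const uint32_t *gate_masks,
			  const uint32_t *intervals, size_t num_entries,
			  uint32_t cycle_time, struct window *win)
{
	bool configured[NUM_QUEUES] = { false };
	uint32_t start_time = 0, end_time = 0;

	for (size_t n = 0; n < num_entries; n++) {
		end_time += intervals[n];
		/* clamp: intervals may exceed cycle_time, and the last
		 * entry runs until the cycle restarts
		 */
		if (end_time > cycle_time || n + 1 == num_entries)
			end_time = cycle_time;

		for (int i = 0; i < NUM_QUEUES; i++) {
			if (!(gate_masks[n] & (1u << i)))
				continue;
			if (!configured[i])	/* keep start if the gate stays open */
				win[i].start = start_time;
			win[i].end = end_time;
			configured[i] = true;
		}
		start_time += intervals[n];
	}

	for (int i = 0; i < NUM_QUEUES; i++)	/* unused queues: zero-width window */
		if (!configured[i])
			win[i].start = win[i].end = end_time;
}

int main(void)
{
	/* queue 0 open in both entries, queue 1 only in the second */
	uint32_t masks[] = { 0x1, 0x3 }, intervals[] = { 300000, 800000 };
	struct window win[NUM_QUEUES];

	save_schedule(masks, intervals, 2, 1000000, win);
	for (int i = 0; i < NUM_QUEUES; i++)
		printf("queue %d: [%u, %u)\n", i, win[i].start, win[i].end);
	return 0;	/* q0: [0, 1000000), q1: [300000, 1000000), q2/q3 parked */
}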
---|
@@ -5110,6 +5281,7 @@
 	netdev->features |= NETIF_F_TSO;
 	netdev->features |= NETIF_F_TSO6;
 	netdev->features |= NETIF_F_TSO_ECN;
+	netdev->features |= NETIF_F_RXHASH;
 	netdev->features |= NETIF_F_RXCSUM;
 	netdev->features |= NETIF_F_HW_CSUM;
 	netdev->features |= NETIF_F_SCTP_CRC;
---|