| .. | .. |
| 232 | 232 | static void sdma_complete(struct kref *); |
| 233 | 233 | static void sdma_finalput(struct sdma_state *); |
| 234 | 234 | static void sdma_get(struct sdma_state *); |
| 235 | | -static void sdma_hw_clean_up_task(unsigned long); |
| 235 | +static void sdma_hw_clean_up_task(struct tasklet_struct *); |
| 236 | 236 | static void sdma_put(struct sdma_state *); |
| 237 | 237 | static void sdma_set_state(struct sdma_engine *, enum sdma_states); |
| 238 | 238 | static void sdma_start_hw_clean_up(struct sdma_engine *); |
| 239 | | -static void sdma_sw_clean_up_task(unsigned long); |
| 239 | +static void sdma_sw_clean_up_task(struct tasklet_struct *); |
| 240 | 240 | static void sdma_sendctrl(struct sdma_engine *, unsigned); |
| 241 | 241 | static void init_sdma_regs(struct sdma_engine *, u32, uint); |
| 242 | 242 | static void sdma_process_event( |
| .. | .. |
| 379 | 379 | __sdma_txclean(sde->dd, tx); |
| 380 | 380 | if (complete) |
| 381 | 381 | (*complete)(tx, res); |
| 382 | | - if (wait && iowait_sdma_dec(wait)) |
| 382 | + if (iowait_sdma_dec(wait)) |
| 383 | 383 | iowait_drain_wakeup(wait); |
| 384 | 384 | } |
| 385 | 385 | |
| .. | .. |
| 406 | 406 | struct sdma_txreq *txp, *txp_next; |
| 407 | 407 | LIST_HEAD(flushlist); |
| 408 | 408 | unsigned long flags; |
| 409 | + uint seq; |
| 409 | 410 | |
| 410 | 411 | /* flush from head to tail */ |
| 411 | 412 | sdma_flush_descq(sde); |
| .. | .. |
| 416 | 417 | /* flush from flush list */ |
| 417 | 418 | list_for_each_entry_safe(txp, txp_next, &flushlist, list) |
| 418 | 419 | complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); |
| 420 | + /* wakeup QPs orphaned on the dmawait list */ |
| 421 | + do { |
| 422 | + struct iowait *w, *nw; |
| 423 | + |
| 424 | + seq = read_seqbegin(&sde->waitlock); |
| 425 | + if (!list_empty(&sde->dmawait)) { |
| 426 | + write_seqlock(&sde->waitlock); |
| 427 | + list_for_each_entry_safe(w, nw, &sde->dmawait, list) { |
| 428 | + if (w->wakeup) { |
| 429 | + w->wakeup(w, SDMA_AVAIL_REASON); |
| 430 | + list_del_init(&w->list); |
| 431 | + } |
| 432 | + } |
| 433 | + write_sequnlock(&sde->waitlock); |
| 434 | + } |
| 435 | + } while (read_seqretry(&sde->waitlock, seq)); |
| 419 | 436 | } |
| 420 | 437 | |
| 421 | 438 | /* |
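The new drain loop above uses the engine-local `sde->waitlock` seqlock: the empty check runs lock-free on the read side, the writer lock is taken only when there are waiters, and `read_seqretry()` restarts the pass if a writer raced the unlocked check. A minimal sketch of that pattern, with a hypothetical `demo` struct standing in for the `sdma_engine` fields:

```c
#include <linux/seqlock.h>
#include <linux/list.h>

struct demo {
	seqlock_t waitlock;
	struct list_head dmawait;
};

static void demo_drain_waiters(struct demo *d)
{
	unsigned int seq;

	do {
		/* lock-free peek on the read side */
		seq = read_seqbegin(&d->waitlock);
		if (!list_empty(&d->dmawait)) {
			/* escalate to the writer side before unlinking */
			write_seqlock(&d->waitlock);
			/* ... wake and list_del_init() each waiter ... */
			write_sequnlock(&d->waitlock);
		}
		/* a racing writer forces one more pass */
	} while (read_seqretry(&d->waitlock, seq));
}
```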
| .. | .. |
| 528 | 545 | schedule_work(&sde->err_halt_worker); |
| 529 | 546 | } |
| 530 | 547 | |
| 531 | | -static void sdma_hw_clean_up_task(unsigned long opaque) |
| 548 | +static void sdma_hw_clean_up_task(struct tasklet_struct *t) |
| 532 | 549 | { |
| 533 | | - struct sdma_engine *sde = (struct sdma_engine *)opaque; |
| 550 | + struct sdma_engine *sde = from_tasklet(sde, t, |
| 551 | + sdma_hw_clean_up_task); |
| 534 | 552 | u64 statuscsr; |
| 535 | 553 | |
| 536 | 554 | while (1) { |
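`from_tasklet()` is a `container_of()` wrapper: the callback now receives the `tasklet_struct` pointer and recovers its enclosing structure by member name, replacing the old `unsigned long` cookie cast. A minimal sketch of the paired setup and callback, using a hypothetical `demo_engine`:

```c
#include <linux/interrupt.h>

struct demo_engine {
	struct tasklet_struct cleanup_task;
	/* ... engine state ... */
};

static void demo_cleanup(struct tasklet_struct *t)
{
	/* container_of(t, struct demo_engine, cleanup_task) */
	struct demo_engine *de = from_tasklet(de, t, cleanup_task);

	/* ... operate on de ... */
}

static void demo_engine_init(struct demo_engine *de)
{
	/* no cookie argument: the callback derives its context */
	tasklet_setup(&de->cleanup_task, demo_cleanup);
}
```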
| .. | .. |
| 587 | 605 | sdma_desc_avail(sde, sdma_descq_freecnt(sde)); |
| 588 | 606 | } |
| 589 | 607 | |
| 590 | | -static void sdma_sw_clean_up_task(unsigned long opaque) |
| 608 | +static void sdma_sw_clean_up_task(struct tasklet_struct *t) |
| 591 | 609 | { |
| 592 | | - struct sdma_engine *sde = (struct sdma_engine *)opaque; |
| 610 | + struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task); |
| 593 | 611 | unsigned long flags; |
| 594 | 612 | |
| 595 | 613 | spin_lock_irqsave(&sde->tail_lock, flags); |
| .. | .. |
| 816 | 834 | struct sdma_rht_map_elem { |
| 817 | 835 | u32 mask; |
| 818 | 836 | u8 ctr; |
| 819 | | - struct sdma_engine *sde[0]; |
| 837 | + struct sdma_engine *sde[]; |
| 820 | 838 | }; |
| 821 | 839 | |
| 822 | 840 | struct sdma_rht_node { |
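The `sde[0]` to `sde[]` change swaps the old GNU zero-length array for a C99 flexible array member, which lets the compiler and fortify checks see the trailing array's bound. Allocation pairs naturally with `struct_size()`; a minimal sketch with a hypothetical element type:

```c
#include <linux/overflow.h>
#include <linux/slab.h>

struct demo_map_elem {
	u32 mask;
	u8 ctr;
	struct sdma_engine *sde[];	/* C99 flexible array member */
};

static struct demo_map_elem *demo_map_elem_alloc(unsigned int nengines)
{
	struct demo_map_elem *e;

	/* struct_size(): header + nengines * sizeof(e->sde[0]),
	 * with integer-overflow checking */
	e = kzalloc(struct_size(e, sde, nengines), GFP_KERNEL);
	return e;
}
```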
| .. | .. |
| 831 | 849 | .nelem_hint = NR_CPUS_HINT, |
| 832 | 850 | .head_offset = offsetof(struct sdma_rht_node, node), |
| 833 | 851 | .key_offset = offsetof(struct sdma_rht_node, cpu_id), |
| 834 | | - .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id), |
| 852 | + .key_len = sizeof_field(struct sdma_rht_node, cpu_id), |
| 835 | 853 | .max_size = NR_CPUS, |
| 836 | 854 | .min_size = 8, |
| 837 | 855 | .automatic_shrinking = true, |
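`sizeof_field()` is the tree-wide rename of `FIELD_SIZEOF()`; both yield a member's size from the type alone, with no instance needed. A tiny sketch with a hypothetical key struct:

```c
#include <linux/stddef.h>

struct demo_key {
	u32 cpu_id;
};

/* expands to sizeof(((struct demo_key *)0)->cpu_id), i.e. 4 */
static const size_t demo_key_len = sizeof_field(struct demo_key, cpu_id);
```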
| .. | .. |
| 853 | 871 | { |
| 854 | 872 | struct sdma_rht_node *rht_node; |
| 855 | 873 | struct sdma_engine *sde = NULL; |
| 856 | | - const struct cpumask *current_mask = &current->cpus_allowed; |
| 857 | 874 | unsigned long cpu_id; |
| 858 | 875 | |
| 859 | 876 | /* |
| 860 | 877 | * To ensure that always the same sdma engine(s) will be |
| 861 | 878 | * selected make sure the process is pinned to this CPU only. |
| 862 | 879 | */ |
| 863 | | - if (cpumask_weight(current_mask) != 1) |
| 880 | + if (current->nr_cpus_allowed != 1) |
| 864 | 881 | goto out; |
| 865 | 882 | |
| 866 | | - cpu_id = smp_processor_id(); |
| 867 | 883 | rcu_read_lock(); |
| 868 | | - rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id, |
| 869 | | - sdma_rht_params); |
| 884 | + cpu_id = smp_processor_id(); |
| 885 | + rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id, |
| 886 | + sdma_rht_params); |
| 870 | 887 | |
| 871 | 888 | if (rht_node && rht_node->map[vl]) { |
| 872 | 889 | struct sdma_rht_map_elem *map = rht_node->map[vl]; |
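Two things change in the lookup: `smp_processor_id()` moves inside the RCU read-side section (after the single-CPU pinning check, so the value is stable), and `rhashtable_lookup()` replaces `rhashtable_lookup_fast()`, since the `_fast` variant takes `rcu_read_lock()` itself, which is redundant when the caller already holds it. A sketch of the pattern, assuming a populated table `demo_ht` and matching `demo_params`:

```c
#include <linux/rhashtable.h>
#include <linux/smp.h>

static void demo_lookup_cpu_node(struct rhashtable *demo_ht,
				 const struct rhashtable_params demo_params)
{
	struct sdma_rht_node *node;
	unsigned long cpu_id;

	rcu_read_lock();
	cpu_id = smp_processor_id();
	/* caller must hold rcu_read_lock(); no extra nesting */
	node = rhashtable_lookup(demo_ht, &cpu_id, demo_params);
	if (node) {
		/* ... use node while still inside the read section ... */
	}
	rcu_read_unlock();
}
```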
| .. | .. |
| 1313 | 1330 | kvfree(sde->tx_ring); |
| 1314 | 1331 | sde->tx_ring = NULL; |
| 1315 | 1332 | } |
| 1316 | | - spin_lock_irq(&dd->sde_map_lock); |
| 1317 | | - sdma_map_free(rcu_access_pointer(dd->sdma_map)); |
| 1318 | | - RCU_INIT_POINTER(dd->sdma_map, NULL); |
| 1319 | | - spin_unlock_irq(&dd->sde_map_lock); |
| 1320 | | - synchronize_rcu(); |
| 1333 | + if (rcu_access_pointer(dd->sdma_map)) { |
| 1334 | + spin_lock_irq(&dd->sde_map_lock); |
| 1335 | + sdma_map_free(rcu_access_pointer(dd->sdma_map)); |
| 1336 | + RCU_INIT_POINTER(dd->sdma_map, NULL); |
| 1337 | + spin_unlock_irq(&dd->sde_map_lock); |
| 1338 | + synchronize_rcu(); |
| 1339 | + } |
| 1321 | 1340 | kfree(dd->per_sdma); |
| 1322 | 1341 | dd->per_sdma = NULL; |
| 1323 | 1342 | |
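The new guard lets teardown run safely when `sdma_map` was never published, for example when an early init failure takes the cleanup path. For reference, the canonical RCU teardown ordering unpublishes the pointer, waits for readers, and only then frees; a sketch with hypothetical names:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_map { /* ... */ };

static struct demo_map __rcu *demo_map;
static DEFINE_SPINLOCK(demo_map_lock);

static void demo_map_teardown(void)
{
	struct demo_map *p;

	if (!rcu_access_pointer(demo_map))
		return;		/* never populated: nothing to do */

	spin_lock_irq(&demo_map_lock);
	p = rcu_dereference_protected(demo_map,
				      lockdep_is_held(&demo_map_lock));
	RCU_INIT_POINTER(demo_map, NULL);
	spin_unlock_irq(&demo_map_lock);
	synchronize_rcu();	/* wait out readers of the old pointer */
	kfree(p);
}
```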
| .. | .. |
| 1422 | 1441 | seqlock_init(&sde->head_lock); |
| 1423 | 1442 | spin_lock_init(&sde->senddmactrl_lock); |
| 1424 | 1443 | spin_lock_init(&sde->flushlist_lock); |
| 1444 | + seqlock_init(&sde->waitlock); |
| 1425 | 1445 | /* insure there is always a zero bit */ |
| 1426 | 1446 | sde->ahg_bits = 0xfffffffe00000000ULL; |
| .. | .. |
| 1437 | 1457 | sde->tail_csr = |
| 1438 | 1458 | get_kctxt_csr_addr(dd, this_idx, SD(TAIL)); |
| 1439 | 1459 | |
| 1440 | | - tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task, |
| 1441 | | - (unsigned long)sde); |
| 1442 | | - |
| 1443 | | - tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task, |
| 1444 | | - (unsigned long)sde); |
| 1460 | + tasklet_setup(&sde->sdma_hw_clean_up_task, |
| 1461 | + sdma_hw_clean_up_task); |
| 1462 | + tasklet_setup(&sde->sdma_sw_clean_up_task, |
| 1463 | + sdma_sw_clean_up_task); |
| 1445 | 1464 | INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait); |
| 1446 | 1465 | INIT_WORK(&sde->flush_worker, sdma_field_flush); |
| 1447 | 1466 | |
| .. | .. |
| 1450 | 1469 | timer_setup(&sde->err_progress_check_timer, |
| 1451 | 1470 | sdma_err_progress_check, 0); |
| 1452 | 1471 | |
| 1453 | | - sde->descq = dma_zalloc_coherent( |
| 1454 | | - &dd->pcidev->dev, |
| 1455 | | - descq_cnt * sizeof(u64[2]), |
| 1456 | | - &sde->descq_phys, |
| 1457 | | - GFP_KERNEL |
| 1458 | | - ); |
| 1472 | + sde->descq = dma_alloc_coherent(&dd->pcidev->dev, |
| 1473 | + descq_cnt * sizeof(u64[2]), |
| 1474 | + &sde->descq_phys, GFP_KERNEL); |
| 1459 | 1475 | if (!sde->descq) |
| 1460 | 1476 | goto bail; |
| 1461 | 1477 | sde->tx_ring = |
| .. | .. |
| 1468 | 1484 | |
| 1469 | 1485 | dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; |
| 1470 | 1486 | /* Allocate memory for DMA of head registers to memory */ |
| 1471 | | - dd->sdma_heads_dma = dma_zalloc_coherent( |
| 1472 | | - &dd->pcidev->dev, |
| 1473 | | - dd->sdma_heads_size, |
| 1474 | | - &dd->sdma_heads_phys, |
| 1475 | | - GFP_KERNEL |
| 1476 | | - ); |
| 1487 | + dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev, |
| 1488 | + dd->sdma_heads_size, |
| 1489 | + &dd->sdma_heads_phys, |
| 1490 | + GFP_KERNEL); |
| 1477 | 1491 | if (!dd->sdma_heads_dma) { |
| 1478 | 1492 | dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); |
| 1479 | 1493 | goto bail; |
| 1480 | 1494 | } |
| 1481 | 1495 | |
| 1482 | 1496 | /* Allocate memory for pad */ |
| 1483 | | - dd->sdma_pad_dma = dma_zalloc_coherent( |
| 1484 | | - &dd->pcidev->dev, |
| 1485 | | - SDMA_PAD, |
| 1486 | | - &dd->sdma_pad_phys, |
| 1487 | | - GFP_KERNEL |
| 1488 | | - ); |
| 1497 | + dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD, |
| 1498 | + &dd->sdma_pad_phys, GFP_KERNEL); |
| 1489 | 1499 | if (!dd->sdma_pad_dma) { |
| 1490 | 1500 | dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); |
| 1491 | 1501 | goto bail; |
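All three conversions are mechanical: `dma_alloc_coherent()` was changed to always return zeroed memory, which made the `dma_zalloc_coherent()` wrapper redundant before its removal. The shape of each call site, sketched with illustrative names:

```c
#include <linux/dma-mapping.h>

/* dma_alloc_coherent() already zeroes the buffer, so no memset()
 * (and no _zalloc_ variant) is needed */
static void *demo_alloc_descq(struct device *dev, size_t descq_cnt,
			      dma_addr_t *phys)
{
	return dma_alloc_coherent(dev, descq_cnt * sizeof(u64[2]),
				  phys, GFP_KERNEL);
}
```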
| .. | .. |
| 1627 | 1637 | { |
| 1628 | 1638 | switch (sdma_mapping_type(descp)) { |
| 1629 | 1639 | case SDMA_MAP_SINGLE: |
| 1630 | | - dma_unmap_single( |
| 1631 | | - &dd->pcidev->dev, |
| 1632 | | - sdma_mapping_addr(descp), |
| 1633 | | - sdma_mapping_len(descp), |
| 1634 | | - DMA_TO_DEVICE); |
| 1640 | + dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp), |
| 1641 | + sdma_mapping_len(descp), DMA_TO_DEVICE); |
| 1635 | 1642 | break; |
| 1636 | 1643 | case SDMA_MAP_PAGE: |
| 1637 | | - dma_unmap_page( |
| 1638 | | - &dd->pcidev->dev, |
| 1639 | | - sdma_mapping_addr(descp), |
| 1640 | | - sdma_mapping_len(descp), |
| 1641 | | - DMA_TO_DEVICE); |
| 1644 | + dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp), |
| 1645 | + sdma_mapping_len(descp), DMA_TO_DEVICE); |
| 1642 | 1646 | break; |
| 1643 | 1647 | } |
| 1648 | + |
| 1649 | + if (descp->pinning_ctx && descp->ctx_put) |
| 1650 | + descp->ctx_put(descp->pinning_ctx); |
| 1651 | + descp->pinning_ctx = NULL; |
| 1644 | 1652 | } |
| 1645 | 1653 | |
| 1646 | 1654 | /* |
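The two new per-descriptor fields make page-pinning lifetime follow the descriptor: an opaque `pinning_ctx` cookie plus a `ctx_put` callback that drops one reference once the DMA mapping is gone. A sketch of the contract, with illustrative names:

```c
/* mirrors the new sdma_desc fields: an opaque owner cookie and the
 * callback that releases one reference on it */
struct demo_desc {
	void *pinning_ctx;
	void (*ctx_put)(void *ctx);
};

static void demo_desc_release(struct demo_desc *d)
{
	/* DMA unmapping happens first, as in sdma_unmap_desc() above */
	if (d->pinning_ctx && d->ctx_put)
		d->ctx_put(d->pinning_ctx);
	d->pinning_ctx = NULL;	/* guard against a double put */
}
```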
| .. | .. |
| 1756 | 1764 | */ |
| 1757 | 1765 | static void sdma_desc_avail(struct sdma_engine *sde, uint avail) |
| 1758 | 1766 | { |
| 1759 | | - struct iowait *wait, *nw; |
| 1767 | + struct iowait *wait, *nw, *twait; |
| 1760 | 1768 | struct iowait *waits[SDMA_WAIT_BATCH_SIZE]; |
| 1761 | | - uint i, n = 0, seq, max_idx = 0; |
| 1762 | | - struct sdma_txreq *stx; |
| 1763 | | - struct hfi1_ibdev *dev = &sde->dd->verbs_dev; |
| 1764 | | - u8 max_starved_cnt = 0; |
| 1769 | + uint i, n = 0, seq, tidx = 0; |
| 1765 | 1770 | |
| 1766 | 1771 | #ifdef CONFIG_SDMA_VERBOSITY |
| 1767 | 1772 | dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, |
| .. | .. |
| 1770 | 1775 | #endif |
| 1771 | 1776 | |
| 1772 | 1777 | do { |
| 1773 | | - seq = read_seqbegin(&dev->iowait_lock); |
| 1778 | + seq = read_seqbegin(&sde->waitlock); |
| 1774 | 1779 | if (!list_empty(&sde->dmawait)) { |
| 1775 | 1780 | /* at least one item */ |
| 1776 | | - write_seqlock(&dev->iowait_lock); |
| 1781 | + write_seqlock(&sde->waitlock); |
| 1777 | 1782 | /* Harvest waiters wanting DMA descriptors */ |
| 1778 | 1783 | list_for_each_entry_safe( |
| 1779 | 1784 | wait, |
| 1780 | 1785 | nw, |
| 1781 | 1786 | &sde->dmawait, |
| 1782 | 1787 | list) { |
| 1783 | | - u16 num_desc = 0; |
| 1788 | + u32 num_desc; |
| 1784 | 1789 | |
| 1785 | 1790 | if (!wait->wakeup) |
| 1786 | 1791 | continue; |
| 1787 | 1792 | if (n == ARRAY_SIZE(waits)) |
| 1788 | 1793 | break; |
| 1789 | | - if (!list_empty(&wait->tx_head)) { |
| 1790 | | - stx = list_first_entry( |
| 1791 | | - &wait->tx_head, |
| 1792 | | - struct sdma_txreq, |
| 1793 | | - list); |
| 1794 | | - num_desc = stx->num_desc; |
| 1795 | | - } |
| 1794 | + iowait_init_priority(wait); |
| 1795 | + num_desc = iowait_get_all_desc(wait); |
| 1796 | 1796 | if (num_desc > avail) |
| 1797 | 1797 | break; |
| 1798 | 1798 | avail -= num_desc; |
| 1799 | | - /* Find the most starved wait memeber */ |
| 1800 | | - iowait_starve_find_max(wait, &max_starved_cnt, |
| 1801 | | - n, &max_idx); |
| 1799 | + /* Find the top-priority wait member */ |
| 1800 | + if (n) { |
| 1801 | + twait = waits[tidx]; |
| 1802 | + tidx = |
| 1803 | + iowait_priority_update_top(wait, |
| 1804 | + twait, |
| 1805 | + n, |
| 1806 | + tidx); |
| 1807 | + } |
| 1802 | 1808 | list_del_init(&wait->list); |
| 1803 | 1809 | waits[n++] = wait; |
| 1804 | 1810 | } |
| 1805 | | - write_sequnlock(&dev->iowait_lock); |
| 1811 | + write_sequnlock(&sde->waitlock); |
| 1806 | 1812 | break; |
| 1807 | 1813 | } |
| 1808 | | - } while (read_seqretry(&dev->iowait_lock, seq)); |
| 1814 | + } while (read_seqretry(&sde->waitlock, seq)); |
| 1809 | 1815 | |
| 1810 | | - /* Schedule the most starved one first */ |
| 1816 | + /* Schedule the top-priority entry first */ |
| 1811 | 1817 | if (n) |
| 1812 | | - waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON); |
| 1818 | + waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON); |
| 1813 | 1819 | |
| 1814 | 1820 | for (i = 0; i < n; i++) |
| 1815 | | - if (i != max_idx) |
| 1821 | + if (i != tidx) |
| 1816 | 1822 | waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON); |
| 1817 | 1823 | } |
| 1818 | 1824 | |
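The rewritten harvest admits a waiter only if its whole descriptor demand fits in the ring's current headroom, tracks the top-priority entry's index as it goes, and wakes that entry first so it cannot be starved by the rest of the batch. A simplified sketch of the admission loop; `demo_waiter` and `demo_desc_demand()` are hypothetical stand-ins for `struct iowait` and `iowait_get_all_desc()`:

```c
#include <linux/list.h>

struct demo_waiter {
	struct list_head list;
};

/* hypothetical: total descriptors this waiter needs to progress */
static unsigned int demo_desc_demand(struct demo_waiter *w)
{
	return 1;
}

static unsigned int demo_harvest(struct list_head *dmawait,
				 struct demo_waiter **out, unsigned int max,
				 unsigned int avail)
{
	struct demo_waiter *w, *nw;
	unsigned int n = 0;

	list_for_each_entry_safe(w, nw, dmawait, list) {
		unsigned int need = demo_desc_demand(w);

		if (n == max || need > avail)
			break;		/* batch full or would overcommit */
		avail -= need;
		list_del_init(&w->list);
		out[n++] = w;		/* caller wakes top priority first */
	}
	return n;
}
```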
| .. | .. |
| 2347 | 2353 | */ |
| 2348 | 2354 | static int sdma_check_progress( |
| 2349 | 2355 | struct sdma_engine *sde, |
| 2350 | | - struct iowait *wait, |
| 2356 | + struct iowait_work *wait, |
| 2351 | 2357 | struct sdma_txreq *tx, |
| 2352 | 2358 | bool pkts_sent) |
| 2353 | 2359 | { |
| .. | .. |
| 2357 | 2363 | if (tx->num_desc <= sde->desc_avail) |
| 2358 | 2364 | return -EAGAIN; |
| 2359 | 2365 | /* pulse the head_lock */ |
| 2360 | | - if (wait && wait->sleep) { |
| 2366 | + if (wait && iowait_ioww_to_iow(wait)->sleep) { |
| 2361 | 2367 | unsigned seq; |
| 2362 | 2368 | |
| 2363 | 2369 | seq = raw_seqcount_begin( |
| 2364 | 2370 | (const seqcount_t *)&sde->head_lock.seqcount); |
| 2365 | | - ret = wait->sleep(sde, wait, tx, seq, pkts_sent); |
| 2371 | + ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent); |
| 2366 | 2372 | if (ret == -EAGAIN) |
| 2367 | 2373 | sde->desc_avail = sdma_descq_freecnt(sde); |
| 2368 | 2374 | } else { |
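The submission paths now carry an `iowait_work` (one per engine class inside the parent `iowait`) rather than the `iowait` itself; `iowait_ioww_to_iow()` walks back to the parent, and NULL maps to NULL so the optional-wait call sites keep working. A structural sketch with illustrative field names:

```c
#include <linux/list.h>

struct demo_iowait;

/* per-engine work element embedded in the parent wait structure */
struct demo_iowait_work {
	struct demo_iowait *iow;	/* back-pointer to the parent */
	struct list_head tx_head;	/* txreqs pending on this engine */
};

struct demo_iowait {
	struct demo_iowait_work wait[2];	/* e.g. IB and TID SDMA */
};

static inline struct demo_iowait *
demo_ioww_to_iow(struct demo_iowait_work *w)
{
	return w ? w->iow : NULL;	/* NULL-safe, like the real helper */
}
```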
| .. | .. |
| 2374 | 2380 | /** |
| 2375 | 2381 | * sdma_send_txreq() - submit a tx req to ring |
| 2376 | 2382 | * @sde: sdma engine to use |
| 2377 | | - * @wait: wait structure to use when full (may be NULL) |
| 2383 | + * @wait: SE wait structure to use when full (may be NULL) |
| 2378 | 2384 | * @tx: sdma_txreq to submit |
| 2379 | 2385 | * @pkts_sent: has any packet been sent yet? |
| 2380 | 2386 | * |
| .. | .. |
| 2387 | 2393 | * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state |
| 2388 | 2394 | */ |
| 2389 | 2395 | int sdma_send_txreq(struct sdma_engine *sde, |
| 2390 | | - struct iowait *wait, |
| 2396 | + struct iowait_work *wait, |
| 2391 | 2397 | struct sdma_txreq *tx, |
| 2392 | 2398 | bool pkts_sent) |
| 2393 | 2399 | { |
| .. | .. |
| 2398 | 2404 | /* user should have supplied entire packet */ |
| 2399 | 2405 | if (unlikely(tx->tlen)) |
| 2400 | 2406 | return -EINVAL; |
| 2401 | | - tx->wait = wait; |
| 2407 | + tx->wait = iowait_ioww_to_iow(wait); |
| 2402 | 2408 | spin_lock_irqsave(&sde->tail_lock, flags); |
| 2403 | 2409 | retry: |
| 2404 | 2410 | if (unlikely(!__sdma_running(sde))) |
| .. | .. |
| 2407 | 2413 | goto nodesc; |
| 2408 | 2414 | tail = submit_tx(sde, tx); |
| 2409 | 2415 | if (wait) |
| 2410 | | - iowait_sdma_inc(wait); |
| 2416 | + iowait_sdma_inc(iowait_ioww_to_iow(wait)); |
| 2411 | 2417 | sdma_update_tail(sde, tail); |
| 2412 | 2418 | unlock: |
| 2413 | 2419 | spin_unlock_irqrestore(&sde->tail_lock, flags); |
| 2414 | 2420 | return ret; |
| 2415 | 2421 | unlock_noconn: |
| 2416 | 2422 | if (wait) |
| 2417 | | - iowait_sdma_inc(wait); |
| 2423 | + iowait_sdma_inc(iowait_ioww_to_iow(wait)); |
| 2418 | 2424 | tx->next_descq_idx = 0; |
| 2419 | 2425 | #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER |
| 2420 | 2426 | tx->sn = sde->tail_sn++; |
| .. | .. |
| 2423 | 2429 | spin_lock(&sde->flushlist_lock); |
| 2424 | 2430 | list_add_tail(&tx->list, &sde->flushlist); |
| 2425 | 2431 | spin_unlock(&sde->flushlist_lock); |
| 2426 | | - if (wait) { |
| 2427 | | - wait->tx_count++; |
| 2428 | | - wait->count += tx->num_desc; |
| 2429 | | - } |
| 2432 | + iowait_inc_wait_count(wait, tx->num_desc); |
| 2430 | 2433 | queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); |
| 2431 | 2434 | ret = -ECOMM; |
| 2432 | 2435 | goto unlock; |
| .. | .. |
| 2443 | 2446 | /** |
| 2444 | 2447 | * sdma_send_txlist() - submit a list of tx req to ring |
| 2445 | 2448 | * @sde: sdma engine to use |
| 2446 | | - * @wait: wait structure to use when full (may be NULL) |
| 2449 | + * @wait: SE wait structure to use when full (may be NULL) |
| 2447 | 2450 | * @tx_list: list of sdma_txreqs to submit |
| 2448 | | - * @count: pointer to a u32 which, after return will contain the total number of |
| 2451 | + * @count: pointer to a u16 which, after return will contain the total number of |
| 2449 | 2452 | * sdma_txreqs removed from the tx_list. This will include sdma_txreqs |
| 2450 | 2453 | * whose SDMA descriptors are submitted to the ring and the sdma_txreqs |
| 2451 | 2454 | * which are added to SDMA engine flush list if the SDMA engine state is |
| .. | .. |
| 2468 | 2471 | * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) |
| 2469 | 2472 | * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state |
| 2470 | 2473 | */ |
| 2471 | | -int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait, |
| 2472 | | - struct list_head *tx_list, u32 *count_out) |
| 2474 | +int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait, |
| 2475 | + struct list_head *tx_list, u16 *count_out) |
| 2473 | 2476 | { |
| 2474 | 2477 | struct sdma_txreq *tx, *tx_next; |
| 2475 | 2478 | int ret = 0; |
| .. | .. |
| 2480 | 2483 | spin_lock_irqsave(&sde->tail_lock, flags); |
| 2481 | 2484 | retry: |
| 2482 | 2485 | list_for_each_entry_safe(tx, tx_next, tx_list, list) { |
| 2483 | | - tx->wait = wait; |
| 2486 | + tx->wait = iowait_ioww_to_iow(wait); |
| 2484 | 2487 | if (unlikely(!__sdma_running(sde))) |
| 2485 | 2488 | goto unlock_noconn; |
| 2486 | 2489 | if (unlikely(tx->num_desc > sde->desc_avail)) |
| .. | .. |
| 2501 | 2504 | update_tail: |
| 2502 | 2505 | total_count = submit_count + flush_count; |
| 2503 | 2506 | if (wait) { |
| 2504 | | - iowait_sdma_add(wait, total_count); |
| 2505 | | - iowait_starve_clear(submit_count > 0, wait); |
| 2507 | + iowait_sdma_add(iowait_ioww_to_iow(wait), total_count); |
| 2508 | + iowait_starve_clear(submit_count > 0, |
| 2509 | + iowait_ioww_to_iow(wait)); |
| 2506 | 2510 | } |
| 2507 | 2511 | if (tail != INVALID_TAIL) |
| 2508 | 2512 | sdma_update_tail(sde, tail); |
| .. | .. |
| 2512 | 2516 | unlock_noconn: |
| 2513 | 2517 | spin_lock(&sde->flushlist_lock); |
| 2514 | 2518 | list_for_each_entry_safe(tx, tx_next, tx_list, list) { |
| 2515 | | - tx->wait = wait; |
| 2519 | + tx->wait = iowait_ioww_to_iow(wait); |
| 2516 | 2520 | list_del_init(&tx->list); |
| 2517 | 2521 | tx->next_descq_idx = 0; |
| 2518 | 2522 | #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER |
| .. | .. |
| 2521 | 2525 | #endif |
| 2522 | 2526 | list_add_tail(&tx->list, &sde->flushlist); |
| 2523 | 2527 | flush_count++; |
| 2524 | | - if (wait) { |
| 2525 | | - wait->tx_count++; |
| 2526 | | - wait->count += tx->num_desc; |
| 2527 | | - } |
| 2528 | + iowait_inc_wait_count(wait, tx->num_desc); |
| 2528 | 2529 | } |
| 2529 | 2530 | spin_unlock(&sde->flushlist_lock); |
| 2530 | 2531 | queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); |
| .. | .. |
| 2583 | 2584 | * 7220, e.g. |
| 2584 | 2585 | */ |
| 2585 | 2586 | ss->go_s99_running = 1; |
| 2586 | | - /* fall through -- and start dma engine */ |
| 2587 | + fallthrough; /* and start dma engine */ |
| 2587 | 2588 | case sdma_event_e10_go_hw_start: |
| 2588 | 2589 | /* This reference means the state machine is started */ |
| 2589 | 2590 | sdma_get(&sde->state); |
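The `fallthrough;` pseudo-keyword (mapped to `__attribute__((fallthrough))` where the compiler supports it) replaces the old comment convention, so `-Wimplicit-fallthrough` can distinguish deliberate fall-through from a missing `break`. A minimal sketch:

```c
#include <linux/compiler_attributes.h>

static void demo_handle(int event)
{
	switch (event) {
	case 0:
		/* shared set-up, then start */
		fallthrough;	/* deliberate: verified by the compiler */
	case 1:
		/* start */
		break;
	default:
		break;
	}
}
```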
| .. | .. |
| 2725 | 2726 | case sdma_event_e70_go_idle: |
| 2726 | 2727 | break; |
| 2727 | 2728 | case sdma_event_e85_link_down: |
| 2728 | | - /* fall through */ |
| 2729 | 2729 | case sdma_event_e80_hw_freeze: |
| 2730 | 2730 | sdma_set_state(sde, sdma_state_s80_hw_freeze); |
| 2731 | 2731 | atomic_dec(&sde->dd->sdma_unfreeze_count); |
| .. | .. |
| 3006 | 3006 | case sdma_event_e60_hw_halted: |
| 3007 | 3007 | need_progress = 1; |
| 3008 | 3008 | sdma_err_progress_check_schedule(sde); |
| 3009 | | - /* fall through */ |
| 3009 | + fallthrough; |
| 3010 | 3010 | case sdma_event_e90_sw_halted: |
| 3011 | 3011 | /* |
| 3012 | 3012 | * SW initiated halt does not perform engines |
| .. | .. |
| 3020 | 3020 | break; |
| 3021 | 3021 | case sdma_event_e85_link_down: |
| 3022 | 3022 | ss->go_s99_running = 0; |
| 3023 | | - /* fall through */ |
| 3023 | + fallthrough; |
| 3024 | 3024 | case sdma_event_e80_hw_freeze: |
| 3025 | 3025 | sdma_set_state(sde, sdma_state_s80_hw_freeze); |
| 3026 | 3026 | atomic_dec(&sde->dd->sdma_unfreeze_count); |
| .. | .. |
| 3169 | 3169 | /* Add descriptor for coalesce buffer */ |
| 3170 | 3170 | tx->desc_limit = MAX_DESC; |
| 3171 | 3171 | return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, |
| 3172 | | - addr, tx->tlen); |
| 3172 | + addr, tx->tlen, NULL, NULL, NULL); |
| 3173 | 3173 | } |
| 3174 | 3174 | |
| 3175 | 3175 | return 1; |
| .. | .. |
| 3200 | 3200 | { |
| 3201 | 3201 | int rval = 0; |
| 3202 | 3202 | |
| 3203 | | - tx->num_desc++; |
| 3204 | | - if ((unlikely(tx->num_desc == tx->desc_limit))) { |
| 3203 | + if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) { |
| 3205 | 3204 | rval = _extend_sdma_tx_descs(dd, tx); |
| 3206 | 3205 | if (rval) { |
| 3207 | 3206 | __sdma_txclean(dd, tx); |
| 3208 | 3207 | return rval; |
| 3209 | 3208 | } |
| 3210 | 3209 | } |
| 3210 | + |
| 3211 | 3211 | /* finish the one just added */ |
| 3212 | 3212 | make_tx_sdma_desc( |
| 3213 | 3213 | tx, |
| 3214 | 3214 | SDMA_MAP_NONE, |
| 3215 | 3215 | dd->sdma_pad_phys, |
| 3216 | | - sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); |
| 3216 | + sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)), |
| 3217 | + NULL, NULL, NULL); |
| 3218 | + tx->num_desc++; |
| 3217 | 3219 | _sdma_close_tx(dd, tx); |
| 3218 | 3220 | return rval; |
| 3219 | 3221 | } |
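The reordering above fixes an accounting hazard: `num_desc` used to be bumped before the limit check, so a failed extension left the count overstating what had actually been built. Now the code tests whether one more descriptor would hit the limit, builds the pad descriptor, and only then increments. A sketch with hypothetical helpers:

```c
struct demo_tx {
	u16 num_desc;
	u16 desc_limit;
};

/* hypothetical stand-ins for _extend_sdma_tx_descs() and
 * make_tx_sdma_desc() */
static int demo_extend(struct demo_tx *tx)
{
	tx->desc_limit *= 2;
	return 0;
}

static void demo_build_pad_desc(struct demo_tx *tx) { }

static int demo_add_pad_desc(struct demo_tx *tx)
{
	/* would the next descriptor hit the current limit? */
	if (tx->num_desc + 1 == tx->desc_limit) {
		int rval = demo_extend(tx);

		if (rval)
			return rval;	/* count still matches reality */
	}
	demo_build_pad_desc(tx);
	tx->num_desc++;		/* count only what was actually built */
	return 0;
}
```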
| .. | .. |
| 3250 | 3252 | tx->num_desc++; |
| 3251 | 3253 | tx->descs[2].qw[0] = 0; |
| 3252 | 3254 | tx->descs[2].qw[1] = 0; |
| 3253 | | - /* FALLTHROUGH */ |
| 3255 | + fallthrough; |
| 3254 | 3256 | case SDMA_AHG_APPLY_UPDATE2: |
| 3255 | 3257 | tx->num_desc++; |
| 3256 | 3258 | tx->descs[1].qw[0] = 0; |
|---|