.. | .. |
---|
232 | 232 | static void sdma_complete(struct kref *); |
---|
233 | 233 | static void sdma_finalput(struct sdma_state *); |
---|
234 | 234 | static void sdma_get(struct sdma_state *); |
---|
235 | | -static void sdma_hw_clean_up_task(unsigned long); |
---|
| 235 | +static void sdma_hw_clean_up_task(struct tasklet_struct *); |
---|
236 | 236 | static void sdma_put(struct sdma_state *); |
---|
237 | 237 | static void sdma_set_state(struct sdma_engine *, enum sdma_states); |
---|
238 | 238 | static void sdma_start_hw_clean_up(struct sdma_engine *); |
---|
239 | | -static void sdma_sw_clean_up_task(unsigned long); |
---|
| 239 | +static void sdma_sw_clean_up_task(struct tasklet_struct *); |
---|
240 | 240 | static void sdma_sendctrl(struct sdma_engine *, unsigned); |
---|
241 | 241 | static void init_sdma_regs(struct sdma_engine *, u32, uint); |
---|
242 | 242 | static void sdma_process_event( |
---|
.. | .. |
---|
379 | 379 | __sdma_txclean(sde->dd, tx); |
---|
380 | 380 | if (complete) |
---|
381 | 381 | (*complete)(tx, res); |
---|
382 | | - if (wait && iowait_sdma_dec(wait)) |
---|
| 382 | + if (iowait_sdma_dec(wait)) |
---|
383 | 383 | iowait_drain_wakeup(wait); |
---|
384 | 384 | } |
---|
385 | 385 | |
---|
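The hunk above drops the `wait &&` guard in front of `iowait_sdma_dec()`, which reads as if the helper itself now tolerates a NULL wait. A minimal sketch of such a NULL-safe helper, with the struct reduced to the one assumed field it touches:

```c
#include <linux/atomic.h>

/* Assumed shape: only the counter this sketch needs is shown. */
struct iowait {
        atomic_t sdma_busy;     /* txreqs still outstanding on the SDMA engine */
        /* ... */
};

static inline int iowait_sdma_dec(struct iowait *wait)
{
        if (!wait)
                return 0;                               /* no waiter: nothing to drain or wake */
        return atomic_dec_and_test(&wait->sdma_busy);   /* true when the last txreq retires */
}
```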
.. | .. |
---|
406 | 406 | struct sdma_txreq *txp, *txp_next; |
---|
407 | 407 | LIST_HEAD(flushlist); |
---|
408 | 408 | unsigned long flags; |
---|
| 409 | + uint seq; |
---|
409 | 410 | |
---|
410 | 411 | /* flush from head to tail */ |
---|
411 | 412 | sdma_flush_descq(sde); |
---|
.. | .. |
---|
416 | 417 | /* flush from flush list */ |
---|
417 | 418 | list_for_each_entry_safe(txp, txp_next, &flushlist, list) |
---|
418 | 419 | complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED); |
---|
| 420 | + /* wakeup QPs orphaned on the dmawait list */ |
---|
| 421 | + do { |
---|
| 422 | + struct iowait *w, *nw; |
---|
| 423 | + |
---|
| 424 | + seq = read_seqbegin(&sde->waitlock); |
---|
| 425 | + if (!list_empty(&sde->dmawait)) { |
---|
| 426 | + write_seqlock(&sde->waitlock); |
---|
| 427 | + list_for_each_entry_safe(w, nw, &sde->dmawait, list) { |
---|
| 428 | + if (w->wakeup) { |
---|
| 429 | + w->wakeup(w, SDMA_AVAIL_REASON); |
---|
| 430 | + list_del_init(&w->list); |
---|
| 431 | + } |
---|
| 432 | + } |
---|
| 433 | + write_sequnlock(&sde->waitlock); |
---|
| 434 | + } |
---|
| 435 | + } while (read_seqretry(&sde->waitlock, seq)); |
---|
419 | 436 | } |
---|
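The wakeup loop added to sdma_flush() follows the usual seqlock idiom: peek at the list under a lockless read sequence, upgrade to the write side only when there is actually work, and retry if a writer moved the sequence underneath the reader. A stripped-down sketch of that idiom, with a placeholder `waiter` type and `wake()` callback:

```c
#include <linux/seqlock.h>
#include <linux/list.h>

struct waiter {
        struct list_head list;
        void (*wake)(struct waiter *w);
};

static void drain_waiters(seqlock_t *lock, struct list_head *head)
{
        struct waiter *w, *nw;
        unsigned int seq;

        do {
                seq = read_seqbegin(lock);              /* lockless snapshot */
                if (!list_empty(head)) {
                        write_seqlock(lock);            /* upgrade: there really is work */
                        list_for_each_entry_safe(w, nw, head, list) {
                                list_del_init(&w->list);
                                w->wake(w);
                        }
                        write_sequnlock(lock);
                }
        } while (read_seqretry(lock, seq));             /* raced with a writer: look again */
}
```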
420 | 437 | |
---|
421 | 438 | /* |
---|
.. | .. |
---|
528 | 545 | schedule_work(&sde->err_halt_worker); |
---|
529 | 546 | } |
---|
530 | 547 | |
---|
531 | | -static void sdma_hw_clean_up_task(unsigned long opaque) |
---|
| 548 | +static void sdma_hw_clean_up_task(struct tasklet_struct *t) |
---|
532 | 549 | { |
---|
533 | | - struct sdma_engine *sde = (struct sdma_engine *)opaque; |
---|
| 550 | + struct sdma_engine *sde = from_tasklet(sde, t, |
---|
| 551 | + sdma_hw_clean_up_task); |
---|
534 | 552 | u64 statuscsr; |
---|
535 | 553 | |
---|
536 | 554 | while (1) { |
---|
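This hunk is part of the tree-wide move away from the old `tasklet_init(..., (unsigned long)data)` style: the callback now receives the `struct tasklet_struct *` itself and recovers its container with `from_tasklet()`, a `container_of()` wrapper, so no opaque cast is needed. A minimal sketch of the pattern using a made-up `struct my_engine`:

```c
#include <linux/interrupt.h>

struct my_engine {
        struct tasklet_struct cleanup_task;
        int busy;
};

static void cleanup_task_fn(struct tasklet_struct *t)
{
        /* from_tasklet(var, tasklet, member) == container_of(tasklet, typeof(*var), member) */
        struct my_engine *eng = from_tasklet(eng, t, cleanup_task);

        eng->busy = 0;
}

static void my_engine_init(struct my_engine *eng)
{
        /* replaces: tasklet_init(&eng->cleanup_task, fn, (unsigned long)eng); */
        tasklet_setup(&eng->cleanup_task, cleanup_task_fn);
}
```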
.. | .. |
---|
587 | 605 | sdma_desc_avail(sde, sdma_descq_freecnt(sde)); |
---|
588 | 606 | } |
---|
589 | 607 | |
---|
590 | | -static void sdma_sw_clean_up_task(unsigned long opaque) |
---|
| 608 | +static void sdma_sw_clean_up_task(struct tasklet_struct *t) |
---|
591 | 609 | { |
---|
592 | | - struct sdma_engine *sde = (struct sdma_engine *)opaque; |
---|
| 610 | + struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task); |
---|
593 | 611 | unsigned long flags; |
---|
594 | 612 | |
---|
595 | 613 | spin_lock_irqsave(&sde->tail_lock, flags); |
---|
.. | .. |
---|
816 | 834 | struct sdma_rht_map_elem { |
---|
817 | 835 | u32 mask; |
---|
818 | 836 | u8 ctr; |
---|
819 | | - struct sdma_engine *sde[0]; |
---|
| 837 | + struct sdma_engine *sde[]; |
---|
820 | 838 | }; |
---|
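`sde[0]` is the old GCC zero-length-array trick; the C99 flexible array member `sde[]` expresses the same "header plus trailing array" layout while letting the compiler and fortify checks see the bounds. Allocation is typically paired with `struct_size()` so the size arithmetic is overflow-checked; the allocation site below is illustrative, not lifted from this driver:

```c
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct sdma_engine;

struct sdma_rht_map_elem_example {
        u32 mask;
        u8 ctr;
        struct sdma_engine *sde[];      /* flexible array member, must be last */
};

static struct sdma_rht_map_elem_example *alloc_map_elem(unsigned int nengines)
{
        struct sdma_rht_map_elem_example *map;

        /* struct_size(): sizeof(*map) + nengines * sizeof(map->sde[0]), overflow-checked */
        map = kzalloc(struct_size(map, sde, nengines), GFP_KERNEL);
        return map;
}
```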
821 | 839 | |
---|
822 | 840 | struct sdma_rht_node { |
---|
.. | .. |
---|
831 | 849 | .nelem_hint = NR_CPUS_HINT, |
---|
832 | 850 | .head_offset = offsetof(struct sdma_rht_node, node), |
---|
833 | 851 | .key_offset = offsetof(struct sdma_rht_node, cpu_id), |
---|
834 | | - .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id), |
---|
| 852 | + .key_len = sizeof_field(struct sdma_rht_node, cpu_id), |
---|
835 | 853 | .max_size = NR_CPUS, |
---|
836 | 854 | .min_size = 8, |
---|
837 | 855 | .automatic_shrinking = true, |
---|
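`FIELD_SIZEOF()` was renamed to `sizeof_field()` with identical semantics: the size of a struct member without needing an instance. A tiny illustration with a stand-in struct:

```c
#include <linux/stddef.h>

struct sdma_rht_node_example {
        unsigned long cpu_id;
};

/* expands to sizeof(((struct sdma_rht_node_example *)0)->cpu_id) */
#define CPU_ID_KEY_LEN sizeof_field(struct sdma_rht_node_example, cpu_id)
```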
.. | .. |
---|
862 | 880 | if (current->nr_cpus_allowed != 1) |
---|
863 | 881 | goto out; |
---|
864 | 882 | |
---|
865 | | - cpu_id = smp_processor_id(); |
---|
866 | 883 | rcu_read_lock(); |
---|
867 | | - rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id, |
---|
868 | | - sdma_rht_params); |
---|
| 884 | + cpu_id = smp_processor_id(); |
---|
| 885 | + rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id, |
---|
| 886 | + sdma_rht_params); |
---|
869 | 887 | |
---|
870 | 888 | if (rht_node && rht_node->map[vl]) { |
---|
871 | 889 | struct sdma_rht_map_elem *map = rht_node->map[vl]; |
---|
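The lookup switches from `rhashtable_lookup_fast()`, which takes `rcu_read_lock()` internally, to plain `rhashtable_lookup()`, which requires the caller to already be inside an RCU read-side critical section; moving `smp_processor_id()` after `rcu_read_lock()` also keeps the CPU snapshot and the lookup closer together. A sketch of the caller-holds-RCU pattern with placeholder names:

```c
#include <linux/rhashtable.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/stddef.h>

struct cpu_node {
        unsigned long cpu_id;
        struct rhash_head node;
};

static const struct rhashtable_params cpu_params = {
        .key_offset  = offsetof(struct cpu_node, cpu_id),
        .key_len     = sizeof_field(struct cpu_node, cpu_id),
        .head_offset = offsetof(struct cpu_node, node),
};

static bool this_cpu_is_mapped(struct rhashtable *ht)
{
        struct cpu_node *n;
        unsigned long cpu;
        bool found;

        rcu_read_lock();                        /* rhashtable_lookup() requires RCU held */
        cpu = smp_processor_id();
        n = rhashtable_lookup(ht, &cpu, cpu_params);
        found = n != NULL;                      /* consume the result inside the RCU section */
        rcu_read_unlock();
        return found;
}
```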
.. | .. |
---|
1312 | 1330 | kvfree(sde->tx_ring); |
---|
1313 | 1331 | sde->tx_ring = NULL; |
---|
1314 | 1332 | } |
---|
1315 | | - spin_lock_irq(&dd->sde_map_lock); |
---|
1316 | | - sdma_map_free(rcu_access_pointer(dd->sdma_map)); |
---|
1317 | | - RCU_INIT_POINTER(dd->sdma_map, NULL); |
---|
1318 | | - spin_unlock_irq(&dd->sde_map_lock); |
---|
1319 | | - synchronize_rcu(); |
---|
| 1333 | + if (rcu_access_pointer(dd->sdma_map)) { |
---|
| 1334 | + spin_lock_irq(&dd->sde_map_lock); |
---|
| 1335 | + sdma_map_free(rcu_access_pointer(dd->sdma_map)); |
---|
| 1336 | + RCU_INIT_POINTER(dd->sdma_map, NULL); |
---|
| 1337 | + spin_unlock_irq(&dd->sde_map_lock); |
---|
| 1338 | + synchronize_rcu(); |
---|
| 1339 | + } |
---|
1320 | 1340 | kfree(dd->per_sdma); |
---|
1321 | 1341 | dd->per_sdma = NULL; |
---|
1322 | 1342 | |
---|
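The teardown is now skipped entirely when `dd->sdma_map` was never published (for example after an early init failure), avoiding a pointless lock round-trip and `synchronize_rcu()`. A condensed sketch of the guarded-teardown idiom with placeholder names, shown in the canonical order of unpublish, wait a grace period, then free:

```c
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct example_map { int nentries; };

struct example_dev {
        struct example_map __rcu *map;
        spinlock_t map_lock;
};

static void example_map_fini(struct example_dev *dev)
{
        struct example_map *map;

        if (!rcu_access_pointer(dev->map))      /* nothing was ever published: no work */
                return;

        spin_lock_irq(&dev->map_lock);
        map = rcu_dereference_protected(dev->map, lockdep_is_held(&dev->map_lock));
        RCU_INIT_POINTER(dev->map, NULL);       /* unpublish under the update-side lock */
        spin_unlock_irq(&dev->map_lock);

        synchronize_rcu();                      /* wait out any readers still using it */
        kfree(map);
}
```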
.. | .. |
---|
1421 | 1441 | seqlock_init(&sde->head_lock); |
---|
1422 | 1442 | spin_lock_init(&sde->senddmactrl_lock); |
---|
1423 | 1443 | spin_lock_init(&sde->flushlist_lock); |
---|
| 1444 | + seqlock_init(&sde->waitlock); |
---|
1424 | 1445 | /* insure there is always a zero bit */ |
---|
1425 | 1446 | sde->ahg_bits = 0xfffffffe00000000ULL; |
---|
1426 | 1447 | |
---|
.. | .. |
---|
1436 | 1457 | sde->tail_csr = |
---|
1437 | 1458 | get_kctxt_csr_addr(dd, this_idx, SD(TAIL)); |
---|
1438 | 1459 | |
---|
1439 | | - tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task, |
---|
1440 | | - (unsigned long)sde); |
---|
1441 | | - |
---|
1442 | | - tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task, |
---|
1443 | | - (unsigned long)sde); |
---|
| 1460 | + tasklet_setup(&sde->sdma_hw_clean_up_task, |
---|
| 1461 | + sdma_hw_clean_up_task); |
---|
| 1462 | + tasklet_setup(&sde->sdma_sw_clean_up_task, |
---|
| 1463 | + sdma_sw_clean_up_task); |
---|
1444 | 1464 | INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait); |
---|
1445 | 1465 | INIT_WORK(&sde->flush_worker, sdma_field_flush); |
---|
1446 | 1466 | |
---|
.. | .. |
---|
1449 | 1469 | timer_setup(&sde->err_progress_check_timer, |
---|
1450 | 1470 | sdma_err_progress_check, 0); |
---|
1451 | 1471 | |
---|
1452 | | - sde->descq = dma_zalloc_coherent( |
---|
1453 | | - &dd->pcidev->dev, |
---|
1454 | | - descq_cnt * sizeof(u64[2]), |
---|
1455 | | - &sde->descq_phys, |
---|
1456 | | - GFP_KERNEL |
---|
1457 | | - ); |
---|
| 1472 | + sde->descq = dma_alloc_coherent(&dd->pcidev->dev, |
---|
| 1473 | + descq_cnt * sizeof(u64[2]), |
---|
| 1474 | + &sde->descq_phys, GFP_KERNEL); |
---|
1458 | 1475 | if (!sde->descq) |
---|
1459 | 1476 | goto bail; |
---|
1460 | 1477 | sde->tx_ring = |
---|
.. | .. |
---|
1467 | 1484 | |
---|
1468 | 1485 | dd->sdma_heads_size = L1_CACHE_BYTES * num_engines; |
---|
1469 | 1486 | /* Allocate memory for DMA of head registers to memory */ |
---|
1470 | | - dd->sdma_heads_dma = dma_zalloc_coherent( |
---|
1471 | | - &dd->pcidev->dev, |
---|
1472 | | - dd->sdma_heads_size, |
---|
1473 | | - &dd->sdma_heads_phys, |
---|
1474 | | - GFP_KERNEL |
---|
1475 | | - ); |
---|
| 1487 | + dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev, |
---|
| 1488 | + dd->sdma_heads_size, |
---|
| 1489 | + &dd->sdma_heads_phys, |
---|
| 1490 | + GFP_KERNEL); |
---|
1476 | 1491 | if (!dd->sdma_heads_dma) { |
---|
1477 | 1492 | dd_dev_err(dd, "failed to allocate SendDMA head memory\n"); |
---|
1478 | 1493 | goto bail; |
---|
1479 | 1494 | } |
---|
1480 | 1495 | |
---|
1481 | 1496 | /* Allocate memory for pad */ |
---|
1482 | | - dd->sdma_pad_dma = dma_zalloc_coherent( |
---|
1483 | | - &dd->pcidev->dev, |
---|
1484 | | - SDMA_PAD, |
---|
1485 | | - &dd->sdma_pad_phys, |
---|
1486 | | - GFP_KERNEL |
---|
1487 | | - ); |
---|
| 1497 | + dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD, |
---|
| 1498 | + &dd->sdma_pad_phys, GFP_KERNEL); |
---|
1488 | 1499 | if (!dd->sdma_pad_dma) { |
---|
1489 | 1500 | dd_dev_err(dd, "failed to allocate SendDMA pad memory\n"); |
---|
1490 | 1501 | goto bail; |
---|
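The three allocations above drop `dma_zalloc_coherent()` in favour of `dma_alloc_coherent()`; the plain allocator already returns zeroed memory, so the `_z` variant became redundant and this is a rename plus reflow rather than a behaviour change. A small sketch of the allocate/free pairing, with placeholder fields and sizes:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct ring {
        void *cpu;              /* kernel virtual address of the descriptor ring */
        dma_addr_t phys;        /* bus address to program into the hardware */
        size_t size;
};

static int ring_alloc(struct device *dev, struct ring *r, size_t size)
{
        r->size = size;
        /* returns zeroed, coherent memory; no explicit memset needed */
        r->cpu = dma_alloc_coherent(dev, size, &r->phys, GFP_KERNEL);
        return r->cpu ? 0 : -ENOMEM;
}

static void ring_free(struct device *dev, struct ring *r)
{
        if (r->cpu)
                dma_free_coherent(dev, r->size, r->cpu, r->phys);
        r->cpu = NULL;
}
```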
.. | .. |
---|
1626 | 1637 | { |
---|
1627 | 1638 | switch (sdma_mapping_type(descp)) { |
---|
1628 | 1639 | case SDMA_MAP_SINGLE: |
---|
1629 | | - dma_unmap_single( |
---|
1630 | | - &dd->pcidev->dev, |
---|
1631 | | - sdma_mapping_addr(descp), |
---|
1632 | | - sdma_mapping_len(descp), |
---|
1633 | | - DMA_TO_DEVICE); |
---|
| 1640 | + dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp), |
---|
| 1641 | + sdma_mapping_len(descp), DMA_TO_DEVICE); |
---|
1634 | 1642 | break; |
---|
1635 | 1643 | case SDMA_MAP_PAGE: |
---|
1636 | | - dma_unmap_page( |
---|
1637 | | - &dd->pcidev->dev, |
---|
1638 | | - sdma_mapping_addr(descp), |
---|
1639 | | - sdma_mapping_len(descp), |
---|
1640 | | - DMA_TO_DEVICE); |
---|
| 1644 | + dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp), |
---|
| 1645 | + sdma_mapping_len(descp), DMA_TO_DEVICE); |
---|
1641 | 1646 | break; |
---|
1642 | 1647 | } |
---|
| 1648 | + |
---|
| 1649 | + if (descp->pinning_ctx && descp->ctx_put) |
---|
| 1650 | + descp->ctx_put(descp->pinning_ctx); |
---|
| 1651 | + descp->pinning_ctx = NULL; |
---|
1643 | 1652 | } |
---|
1644 | 1653 | |
---|
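Besides unmapping, descriptor cleanup now drops a reference on an optional "pinning context" through a per-descriptor `ctx_put` callback. The field names come from the hunk above; the kref plumbing behind them is an assumption, sketched here only to show one plausible ownership model:

```c
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/kernel.h>

/* Assumed shape of a shared pinning context; only the refcount is real to this sketch. */
struct pin_ctx {
        struct kref ref;
        /* ... pinned-page bookkeeping ... */
};

static void pin_ctx_release(struct kref *ref)
{
        struct pin_ctx *ctx = container_of(ref, struct pin_ctx, ref);

        /* a real implementation would unpin the pages here */
        kfree(ctx);
}

/* Candidate for a descriptor's ->ctx_put callback. */
static void pin_ctx_put(void *pinning_ctx)
{
        struct pin_ctx *ctx = pinning_ctx;

        kref_put(&ctx->ref, pin_ctx_release);
}
```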
1645 | 1654 | /* |
---|
.. | .. |
---|
1755 | 1764 | */ |
---|
1756 | 1765 | static void sdma_desc_avail(struct sdma_engine *sde, uint avail) |
---|
1757 | 1766 | { |
---|
1758 | | - struct iowait *wait, *nw; |
---|
| 1767 | + struct iowait *wait, *nw, *twait; |
---|
1759 | 1768 | struct iowait *waits[SDMA_WAIT_BATCH_SIZE]; |
---|
1760 | | - uint i, n = 0, seq, max_idx = 0; |
---|
1761 | | - struct sdma_txreq *stx; |
---|
1762 | | - struct hfi1_ibdev *dev = &sde->dd->verbs_dev; |
---|
1763 | | - u8 max_starved_cnt = 0; |
---|
| 1769 | + uint i, n = 0, seq, tidx = 0; |
---|
1764 | 1770 | |
---|
1765 | 1771 | #ifdef CONFIG_SDMA_VERBOSITY |
---|
1766 | 1772 | dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, |
---|
.. | .. |
---|
1769 | 1775 | #endif |
---|
1770 | 1776 | |
---|
1771 | 1777 | do { |
---|
1772 | | - seq = read_seqbegin(&dev->iowait_lock); |
---|
| 1778 | + seq = read_seqbegin(&sde->waitlock); |
---|
1773 | 1779 | if (!list_empty(&sde->dmawait)) { |
---|
1774 | 1780 | /* at least one item */ |
---|
1775 | | - write_seqlock(&dev->iowait_lock); |
---|
| 1781 | + write_seqlock(&sde->waitlock); |
---|
1776 | 1782 | /* Harvest waiters wanting DMA descriptors */ |
---|
1777 | 1783 | list_for_each_entry_safe( |
---|
1778 | 1784 | wait, |
---|
1779 | 1785 | nw, |
---|
1780 | 1786 | &sde->dmawait, |
---|
1781 | 1787 | list) { |
---|
1782 | | - u16 num_desc = 0; |
---|
| 1788 | + u32 num_desc; |
---|
1783 | 1789 | |
---|
1784 | 1790 | if (!wait->wakeup) |
---|
1785 | 1791 | continue; |
---|
1786 | 1792 | if (n == ARRAY_SIZE(waits)) |
---|
1787 | 1793 | break; |
---|
1788 | | - if (!list_empty(&wait->tx_head)) { |
---|
1789 | | - stx = list_first_entry( |
---|
1790 | | - &wait->tx_head, |
---|
1791 | | - struct sdma_txreq, |
---|
1792 | | - list); |
---|
1793 | | - num_desc = stx->num_desc; |
---|
1794 | | - } |
---|
| 1794 | + iowait_init_priority(wait); |
---|
| 1795 | + num_desc = iowait_get_all_desc(wait); |
---|
1795 | 1796 | if (num_desc > avail) |
---|
1796 | 1797 | break; |
---|
1797 | 1798 | avail -= num_desc; |
---|
1798 | | - /* Find the most starved wait memeber */ |
---|
1799 | | - iowait_starve_find_max(wait, &max_starved_cnt, |
---|
1800 | | - n, &max_idx); |
---|
| 1799 | + /* Find the top-priority wait member */ |
---|
| 1800 | + if (n) { |
---|
| 1801 | + twait = waits[tidx]; |
---|
| 1802 | + tidx = |
---|
| 1803 | + iowait_priority_update_top(wait, |
---|
| 1804 | + twait, |
---|
| 1805 | + n, |
---|
| 1806 | + tidx); |
---|
| 1807 | + } |
---|
1801 | 1808 | list_del_init(&wait->list); |
---|
1802 | 1809 | waits[n++] = wait; |
---|
1803 | 1810 | } |
---|
1804 | | - write_sequnlock(&dev->iowait_lock); |
---|
| 1811 | + write_sequnlock(&sde->waitlock); |
---|
1805 | 1812 | break; |
---|
1806 | 1813 | } |
---|
1807 | | - } while (read_seqretry(&dev->iowait_lock, seq)); |
---|
| 1814 | + } while (read_seqretry(&sde->waitlock, seq)); |
---|
1808 | 1815 | |
---|
1809 | | - /* Schedule the most starved one first */ |
---|
| 1816 | + /* Schedule the top-priority entry first */ |
---|
1810 | 1817 | if (n) |
---|
1811 | | - waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON); |
---|
| 1818 | + waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON); |
---|
1812 | 1819 | |
---|
1813 | 1820 | for (i = 0; i < n; i++) |
---|
1814 | | - if (i != max_idx) |
---|
| 1821 | + if (i != tidx) |
---|
1815 | 1822 | waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON); |
---|
1816 | 1823 | } |
---|
1817 | 1824 | |
---|
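The reworked sdma_desc_avail() harvests a batch of waiters, tracks the index of the highest-priority one as it walks, and then wakes that entry before the rest so the most deserving QP gets first access to the freed descriptors. A condensed sketch of that "pick the top index, wake it first" shape, with a placeholder priority comparison:

```c
struct waiter {
        int prio;
        void (*wakeup)(struct waiter *w);
};

static void wake_batch(struct waiter **waits, unsigned int n)
{
        unsigned int i, top = 0;

        for (i = 1; i < n; i++)                         /* track the best candidate so far */
                if (waits[i]->prio > waits[top]->prio)
                        top = i;

        if (n)
                waits[top]->wakeup(waits[top]);         /* top-priority entry goes first */

        for (i = 0; i < n; i++)
                if (i != top)
                        waits[i]->wakeup(waits[i]);
}
```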
.. | .. |
---|
2346 | 2353 | */ |
---|
2347 | 2354 | static int sdma_check_progress( |
---|
2348 | 2355 | struct sdma_engine *sde, |
---|
2349 | | - struct iowait *wait, |
---|
| 2356 | + struct iowait_work *wait, |
---|
2350 | 2357 | struct sdma_txreq *tx, |
---|
2351 | 2358 | bool pkts_sent) |
---|
2352 | 2359 | { |
---|
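sdma_check_progress() (and the submit paths further down) now take a `struct iowait_work *` rather than the `struct iowait` itself, and reach the parent wait structure through `iowait_ioww_to_iow()`. Its definition is not part of this hunk; from the way it is used (`wait->iow->sleep`, NULL waits being legal), a plausible shape is:

```c
struct iowait;

/* Assumption: each iowait_work keeps a back-pointer to its owning iowait. */
struct iowait_work {
        struct iowait *iow;
        /* ... */
};

static inline struct iowait *iowait_ioww_to_iow(struct iowait_work *w)
{
        return w ? w->iow : NULL;       /* callers may legitimately pass no wait */
}
```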
.. | .. |
---|
2356 | 2363 | if (tx->num_desc <= sde->desc_avail) |
---|
2357 | 2364 | return -EAGAIN; |
---|
2358 | 2365 | /* pulse the head_lock */ |
---|
2359 | | - if (wait && wait->sleep) { |
---|
| 2366 | + if (wait && iowait_ioww_to_iow(wait)->sleep) { |
---|
2360 | 2367 | unsigned seq; |
---|
2361 | 2368 | |
---|
2362 | 2369 | seq = raw_seqcount_begin( |
---|
2363 | 2370 | (const seqcount_t *)&sde->head_lock.seqcount); |
---|
2364 | | - ret = wait->sleep(sde, wait, tx, seq, pkts_sent); |
---|
| 2371 | + ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent); |
---|
2365 | 2372 | if (ret == -EAGAIN) |
---|
2366 | 2373 | sde->desc_avail = sdma_descq_freecnt(sde); |
---|
2367 | 2374 | } else { |
---|
.. | .. |
---|
2373 | 2380 | /** |
---|
2374 | 2381 | * sdma_send_txreq() - submit a tx req to ring |
---|
2375 | 2382 | * @sde: sdma engine to use |
---|
2376 | | - * @wait: wait structure to use when full (may be NULL) |
---|
| 2383 | + * @wait: SE wait structure to use when full (may be NULL) |
---|
2377 | 2384 | * @tx: sdma_txreq to submit |
---|
2378 | 2385 | * @pkts_sent: has any packet been sent yet? |
---|
2379 | 2386 | * |
---|
.. | .. |
---|
2386 | 2393 | * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state |
---|
2387 | 2394 | */ |
---|
2388 | 2395 | int sdma_send_txreq(struct sdma_engine *sde, |
---|
2389 | | - struct iowait *wait, |
---|
| 2396 | + struct iowait_work *wait, |
---|
2390 | 2397 | struct sdma_txreq *tx, |
---|
2391 | 2398 | bool pkts_sent) |
---|
2392 | 2399 | { |
---|
.. | .. |
---|
2397 | 2404 | /* user should have supplied entire packet */ |
---|
2398 | 2405 | if (unlikely(tx->tlen)) |
---|
2399 | 2406 | return -EINVAL; |
---|
2400 | | - tx->wait = wait; |
---|
| 2407 | + tx->wait = iowait_ioww_to_iow(wait); |
---|
2401 | 2408 | spin_lock_irqsave(&sde->tail_lock, flags); |
---|
2402 | 2409 | retry: |
---|
2403 | 2410 | if (unlikely(!__sdma_running(sde))) |
---|
.. | .. |
---|
2406 | 2413 | goto nodesc; |
---|
2407 | 2414 | tail = submit_tx(sde, tx); |
---|
2408 | 2415 | if (wait) |
---|
2409 | | - iowait_sdma_inc(wait); |
---|
| 2416 | + iowait_sdma_inc(iowait_ioww_to_iow(wait)); |
---|
2410 | 2417 | sdma_update_tail(sde, tail); |
---|
2411 | 2418 | unlock: |
---|
2412 | 2419 | spin_unlock_irqrestore(&sde->tail_lock, flags); |
---|
2413 | 2420 | return ret; |
---|
2414 | 2421 | unlock_noconn: |
---|
2415 | 2422 | if (wait) |
---|
2416 | | - iowait_sdma_inc(wait); |
---|
| 2423 | + iowait_sdma_inc(iowait_ioww_to_iow(wait)); |
---|
2417 | 2424 | tx->next_descq_idx = 0; |
---|
2418 | 2425 | #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER |
---|
2419 | 2426 | tx->sn = sde->tail_sn++; |
---|
.. | .. |
---|
2422 | 2429 | spin_lock(&sde->flushlist_lock); |
---|
2423 | 2430 | list_add_tail(&tx->list, &sde->flushlist); |
---|
2424 | 2431 | spin_unlock(&sde->flushlist_lock); |
---|
2425 | | - if (wait) { |
---|
2426 | | - wait->tx_count++; |
---|
2427 | | - wait->count += tx->num_desc; |
---|
2428 | | - } |
---|
| 2432 | + iowait_inc_wait_count(wait, tx->num_desc); |
---|
2429 | 2433 | queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); |
---|
2430 | 2434 | ret = -ECOMM; |
---|
2431 | 2435 | goto unlock; |
---|
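The open-coded `wait->tx_count++; wait->count += tx->num_desc;` bookkeeping (removed here and again in the txlist hunk below) is collapsed into a single `iowait_inc_wait_count()` helper that also absorbs the NULL-wait check. A sketch of what such a helper looks like, built from the counters it replaces; the `iow` back-pointer is the same assumption as above:

```c
/* Sketch: NULL-safe replacement for the removed open-coded flush-list accounting. */
static inline void iowait_inc_wait_count(struct iowait_work *w, u16 num_desc)
{
        if (!w)
                return;
        w->iow->tx_count++;             /* one more txreq diverted to the flush list */
        w->iow->count += num_desc;      /* descriptors that txreq would have consumed */
}
```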
.. | .. |
---|
2442 | 2446 | /** |
---|
2443 | 2447 | * sdma_send_txlist() - submit a list of tx req to ring |
---|
2444 | 2448 | * @sde: sdma engine to use |
---|
2445 | | - * @wait: wait structure to use when full (may be NULL) |
---|
| 2449 | + * @wait: SE wait structure to use when full (may be NULL) |
---|
2446 | 2450 | * @tx_list: list of sdma_txreqs to submit |
---|
2447 | | - * @count: pointer to a u32 which, after return will contain the total number of |
---|
| 2451 | + * @count: pointer to a u16 which, after return will contain the total number of |
---|
2448 | 2452 | * sdma_txreqs removed from the tx_list. This will include sdma_txreqs |
---|
2449 | 2453 | * whose SDMA descriptors are submitted to the ring and the sdma_txreqs |
---|
2450 | 2454 | * which are added to SDMA engine flush list if the SDMA engine state is |
---|
.. | .. |
---|
2467 | 2471 | * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) |
---|
2468 | 2472 | * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state |
---|
2469 | 2473 | */ |
---|
2470 | | -int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait, |
---|
2471 | | - struct list_head *tx_list, u32 *count_out) |
---|
| 2474 | +int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait, |
---|
| 2475 | + struct list_head *tx_list, u16 *count_out) |
---|
2472 | 2476 | { |
---|
2473 | 2477 | struct sdma_txreq *tx, *tx_next; |
---|
2474 | 2478 | int ret = 0; |
---|
.. | .. |
---|
2479 | 2483 | spin_lock_irqsave(&sde->tail_lock, flags); |
---|
2480 | 2484 | retry: |
---|
2481 | 2485 | list_for_each_entry_safe(tx, tx_next, tx_list, list) { |
---|
2482 | | - tx->wait = wait; |
---|
| 2486 | + tx->wait = iowait_ioww_to_iow(wait); |
---|
2483 | 2487 | if (unlikely(!__sdma_running(sde))) |
---|
2484 | 2488 | goto unlock_noconn; |
---|
2485 | 2489 | if (unlikely(tx->num_desc > sde->desc_avail)) |
---|
.. | .. |
---|
2500 | 2504 | update_tail: |
---|
2501 | 2505 | total_count = submit_count + flush_count; |
---|
2502 | 2506 | if (wait) { |
---|
2503 | | - iowait_sdma_add(wait, total_count); |
---|
2504 | | - iowait_starve_clear(submit_count > 0, wait); |
---|
| 2507 | + iowait_sdma_add(iowait_ioww_to_iow(wait), total_count); |
---|
| 2508 | + iowait_starve_clear(submit_count > 0, |
---|
| 2509 | + iowait_ioww_to_iow(wait)); |
---|
2505 | 2510 | } |
---|
2506 | 2511 | if (tail != INVALID_TAIL) |
---|
2507 | 2512 | sdma_update_tail(sde, tail); |
---|
.. | .. |
---|
2511 | 2516 | unlock_noconn: |
---|
2512 | 2517 | spin_lock(&sde->flushlist_lock); |
---|
2513 | 2518 | list_for_each_entry_safe(tx, tx_next, tx_list, list) { |
---|
2514 | | - tx->wait = wait; |
---|
| 2519 | + tx->wait = iowait_ioww_to_iow(wait); |
---|
2515 | 2520 | list_del_init(&tx->list); |
---|
2516 | 2521 | tx->next_descq_idx = 0; |
---|
2517 | 2522 | #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER |
---|
.. | .. |
---|
2520 | 2525 | #endif |
---|
2521 | 2526 | list_add_tail(&tx->list, &sde->flushlist); |
---|
2522 | 2527 | flush_count++; |
---|
2523 | | - if (wait) { |
---|
2524 | | - wait->tx_count++; |
---|
2525 | | - wait->count += tx->num_desc; |
---|
2526 | | - } |
---|
| 2528 | + iowait_inc_wait_count(wait, tx->num_desc); |
---|
2527 | 2529 | } |
---|
2528 | 2530 | spin_unlock(&sde->flushlist_lock); |
---|
2529 | 2531 | queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); |
---|
.. | .. |
---|
2582 | 2584 | * 7220, e.g. |
---|
2583 | 2585 | */ |
---|
2584 | 2586 | ss->go_s99_running = 1; |
---|
2585 | | - /* fall through -- and start dma engine */ |
---|
| 2587 | + fallthrough; /* and start dma engine */ |
---|
2586 | 2588 | case sdma_event_e10_go_hw_start: |
---|
2587 | 2589 | /* This reference means the state machine is started */ |
---|
2588 | 2590 | sdma_get(&sde->state); |
---|
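These hunks replace `/* fall through */` comments with the `fallthrough;` pseudo-keyword (mapped to `__attribute__((__fallthrough__))` where the compiler supports it), so `-Wimplicit-fallthrough` can tell deliberate fall-through from a forgotten `break`. For example:

```c
#include <linux/compiler_attributes.h>

static int classify(int event)
{
        int handled = 0;

        switch (event) {
        case 1:
                handled++;
                fallthrough;    /* deliberately also run the case-2 work */
        case 2:
                handled++;
                break;
        default:
                break;
        }
        return handled;
}
```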
.. | .. |
---|
2724 | 2726 | case sdma_event_e70_go_idle: |
---|
2725 | 2727 | break; |
---|
2726 | 2728 | case sdma_event_e85_link_down: |
---|
2727 | | - /* fall through */ |
---|
2728 | 2729 | case sdma_event_e80_hw_freeze: |
---|
2729 | 2730 | sdma_set_state(sde, sdma_state_s80_hw_freeze); |
---|
2730 | 2731 | atomic_dec(&sde->dd->sdma_unfreeze_count); |
---|
.. | .. |
---|
3005 | 3006 | case sdma_event_e60_hw_halted: |
---|
3006 | 3007 | need_progress = 1; |
---|
3007 | 3008 | sdma_err_progress_check_schedule(sde); |
---|
3008 | | - /* fall through */ |
---|
| 3009 | + fallthrough; |
---|
3009 | 3010 | case sdma_event_e90_sw_halted: |
---|
3010 | 3011 | /* |
---|
3011 | 3012 | * SW initiated halt does not perform engines |
---|
.. | .. |
---|
3019 | 3020 | break; |
---|
3020 | 3021 | case sdma_event_e85_link_down: |
---|
3021 | 3022 | ss->go_s99_running = 0; |
---|
3022 | | - /* fall through */ |
---|
| 3023 | + fallthrough; |
---|
3023 | 3024 | case sdma_event_e80_hw_freeze: |
---|
3024 | 3025 | sdma_set_state(sde, sdma_state_s80_hw_freeze); |
---|
3025 | 3026 | atomic_dec(&sde->dd->sdma_unfreeze_count); |
---|
.. | .. |
---|
3168 | 3169 | /* Add descriptor for coalesce buffer */ |
---|
3169 | 3170 | tx->desc_limit = MAX_DESC; |
---|
3170 | 3171 | return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, |
---|
3171 | | - addr, tx->tlen); |
---|
| 3172 | + addr, tx->tlen, NULL, NULL, NULL); |
---|
3172 | 3173 | } |
---|
3173 | 3174 | |
---|
3174 | 3175 | return 1; |
---|
.. | .. |
---|
3199 | 3200 | { |
---|
3200 | 3201 | int rval = 0; |
---|
3201 | 3202 | |
---|
3202 | | - tx->num_desc++; |
---|
3203 | | - if ((unlikely(tx->num_desc == tx->desc_limit))) { |
---|
| 3203 | + if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) { |
---|
3204 | 3204 | rval = _extend_sdma_tx_descs(dd, tx); |
---|
3205 | 3205 | if (rval) { |
---|
3206 | 3206 | __sdma_txclean(dd, tx); |
---|
3207 | 3207 | return rval; |
---|
3208 | 3208 | } |
---|
3209 | 3209 | } |
---|
| 3210 | + |
---|
3210 | 3211 | /* finish the one just added */ |
---|
3211 | 3212 | make_tx_sdma_desc( |
---|
3212 | 3213 | tx, |
---|
3213 | 3214 | SDMA_MAP_NONE, |
---|
3214 | 3215 | dd->sdma_pad_phys, |
---|
3215 | | - sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); |
---|
| 3216 | + sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)), |
---|
| 3217 | + NULL, NULL, NULL); |
---|
| 3218 | + tx->num_desc++; |
---|
3216 | 3219 | _sdma_close_tx(dd, tx); |
---|
3217 | 3220 | return rval; |
---|
3218 | 3221 | } |
---|
.. | .. |
---|
3249 | 3252 | tx->num_desc++; |
---|
3250 | 3253 | tx->descs[2].qw[0] = 0; |
---|
3251 | 3254 | tx->descs[2].qw[1] = 0; |
---|
3252 | | - /* FALLTHROUGH */ |
---|
| 3255 | + fallthrough; |
---|
3253 | 3256 | case SDMA_AHG_APPLY_UPDATE2: |
---|
3254 | 3257 | tx->num_desc++; |
---|
3255 | 3258 | tx->descs[1].qw[0] = 0; |
---|