```diff
@@ -232,11 +232,11 @@
 static void sdma_complete(struct kref *);
 static void sdma_finalput(struct sdma_state *);
 static void sdma_get(struct sdma_state *);
-static void sdma_hw_clean_up_task(unsigned long);
+static void sdma_hw_clean_up_task(struct tasklet_struct *);
 static void sdma_put(struct sdma_state *);
 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
 static void sdma_start_hw_clean_up(struct sdma_engine *);
-static void sdma_sw_clean_up_task(unsigned long);
+static void sdma_sw_clean_up_task(struct tasklet_struct *);
 static void sdma_sendctrl(struct sdma_engine *, unsigned);
 static void init_sdma_regs(struct sdma_engine *, u32, uint);
 static void sdma_process_event(
@@ -379,7 +379,7 @@
         __sdma_txclean(sde->dd, tx);
         if (complete)
                 (*complete)(tx, res);
-        if (wait && iowait_sdma_dec(wait))
+        if (iowait_sdma_dec(wait))
                 iowait_drain_wakeup(wait);
 }
 
@@ -406,6 +406,7 @@
         struct sdma_txreq *txp, *txp_next;
         LIST_HEAD(flushlist);
         unsigned long flags;
+        uint seq;
 
         /* flush from head to tail */
         sdma_flush_descq(sde);
@@ -416,6 +417,22 @@
         /* flush from flush list */
         list_for_each_entry_safe(txp, txp_next, &flushlist, list)
                 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
+        /* wakeup QPs orphaned on the dmawait list */
+        do {
+                struct iowait *w, *nw;
+
+                seq = read_seqbegin(&sde->waitlock);
+                if (!list_empty(&sde->dmawait)) {
+                        write_seqlock(&sde->waitlock);
+                        list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
+                                if (w->wakeup) {
+                                        w->wakeup(w, SDMA_AVAIL_REASON);
+                                        list_del_init(&w->list);
+                                }
+                        }
+                        write_sequnlock(&sde->waitlock);
+                }
+        } while (read_seqretry(&sde->waitlock, seq));
 }
 
 /*
```
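The new flush code uses the classic seqlock retry idiom: an optimistic read-side pass, an upgrade to the writer lock only when there is work, and a retry whenever the sequence count shows a writer intervened (including this code's own write). A minimal sketch of that shape, with `drain_lock`/`drain_list` as hypothetical stand-ins for `sde->waitlock`/`sde->dmawait`:

```c
#include <linux/seqlock.h>
#include <linux/list.h>

static DEFINE_SEQLOCK(drain_lock);      /* stands in for sde->waitlock */
static LIST_HEAD(drain_list);           /* stands in for sde->dmawait */

static void drain_waiters(void)
{
        unsigned int seq;

        do {
                /* optimistic read-side pass, no writer exclusion yet */
                seq = read_seqbegin(&drain_lock);
                if (!list_empty(&drain_list)) {
                        /* upgrade: exclude other writers while unlinking */
                        write_seqlock(&drain_lock);
                        /* ... wake and list_del_init() each entry ... */
                        write_sequnlock(&drain_lock);
                }
                /*
                 * Retry if the sequence moved; our own write_seqlock()
                 * bumps it too, so the loop re-checks the (now empty)
                 * list exactly once before exiting.
                 */
        } while (read_seqretry(&drain_lock, seq));
}
```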
```diff
@@ -528,9 +545,10 @@
         schedule_work(&sde->err_halt_worker);
 }
 
-static void sdma_hw_clean_up_task(unsigned long opaque)
+static void sdma_hw_clean_up_task(struct tasklet_struct *t)
 {
-        struct sdma_engine *sde = (struct sdma_engine *)opaque;
+        struct sdma_engine *sde = from_tasklet(sde, t,
+                                               sdma_hw_clean_up_task);
         u64 statuscsr;
 
         while (1) {
@@ -587,9 +605,9 @@
         sdma_desc_avail(sde, sdma_descq_freecnt(sde));
 }
 
-static void sdma_sw_clean_up_task(unsigned long opaque)
+static void sdma_sw_clean_up_task(struct tasklet_struct *t)
 {
-        struct sdma_engine *sde = (struct sdma_engine *)opaque;
+        struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task);
         unsigned long flags;
 
         spin_lock_irqsave(&sde->tail_lock, flags);
```
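Both callbacks follow the tasklet API conversion: `tasklet_setup()` registers a callback that receives the `struct tasklet_struct` itself, and `from_tasklet()` (a type-safe wrapper around `container_of()`) recovers the embedding structure, replacing the old opaque `unsigned long` cast. A small self-contained sketch with hypothetical names (`my_engine`, `my_cleanup`):

```c
#include <linux/interrupt.h>

struct my_engine {
        int id;
        struct tasklet_struct cleanup_task;     /* embedded, like sdma_hw_clean_up_task */
};

/* new-style callback: the tasklet pointer comes in, not an opaque long */
static void my_cleanup(struct tasklet_struct *t)
{
        /* from_tasklet(var, tasklet, member) is container_of() in disguise */
        struct my_engine *eng = from_tasklet(eng, t, cleanup_task);

        pr_info("cleanup for engine %d\n", eng->id);
}

static void my_engine_init(struct my_engine *eng)
{
        /* replaces tasklet_init(..., (unsigned long)eng) */
        tasklet_setup(&eng->cleanup_task, my_cleanup);
}
```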
```diff
@@ -816,7 +834,7 @@
 struct sdma_rht_map_elem {
         u32 mask;
         u8 ctr;
-        struct sdma_engine *sde[0];
+        struct sdma_engine *sde[];
 };
 
 struct sdma_rht_node {
```
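The `sde[0]` to `sde[]` change swaps a GCC zero-length array for a C99 flexible array member, which lets the compiler and fortify checks see the real bounds. Allocation typically pairs it with `struct_size()` for overflow-checked sizing; a sketch with a hypothetical `map_elem` mirroring the struct above:

```c
#include <linux/overflow.h>
#include <linux/slab.h>

struct sdma_engine;                     /* opaque here */

struct map_elem {                       /* hypothetical mirror of sdma_rht_map_elem */
        u32 mask;
        u8 ctr;
        struct sdma_engine *sde[];      /* flexible array member, not sde[0] */
};

/* room for the header plus n trailing pointers, overflow-checked */
static struct map_elem *alloc_map_elem(unsigned int n)
{
        struct map_elem *elem;

        elem = kzalloc(struct_size(elem, sde, n), GFP_KERNEL);
        return elem;
}
```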
```diff
@@ -831,7 +849,7 @@
         .nelem_hint = NR_CPUS_HINT,
         .head_offset = offsetof(struct sdma_rht_node, node),
         .key_offset = offsetof(struct sdma_rht_node, cpu_id),
-        .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
+        .key_len = sizeof_field(struct sdma_rht_node, cpu_id),
         .max_size = NR_CPUS,
         .min_size = 8,
         .automatic_shrinking = true,
```
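`FIELD_SIZEOF()` was removed in favor of the identically-behaving `sizeof_field()`; both expand to `sizeof` applied to a member through a null pointer of the containing type, evaluated at compile time. A tiny illustration, assuming (as the lookup code below suggests) that the key is an `unsigned long`:

```c
#include <linux/stddef.h>       /* sizeof_field() */

struct rht_node_example {       /* hypothetical */
        unsigned long cpu_id;
};

/* sizeof_field(T, m) == sizeof(((T *)0)->m); no object needed */
static const size_t key_len = sizeof_field(struct rht_node_example, cpu_id);
```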
```diff
@@ -853,20 +871,19 @@
 {
         struct sdma_rht_node *rht_node;
         struct sdma_engine *sde = NULL;
-        const struct cpumask *current_mask = &current->cpus_allowed;
         unsigned long cpu_id;
 
         /*
          * To ensure that always the same sdma engine(s) will be
          * selected make sure the process is pinned to this CPU only.
          */
-        if (cpumask_weight(current_mask) != 1)
+        if (current->nr_cpus_allowed != 1)
                 goto out;
 
-        cpu_id = smp_processor_id();
         rcu_read_lock();
-        rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
-                                          sdma_rht_params);
+        cpu_id = smp_processor_id();
+        rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
+                                     sdma_rht_params);
 
         if (rht_node && rht_node->map[vl]) {
                 struct sdma_rht_map_elem *map = rht_node->map[vl];
```
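Two independent cleanups meet in this hunk: `current->nr_cpus_allowed` reads the scheduler's cached affinity count instead of weighing the whole cpumask, and plain `rhashtable_lookup()` replaces `rhashtable_lookup_fast()` because the caller already holds `rcu_read_lock()` (the `_fast` variant takes and drops RCU internally). The shape, sketched against a hypothetical per-CPU table (`cpu_node`, `cpu_params`):

```c
#include <linux/rhashtable.h>
#include <linux/sched.h>

struct cpu_node {                       /* hypothetical table entry */
        unsigned long cpu_id;
        struct rhash_head node;
};

static const struct rhashtable_params cpu_params = {
        .key_offset = offsetof(struct cpu_node, cpu_id),
        .key_len = sizeof_field(struct cpu_node, cpu_id),
        .head_offset = offsetof(struct cpu_node, node),
};

static bool cpu_has_node(struct rhashtable *ht)
{
        struct cpu_node *found;
        unsigned long cpu_id;

        /* the result is only stable if the task is pinned to one CPU */
        if (current->nr_cpus_allowed != 1)
                return false;

        rcu_read_lock();
        cpu_id = smp_processor_id();
        /* caller-held RCU is a documented precondition of rhashtable_lookup() */
        found = rhashtable_lookup(ht, &cpu_id, cpu_params);
        rcu_read_unlock();
        return found != NULL;
}
```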
```diff
@@ -1313,11 +1330,13 @@
                 kvfree(sde->tx_ring);
                 sde->tx_ring = NULL;
         }
-        spin_lock_irq(&dd->sde_map_lock);
-        sdma_map_free(rcu_access_pointer(dd->sdma_map));
-        RCU_INIT_POINTER(dd->sdma_map, NULL);
-        spin_unlock_irq(&dd->sde_map_lock);
-        synchronize_rcu();
+        if (rcu_access_pointer(dd->sdma_map)) {
+                spin_lock_irq(&dd->sde_map_lock);
+                sdma_map_free(rcu_access_pointer(dd->sdma_map));
+                RCU_INIT_POINTER(dd->sdma_map, NULL);
+                spin_unlock_irq(&dd->sde_map_lock);
+                synchronize_rcu();
+        }
         kfree(dd->per_sdma);
         dd->per_sdma = NULL;
 
```
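The new `rcu_access_pointer()` guard makes the teardown safe to call when the map was never published (e.g. on an early error path). A compact sketch of that idempotent RCU teardown pattern, with hypothetical `foo_map`/`foo_lock` standing in for `dd->sdma_map`/`dd->sde_map_lock`:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo_map { int nents; };

static DEFINE_SPINLOCK(foo_lock);       /* stands in for dd->sde_map_lock */
static struct foo_map __rcu *foo_map;   /* RCU-published pointer */

static void foo_map_teardown(void)
{
        struct foo_map *m;

        /* rcu_access_pointer(): read the pointer value without
         * dereferencing it, legal outside a read-side section */
        if (!rcu_access_pointer(foo_map))
                return;                 /* nothing was ever published */

        spin_lock_irq(&foo_lock);
        m = rcu_dereference_protected(foo_map, lockdep_is_held(&foo_lock));
        RCU_INIT_POINTER(foo_map, NULL);
        spin_unlock_irq(&foo_lock);
        synchronize_rcu();              /* wait out readers, then free */
        kfree(m);
}
```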
```diff
@@ -1422,6 +1441,7 @@
         seqlock_init(&sde->head_lock);
         spin_lock_init(&sde->senddmactrl_lock);
         spin_lock_init(&sde->flushlist_lock);
+        seqlock_init(&sde->waitlock);
         /* insure there is always a zero bit */
         sde->ahg_bits = 0xfffffffe00000000ULL;
 
@@ -1437,11 +1457,10 @@
         sde->tail_csr =
                 get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
 
-        tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
-                     (unsigned long)sde);
-
-        tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
-                     (unsigned long)sde);
+        tasklet_setup(&sde->sdma_hw_clean_up_task,
+                      sdma_hw_clean_up_task);
+        tasklet_setup(&sde->sdma_sw_clean_up_task,
+                      sdma_sw_clean_up_task);
         INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
         INIT_WORK(&sde->flush_worker, sdma_field_flush);
 
@@ -1450,12 +1469,9 @@
         timer_setup(&sde->err_progress_check_timer,
                     sdma_err_progress_check, 0);
 
-        sde->descq = dma_zalloc_coherent(
-                &dd->pcidev->dev,
-                descq_cnt * sizeof(u64[2]),
-                &sde->descq_phys,
-                GFP_KERNEL
-        );
+        sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+                                        descq_cnt * sizeof(u64[2]),
+                                        &sde->descq_phys, GFP_KERNEL);
         if (!sde->descq)
                 goto bail;
         sde->tx_ring =
@@ -1468,24 +1484,18 @@
 
         dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
         /* Allocate memory for DMA of head registers to memory */
-        dd->sdma_heads_dma = dma_zalloc_coherent(
-                &dd->pcidev->dev,
-                dd->sdma_heads_size,
-                &dd->sdma_heads_phys,
-                GFP_KERNEL
-        );
+        dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+                                                dd->sdma_heads_size,
+                                                &dd->sdma_heads_phys,
+                                                GFP_KERNEL);
         if (!dd->sdma_heads_dma) {
                 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
                 goto bail;
         }
 
         /* Allocate memory for pad */
-        dd->sdma_pad_dma = dma_zalloc_coherent(
-                &dd->pcidev->dev,
-                SDMA_PAD,
-                &dd->sdma_pad_phys,
-                GFP_KERNEL
-        );
+        dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
+                                              &dd->sdma_pad_phys, GFP_KERNEL);
         if (!dd->sdma_pad_dma) {
                 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
                 goto bail;
```
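All three allocations drop `dma_zalloc_coherent()` for `dma_alloc_coherent()`: once the underlying allocator began zeroing coherent memory unconditionally, the `_zalloc` wrapper became redundant and was removed, so this is a rename with no behavior change. For reference, the call shape, with a hypothetical helper name:

```c
#include <linux/dma-mapping.h>

/* hypothetical: a ring of 'cnt' 16-byte descriptors, as sized above */
static void *alloc_ring(struct device *dev, unsigned int cnt,
                        dma_addr_t *phys)
{
        /* returns zeroed, coherent memory; NULL on failure */
        return dma_alloc_coherent(dev, cnt * sizeof(u64[2]), phys,
                                  GFP_KERNEL);
}
```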
```diff
@@ -1756,12 +1766,9 @@
  */
 static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
 {
-        struct iowait *wait, *nw;
+        struct iowait *wait, *nw, *twait;
         struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
-        uint i, n = 0, seq, max_idx = 0;
-        struct sdma_txreq *stx;
-        struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
-        u8 max_starved_cnt = 0;
+        uint i, n = 0, seq, tidx = 0;
 
 #ifdef CONFIG_SDMA_VERBOSITY
         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
@@ -1770,49 +1777,50 @@
 #endif
 
         do {
-                seq = read_seqbegin(&dev->iowait_lock);
+                seq = read_seqbegin(&sde->waitlock);
                 if (!list_empty(&sde->dmawait)) {
                         /* at least one item */
-                        write_seqlock(&dev->iowait_lock);
+                        write_seqlock(&sde->waitlock);
                         /* Harvest waiters wanting DMA descriptors */
                         list_for_each_entry_safe(
                                         wait,
                                         nw,
                                         &sde->dmawait,
                                         list) {
-                                u16 num_desc = 0;
+                                u32 num_desc;
 
                                 if (!wait->wakeup)
                                         continue;
                                 if (n == ARRAY_SIZE(waits))
                                         break;
-                                if (!list_empty(&wait->tx_head)) {
-                                        stx = list_first_entry(
-                                                &wait->tx_head,
-                                                struct sdma_txreq,
-                                                list);
-                                        num_desc = stx->num_desc;
-                                }
+                                iowait_init_priority(wait);
+                                num_desc = iowait_get_all_desc(wait);
                                 if (num_desc > avail)
                                         break;
                                 avail -= num_desc;
-                                /* Find the most starved wait memeber */
-                                iowait_starve_find_max(wait, &max_starved_cnt,
-                                                       n, &max_idx);
+                                /* Find the top-priority wait member */
+                                if (n) {
+                                        twait = waits[tidx];
+                                        tidx =
+                                                iowait_priority_update_top(wait,
+                                                                           twait,
+                                                                           n,
+                                                                           tidx);
+                                }
                                 list_del_init(&wait->list);
                                 waits[n++] = wait;
                         }
-                        write_sequnlock(&dev->iowait_lock);
+                        write_sequnlock(&sde->waitlock);
                         break;
                 }
-        } while (read_seqretry(&dev->iowait_lock, seq));
+        } while (read_seqretry(&sde->waitlock, seq));
 
-        /* Schedule the most starved one first */
+        /* Schedule the top-priority entry first */
         if (n)
-                waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON);
+                waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);
 
         for (i = 0; i < n; i++)
-                if (i != max_idx)
+                if (i != tidx)
                         waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
 }
 
```
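This rework replaces the "most starved" bookkeeping with driver priority helpers (`iowait_init_priority()`, `iowait_get_all_desc()`, `iowait_priority_update_top()`); the loop's shape is "batch the waiters, remember the index of the best one, wake it first". A generic sketch of that selection pattern, with a hypothetical `prio` field standing in for the helpers:

```c
#define BATCH 8

struct waiter {
        int prio;                       /* hypothetical priority key */
};

/* Fill batch[] from src[], tracking the top-priority index. */
static int harvest(struct waiter *src[], int nsrc, struct waiter *batch[])
{
        int n = 0, tidx = 0;

        for (int i = 0; i < nsrc && n < BATCH; i++) {
                struct waiter *w = src[i];

                if (!w)
                        continue;
                /* like iowait_priority_update_top(): compare against the
                 * current best only once there is a current best */
                if (n && w->prio > batch[tidx]->prio)
                        tidx = n;
                batch[n++] = w;
        }
        return tidx;    /* caller wakes batch[tidx] first, then the rest */
}
```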
```diff
@@ -2347,7 +2355,7 @@
  */
 static int sdma_check_progress(
         struct sdma_engine *sde,
-        struct iowait *wait,
+        struct iowait_work *wait,
         struct sdma_txreq *tx,
         bool pkts_sent)
 {
@@ -2357,12 +2365,12 @@
         if (tx->num_desc <= sde->desc_avail)
                 return -EAGAIN;
         /* pulse the head_lock */
-        if (wait && wait->sleep) {
+        if (wait && iowait_ioww_to_iow(wait)->sleep) {
                 unsigned seq;
 
                 seq = raw_seqcount_begin(
                         (const seqcount_t *)&sde->head_lock.seqcount);
-                ret = wait->sleep(sde, wait, tx, seq, pkts_sent);
+                ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
                 if (ret == -EAGAIN)
                         sde->desc_avail = sdma_descq_freecnt(sde);
         } else {
@@ -2374,7 +2382,7 @@
 /**
  * sdma_send_txreq() - submit a tx req to ring
  * @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
  * @tx: sdma_txreq to submit
  * @pkts_sent: has any packet been sent yet?
  *
@@ -2387,7 +2395,7 @@
  * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
  */
 int sdma_send_txreq(struct sdma_engine *sde,
-                    struct iowait *wait,
+                    struct iowait_work *wait,
                     struct sdma_txreq *tx,
                     bool pkts_sent)
 {
@@ -2398,7 +2406,7 @@
         /* user should have supplied entire packet */
         if (unlikely(tx->tlen))
                 return -EINVAL;
-        tx->wait = wait;
+        tx->wait = iowait_ioww_to_iow(wait);
         spin_lock_irqsave(&sde->tail_lock, flags);
 retry:
         if (unlikely(!__sdma_running(sde)))
@@ -2407,14 +2415,14 @@
                 goto nodesc;
         tail = submit_tx(sde, tx);
         if (wait)
-                iowait_sdma_inc(wait);
+                iowait_sdma_inc(iowait_ioww_to_iow(wait));
         sdma_update_tail(sde, tail);
 unlock:
         spin_unlock_irqrestore(&sde->tail_lock, flags);
         return ret;
 unlock_noconn:
         if (wait)
-                iowait_sdma_inc(wait);
+                iowait_sdma_inc(iowait_ioww_to_iow(wait));
         tx->next_descq_idx = 0;
 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
         tx->sn = sde->tail_sn++;
@@ -2423,10 +2431,7 @@
         spin_lock(&sde->flushlist_lock);
         list_add_tail(&tx->list, &sde->flushlist);
         spin_unlock(&sde->flushlist_lock);
-        if (wait) {
-                wait->tx_count++;
-                wait->count += tx->num_desc;
-        }
+        iowait_inc_wait_count(wait, tx->num_desc);
         queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
         ret = -ECOMM;
         goto unlock;
@@ -2443,9 +2448,9 @@
 /**
  * sdma_send_txlist() - submit a list of tx req to ring
  * @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
  * @tx_list: list of sdma_txreqs to submit
- * @count: pointer to a u32 which, after return will contain the total number of
+ * @count: pointer to a u16 which, after return will contain the total number of
  *         sdma_txreqs removed from the tx_list. This will include sdma_txreqs
  *         whose SDMA descriptors are submitted to the ring and the sdma_txreqs
  *         which are added to SDMA engine flush list if the SDMA engine state is
@@ -2468,8 +2473,8 @@
  * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
  * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
  */
-int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
-                     struct list_head *tx_list, u32 *count_out)
+int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
+                     struct list_head *tx_list, u16 *count_out)
 {
         struct sdma_txreq *tx, *tx_next;
         int ret = 0;
@@ -2480,7 +2485,7 @@
         spin_lock_irqsave(&sde->tail_lock, flags);
 retry:
         list_for_each_entry_safe(tx, tx_next, tx_list, list) {
-                tx->wait = wait;
+                tx->wait = iowait_ioww_to_iow(wait);
                 if (unlikely(!__sdma_running(sde)))
                         goto unlock_noconn;
                 if (unlikely(tx->num_desc > sde->desc_avail))
@@ -2501,8 +2506,9 @@
 update_tail:
         total_count = submit_count + flush_count;
         if (wait) {
-                iowait_sdma_add(wait, total_count);
-                iowait_starve_clear(submit_count > 0, wait);
+                iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
+                iowait_starve_clear(submit_count > 0,
+                                    iowait_ioww_to_iow(wait));
         }
         if (tail != INVALID_TAIL)
                 sdma_update_tail(sde, tail);
@@ -2512,7 +2518,7 @@
 unlock_noconn:
         spin_lock(&sde->flushlist_lock);
         list_for_each_entry_safe(tx, tx_next, tx_list, list) {
-                tx->wait = wait;
+                tx->wait = iowait_ioww_to_iow(wait);
                 list_del_init(&tx->list);
                 tx->next_descq_idx = 0;
 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
@@ -2521,10 +2527,7 @@
 #endif
                 list_add_tail(&tx->list, &sde->flushlist);
                 flush_count++;
-                if (wait) {
-                        wait->tx_count++;
-                        wait->count += tx->num_desc;
-                }
+                iowait_inc_wait_count(wait, tx->num_desc);
         }
         spin_unlock(&sde->flushlist_lock);
         queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
@@ -2583,7 +2586,7 @@
                  * 7220, e.g.
                  */
                 ss->go_s99_running = 1;
-                /* fall through -- and start dma engine */
+                fallthrough;    /* and start dma engine */
         case sdma_event_e10_go_hw_start:
                 /* This reference means the state machine is started */
                 sdma_get(&sde->state);
```
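The `/* fall through */` comments throughout the state machine are converted to the `fallthrough` pseudo-keyword from `<linux/compiler_attributes.h>`, which expands to `__attribute__((fallthrough))` on compilers that support it, so `-Wimplicit-fallthrough` is satisfied by the compiler rather than by comment parsing. A minimal illustration with hypothetical names:

```c
#include <linux/compiler_attributes.h>

static int accumulate(int event)
{
        int ret = 0;

        switch (event) {
        case 1:
                ret += 1;
                fallthrough;    /* deliberate: event 1 also does event 2's work */
        case 2:
                ret += 2;
                break;
        default:
                break;
        }
        return ret;
}
```

Where one case label immediately follows another with no statements in between, no annotation is needed at all, which is why the first hunk below simply deletes the comment.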
```diff
@@ -2725,7 +2728,6 @@
         case sdma_event_e70_go_idle:
                 break;
         case sdma_event_e85_link_down:
-                /* fall through */
         case sdma_event_e80_hw_freeze:
                 sdma_set_state(sde, sdma_state_s80_hw_freeze);
                 atomic_dec(&sde->dd->sdma_unfreeze_count);
@@ -3006,7 +3008,7 @@
         case sdma_event_e60_hw_halted:
                 need_progress = 1;
                 sdma_err_progress_check_schedule(sde);
-                /* fall through */
+                fallthrough;
         case sdma_event_e90_sw_halted:
                 /*
                  * SW initiated halt does not perform engines
@@ -3020,7 +3022,7 @@
                 break;
         case sdma_event_e85_link_down:
                 ss->go_s99_running = 0;
-                /* fall through */
+                fallthrough;
         case sdma_event_e80_hw_freeze:
                 sdma_set_state(sde, sdma_state_s80_hw_freeze);
                 atomic_dec(&sde->dd->sdma_unfreeze_count);
@@ -3250,7 +3252,7 @@
                 tx->num_desc++;
                 tx->descs[2].qw[0] = 0;
                 tx->descs[2].qw[1] = 0;
-                /* FALLTHROUGH */
+                fallthrough;
         case SDMA_AHG_APPLY_UPDATE2:
                 tx->num_desc++;
                 tx->descs[1].qw[0] = 0;
```
|---|