2024-01-03 2f7c68cb55ecb7331f2381deb497c27155f32faf
kernel/drivers/infiniband/hw/hfi1/sdma.c
@@ -232,11 +232,11 @@
 static void sdma_complete(struct kref *);
 static void sdma_finalput(struct sdma_state *);
 static void sdma_get(struct sdma_state *);
-static void sdma_hw_clean_up_task(unsigned long);
+static void sdma_hw_clean_up_task(struct tasklet_struct *);
 static void sdma_put(struct sdma_state *);
 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
 static void sdma_start_hw_clean_up(struct sdma_engine *);
-static void sdma_sw_clean_up_task(unsigned long);
+static void sdma_sw_clean_up_task(struct tasklet_struct *);
 static void sdma_sendctrl(struct sdma_engine *, unsigned);
 static void init_sdma_regs(struct sdma_engine *, u32, uint);
 static void sdma_process_event(
@@ -379,7 +379,7 @@
         __sdma_txclean(sde->dd, tx);
         if (complete)
                 (*complete)(tx, res);
-        if (wait && iowait_sdma_dec(wait))
+        if (iowait_sdma_dec(wait))
                 iowait_drain_wakeup(wait);
 }
 
@@ -406,6 +406,7 @@
         struct sdma_txreq *txp, *txp_next;
         LIST_HEAD(flushlist);
         unsigned long flags;
+        uint seq;
 
         /* flush from head to tail */
         sdma_flush_descq(sde);
@@ -416,6 +417,22 @@
         /* flush from flush list */
         list_for_each_entry_safe(txp, txp_next, &flushlist, list)
                 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
+        /* wakeup QPs orphaned on the dmawait list */
+        do {
+                struct iowait *w, *nw;
+
+                seq = read_seqbegin(&sde->waitlock);
+                if (!list_empty(&sde->dmawait)) {
+                        write_seqlock(&sde->waitlock);
+                        list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
+                                if (w->wakeup) {
+                                        w->wakeup(w, SDMA_AVAIL_REASON);
+                                        list_del_init(&w->list);
+                                }
+                        }
+                        write_sequnlock(&sde->waitlock);
+                }
+        } while (read_seqretry(&sde->waitlock, seq));
 }
 
 /*
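
The new drain loop in sdma_flush() follows the standard seqlock reader/retry shape: sample the sequence with read_seqbegin(), take the write side only when the list is non-empty, and loop again if read_seqretry() detects a concurrent writer. A minimal sketch of that pattern, with invented structure names rather than the hfi1 ones:

    #include <linux/list.h>
    #include <linux/seqlock.h>

    /* Illustrative only: a seqlock-protected wait list, not the hfi1 types. */
    struct demo_engine {
            seqlock_t waitlock;
            struct list_head dmawait;
    };

    struct demo_waiter {
            struct list_head list;
            void (*wakeup)(struct demo_waiter *w, int reason);
    };

    static void demo_drain_waiters(struct demo_engine *e, int reason)
    {
            struct demo_waiter *w, *nw;
            unsigned int seq;

            do {
                    /* cheap read-side check, no writer exclusion yet */
                    seq = read_seqbegin(&e->waitlock);
                    if (!list_empty(&e->dmawait)) {
                            /* take the write side before touching the list */
                            write_seqlock(&e->waitlock);
                            list_for_each_entry_safe(w, nw, &e->dmawait, list) {
                                    if (w->wakeup) {
                                            w->wakeup(w, reason);
                                            list_del_init(&w->list);
                                    }
                            }
                            write_sequnlock(&e->waitlock);
                    }
                    /* retry if a writer raced with the read-side sample */
            } while (read_seqretry(&e->waitlock, seq));
    }
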
@@ -528,9 +545,10 @@
         schedule_work(&sde->err_halt_worker);
 }
 
-static void sdma_hw_clean_up_task(unsigned long opaque)
+static void sdma_hw_clean_up_task(struct tasklet_struct *t)
 {
-        struct sdma_engine *sde = (struct sdma_engine *)opaque;
+        struct sdma_engine *sde = from_tasklet(sde, t,
+                                               sdma_hw_clean_up_task);
         u64 statuscsr;
 
         while (1) {
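
In the two-argument tasklet API, tasklet_setup() records only the tasklet pointer and the callback, and the callback recovers its enclosing structure with from_tasklet(), a container_of() wrapper keyed on the member name. A minimal sketch of the pairing, using a made-up structure rather than struct sdma_engine:

    #include <linux/interrupt.h>

    /* Illustrative only: a driver-private object embedding a tasklet. */
    struct demo_dev {
            int unit;
            struct tasklet_struct clean_up_task;
    };

    /* New-style callback: gets the tasklet pointer, not an unsigned long cookie. */
    static void demo_clean_up_task(struct tasklet_struct *t)
    {
            struct demo_dev *dev = from_tasklet(dev, t, clean_up_task);

            pr_info("demo: cleaning up unit %d\n", dev->unit);
    }

    static void demo_init(struct demo_dev *dev)
    {
            /* no (unsigned long) cast of the device pointer anymore */
            tasklet_setup(&dev->clean_up_task, demo_clean_up_task);
    }
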
@@ -587,9 +605,9 @@
         sdma_desc_avail(sde, sdma_descq_freecnt(sde));
 }
 
-static void sdma_sw_clean_up_task(unsigned long opaque)
+static void sdma_sw_clean_up_task(struct tasklet_struct *t)
 {
-        struct sdma_engine *sde = (struct sdma_engine *)opaque;
+        struct sdma_engine *sde = from_tasklet(sde, t, sdma_sw_clean_up_task);
         unsigned long flags;
 
         spin_lock_irqsave(&sde->tail_lock, flags);
@@ -816,7 +834,7 @@
 struct sdma_rht_map_elem {
         u32 mask;
         u8 ctr;
-        struct sdma_engine *sde[0];
+        struct sdma_engine *sde[];
 };
 
 struct sdma_rht_node {
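
Replacing the zero-length array sde[0] with a C99 flexible array member sde[] does not change the layout, but it lets the compiler and helpers such as struct_size() see the trailing array. A small sketch of how such a structure is typically sized and allocated; the names are invented:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    /* Illustrative only: a fixed header followed by a flexible array. */
    struct demo_map_elem {
            u32 mask;
            u8 ctr;
            void *entries[];        /* flexible array member, formerly entries[0] */
    };

    static struct demo_map_elem *demo_alloc_map(unsigned int nr)
    {
            struct demo_map_elem *m;

            /* struct_size() = sizeof(*m) + nr * sizeof(m->entries[0]), overflow-checked */
            m = kzalloc(struct_size(m, entries, nr), GFP_KERNEL);
            return m;
    }
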
@@ -831,7 +849,7 @@
         .nelem_hint = NR_CPUS_HINT,
         .head_offset = offsetof(struct sdma_rht_node, node),
         .key_offset = offsetof(struct sdma_rht_node, cpu_id),
-        .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
+        .key_len = sizeof_field(struct sdma_rht_node, cpu_id),
         .max_size = NR_CPUS,
         .min_size = 8,
         .automatic_shrinking = true,
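
FIELD_SIZEOF() was renamed sizeof_field(); both expand to sizeof applied to a member of a null object pointer, so the rhashtable key length is unchanged. A short illustration with a hypothetical struct:

    #include <linux/stddef.h>       /* sizeof_field(), offsetof() */
    #include <linux/types.h>

    struct demo_node {
            unsigned long cpu_id;
            int data;
    };

    /* sizeof_field(T, m) == sizeof(((T *)0)->m): here, sizeof(unsigned long) */
    static const size_t demo_key_len = sizeof_field(struct demo_node, cpu_id);
    static const size_t demo_key_off = offsetof(struct demo_node, cpu_id);
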
@@ -853,20 +871,19 @@
 {
         struct sdma_rht_node *rht_node;
         struct sdma_engine *sde = NULL;
-        const struct cpumask *current_mask = &current->cpus_allowed;
         unsigned long cpu_id;
 
         /*
          * To ensure that always the same sdma engine(s) will be
          * selected make sure the process is pinned to this CPU only.
          */
-        if (cpumask_weight(current_mask) != 1)
+        if (current->nr_cpus_allowed != 1)
                 goto out;
 
-        cpu_id = smp_processor_id();
         rcu_read_lock();
-        rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
-                                          sdma_rht_params);
+        cpu_id = smp_processor_id();
+        rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
+                                     sdma_rht_params);
 
         if (rht_node && rht_node->map[vl]) {
                 struct sdma_rht_map_elem *map = rht_node->map[vl];
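
Two independent cleanups meet in this hunk: current->nr_cpus_allowed replaces a cpumask_weight() walk over the task's allowed mask, and rhashtable_lookup(), which expects the caller to already hold rcu_read_lock(), replaces rhashtable_lookup_fast() now that the lookup sits inside the RCU read-side section. A hedged sketch of that lookup shape with an invented table and node type:

    #include <linux/rhashtable.h>
    #include <linux/sched.h>
    #include <linux/smp.h>

    /* Illustrative only: per-CPU nodes keyed by cpu_id, not the hfi1 tables. */
    struct demo_node {
            unsigned long cpu_id;
            struct rhash_head node;
    };

    static const struct rhashtable_params demo_params = {
            .key_len = sizeof_field(struct demo_node, cpu_id),
            .key_offset = offsetof(struct demo_node, cpu_id),
            .head_offset = offsetof(struct demo_node, node),
    };

    static bool demo_this_cpu_has_node(struct rhashtable *tbl)
    {
            struct demo_node *found;
            unsigned long cpu_id;
            bool ret = false;

            /* only meaningful when the task is pinned to a single CPU */
            if (current->nr_cpus_allowed != 1)
                    return false;

            rcu_read_lock();
            cpu_id = smp_processor_id();
            /* rhashtable_lookup() assumes rcu_read_lock() is already held */
            found = rhashtable_lookup(tbl, &cpu_id, demo_params);
            if (found)
                    ret = true;     /* only use 'found' inside the RCU section */
            rcu_read_unlock();

            return ret;
    }
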
@@ -1313,11 +1330,13 @@
                 kvfree(sde->tx_ring);
                 sde->tx_ring = NULL;
         }
-        spin_lock_irq(&dd->sde_map_lock);
-        sdma_map_free(rcu_access_pointer(dd->sdma_map));
-        RCU_INIT_POINTER(dd->sdma_map, NULL);
-        spin_unlock_irq(&dd->sde_map_lock);
-        synchronize_rcu();
+        if (rcu_access_pointer(dd->sdma_map)) {
+                spin_lock_irq(&dd->sde_map_lock);
+                sdma_map_free(rcu_access_pointer(dd->sdma_map));
+                RCU_INIT_POINTER(dd->sdma_map, NULL);
+                spin_unlock_irq(&dd->sde_map_lock);
+                synchronize_rcu();
+        }
         kfree(dd->per_sdma);
         dd->per_sdma = NULL;
 
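
The teardown is now skipped when dd->sdma_map was never published, so error unwinds avoid taking the lock and calling synchronize_rcu() on a NULL map. The generic retire pattern for an RCU-published pointer is sketched below with invented names; the sketch unpublishes, waits out the grace period, and only then frees:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* Illustrative only: an RCU-published map with a spinlock for updaters. */
    struct demo_map {
            int nr_entries;
    };

    struct demo_dev {
            spinlock_t map_lock;
            struct demo_map __rcu *map;
    };

    static void demo_free_map(struct demo_dev *dd)
    {
            struct demo_map *map;

            /* nothing published: skip the lock and the grace-period wait */
            if (!rcu_access_pointer(dd->map))
                    return;

            spin_lock_irq(&dd->map_lock);
            map = rcu_dereference_protected(dd->map,
                                            lockdep_is_held(&dd->map_lock));
            RCU_INIT_POINTER(dd->map, NULL);
            spin_unlock_irq(&dd->map_lock);

            /* wait out readers that may still hold the old pointer, then free */
            synchronize_rcu();
            kfree(map);
    }
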
@@ -1422,6 +1441,7 @@
                 seqlock_init(&sde->head_lock);
                 spin_lock_init(&sde->senddmactrl_lock);
                 spin_lock_init(&sde->flushlist_lock);
+                seqlock_init(&sde->waitlock);
                 /* insure there is always a zero bit */
                 sde->ahg_bits = 0xfffffffe00000000ULL;
 
@@ -1437,11 +1457,10 @@
                 sde->tail_csr =
                         get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
 
-                tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
-                             (unsigned long)sde);
-
-                tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
-                             (unsigned long)sde);
+                tasklet_setup(&sde->sdma_hw_clean_up_task,
+                              sdma_hw_clean_up_task);
+                tasklet_setup(&sde->sdma_sw_clean_up_task,
+                              sdma_sw_clean_up_task);
                 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
                 INIT_WORK(&sde->flush_worker, sdma_field_flush);
 
@@ -1450,12 +1469,9 @@
                 timer_setup(&sde->err_progress_check_timer,
                             sdma_err_progress_check, 0);
 
-                sde->descq = dma_zalloc_coherent(
-                        &dd->pcidev->dev,
-                        descq_cnt * sizeof(u64[2]),
-                        &sde->descq_phys,
-                        GFP_KERNEL
-                );
+                sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
+                                                descq_cnt * sizeof(u64[2]),
+                                                &sde->descq_phys, GFP_KERNEL);
                 if (!sde->descq)
                         goto bail;
                 sde->tx_ring =
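
dma_zalloc_coherent() was dropped once dma_alloc_coherent() began returning zeroed memory itself, so these conversions only re-wrap the arguments. A hedged sketch of the allocate/free pairing against a hypothetical PCI device:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* Illustrative only: one coherent descriptor ring for a made-up device. */
    struct demo_ring {
            void *descq;            /* CPU address, already zeroed by the API */
            dma_addr_t descq_phys;  /* bus address to program into the device */
            size_t size;
    };

    static int demo_ring_alloc(struct pci_dev *pdev, struct demo_ring *r,
                               unsigned int descq_cnt)
    {
            r->size = descq_cnt * sizeof(u64[2]);
            r->descq = dma_alloc_coherent(&pdev->dev, r->size, &r->descq_phys,
                                          GFP_KERNEL);
            return r->descq ? 0 : -ENOMEM;
    }

    static void demo_ring_free(struct pci_dev *pdev, struct demo_ring *r)
    {
            if (r->descq)
                    dma_free_coherent(&pdev->dev, r->size, r->descq,
                                      r->descq_phys);
            r->descq = NULL;
    }
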
@@ -1468,24 +1484,18 @@
 
         dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
         /* Allocate memory for DMA of head registers to memory */
-        dd->sdma_heads_dma = dma_zalloc_coherent(
-                &dd->pcidev->dev,
-                dd->sdma_heads_size,
-                &dd->sdma_heads_phys,
-                GFP_KERNEL
-        );
+        dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
+                                                dd->sdma_heads_size,
+                                                &dd->sdma_heads_phys,
+                                                GFP_KERNEL);
         if (!dd->sdma_heads_dma) {
                 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
                 goto bail;
         }
 
         /* Allocate memory for pad */
-        dd->sdma_pad_dma = dma_zalloc_coherent(
-                &dd->pcidev->dev,
-                SDMA_PAD,
-                &dd->sdma_pad_phys,
-                GFP_KERNEL
-        );
+        dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
+                                              &dd->sdma_pad_phys, GFP_KERNEL);
         if (!dd->sdma_pad_dma) {
                 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
                 goto bail;
@@ -1627,20 +1637,18 @@
 {
         switch (sdma_mapping_type(descp)) {
         case SDMA_MAP_SINGLE:
-                dma_unmap_single(
-                        &dd->pcidev->dev,
-                        sdma_mapping_addr(descp),
-                        sdma_mapping_len(descp),
-                        DMA_TO_DEVICE);
+                dma_unmap_single(&dd->pcidev->dev, sdma_mapping_addr(descp),
+                                 sdma_mapping_len(descp), DMA_TO_DEVICE);
                 break;
         case SDMA_MAP_PAGE:
-                dma_unmap_page(
-                        &dd->pcidev->dev,
-                        sdma_mapping_addr(descp),
-                        sdma_mapping_len(descp),
-                        DMA_TO_DEVICE);
+                dma_unmap_page(&dd->pcidev->dev, sdma_mapping_addr(descp),
+                               sdma_mapping_len(descp), DMA_TO_DEVICE);
                 break;
         }
+
+        if (descp->pinning_ctx && descp->ctx_put)
+                descp->ctx_put(descp->pinning_ctx);
+        descp->pinning_ctx = NULL;
 }
 
 /*
@@ -1756,12 +1764,9 @@
  */
 static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
 {
-        struct iowait *wait, *nw;
+        struct iowait *wait, *nw, *twait;
         struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
-        uint i, n = 0, seq, max_idx = 0;
-        struct sdma_txreq *stx;
-        struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
-        u8 max_starved_cnt = 0;
+        uint i, n = 0, seq, tidx = 0;
 
 #ifdef CONFIG_SDMA_VERBOSITY
         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
@@ -1770,49 +1775,50 @@
 #endif
 
         do {
-                seq = read_seqbegin(&dev->iowait_lock);
+                seq = read_seqbegin(&sde->waitlock);
                 if (!list_empty(&sde->dmawait)) {
                         /* at least one item */
-                        write_seqlock(&dev->iowait_lock);
+                        write_seqlock(&sde->waitlock);
                         /* Harvest waiters wanting DMA descriptors */
                         list_for_each_entry_safe(
                                         wait,
                                         nw,
                                         &sde->dmawait,
                                         list) {
-                                u16 num_desc = 0;
+                                u32 num_desc;
 
                                 if (!wait->wakeup)
                                         continue;
                                 if (n == ARRAY_SIZE(waits))
                                         break;
-                                if (!list_empty(&wait->tx_head)) {
-                                        stx = list_first_entry(
-                                                &wait->tx_head,
-                                                struct sdma_txreq,
-                                                list);
-                                        num_desc = stx->num_desc;
-                                }
+                                iowait_init_priority(wait);
+                                num_desc = iowait_get_all_desc(wait);
                                 if (num_desc > avail)
                                         break;
                                 avail -= num_desc;
-                                /* Find the most starved wait memeber */
-                                iowait_starve_find_max(wait, &max_starved_cnt,
-                                                       n, &max_idx);
+                                /* Find the top-priority wait memeber */
+                                if (n) {
+                                        twait = waits[tidx];
+                                        tidx =
+                                                iowait_priority_update_top(wait,
                                                                            twait,
                                                                            n,
                                                                            tidx);
+                                }
                                 list_del_init(&wait->list);
                                 waits[n++] = wait;
                         }
-                        write_sequnlock(&dev->iowait_lock);
+                        write_sequnlock(&sde->waitlock);
                         break;
                 }
-        } while (read_seqretry(&dev->iowait_lock, seq));
+        } while (read_seqretry(&sde->waitlock, seq));
 
-        /* Schedule the most starved one first */
+        /* Schedule the top-priority entry first */
         if (n)
-                waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON);
+                waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON);
 
         for (i = 0; i < n; i++)
-                if (i != max_idx)
+                if (i != tidx)
                         waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
 }
 
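
The reworked harvest loop batches waiters into waits[] while tracking tidx, the index of the highest-priority entry seen so far, and wakes that one first. iowait_init_priority(), iowait_get_all_desc() and iowait_priority_update_top() are hfi1 helpers; the sketch below only shows the generic 'fill a batch, remember the best index' shape with an invented priority field:

    #include <linux/kernel.h>

    /* Illustrative only: batch items and remember the index of the best one. */
    struct demo_item {
            unsigned int priority;
            unsigned int num_desc;
    };

    /*
     * Take items from src[] into batch[] while they still fit in 'avail'
     * descriptors; return how many were taken and set *top_idx to the
     * highest-priority entry so the caller can service it first.
     */
    static unsigned int demo_harvest(struct demo_item *src, unsigned int src_len,
                                     struct demo_item **batch,
                                     unsigned int batch_len, unsigned int avail,
                                     unsigned int *top_idx)
    {
            unsigned int i, n = 0, tidx = 0;

            for (i = 0; i < src_len && n < batch_len; i++) {
                    if (src[i].num_desc > avail)
                            break;
                    avail -= src[i].num_desc;
                    /* track the top-priority entry seen so far */
                    if (n && src[i].priority > batch[tidx]->priority)
                            tidx = n;
                    batch[n++] = &src[i];
            }

            *top_idx = tidx;
            return n;
    }
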
@@ -2347,7 +2353,7 @@
  */
 static int sdma_check_progress(
         struct sdma_engine *sde,
-        struct iowait *wait,
+        struct iowait_work *wait,
         struct sdma_txreq *tx,
         bool pkts_sent)
 {
@@ -2357,12 +2363,12 @@
         if (tx->num_desc <= sde->desc_avail)
                 return -EAGAIN;
         /* pulse the head_lock */
-        if (wait && wait->sleep) {
+        if (wait && iowait_ioww_to_iow(wait)->sleep) {
                 unsigned seq;
 
                 seq = raw_seqcount_begin(
                         (const seqcount_t *)&sde->head_lock.seqcount);
-                ret = wait->sleep(sde, wait, tx, seq, pkts_sent);
+                ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
                 if (ret == -EAGAIN)
                         sde->desc_avail = sdma_descq_freecnt(sde);
         } else {
@@ -2374,7 +2380,7 @@
 /**
  * sdma_send_txreq() - submit a tx req to ring
  * @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
  * @tx: sdma_txreq to submit
  * @pkts_sent: has any packet been sent yet?
  *
@@ -2387,7 +2393,7 @@
  * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
  */
 int sdma_send_txreq(struct sdma_engine *sde,
-                    struct iowait *wait,
+                    struct iowait_work *wait,
                     struct sdma_txreq *tx,
                     bool pkts_sent)
 {
@@ -2398,7 +2404,7 @@
         /* user should have supplied entire packet */
         if (unlikely(tx->tlen))
                 return -EINVAL;
-        tx->wait = wait;
+        tx->wait = iowait_ioww_to_iow(wait);
         spin_lock_irqsave(&sde->tail_lock, flags);
 retry:
         if (unlikely(!__sdma_running(sde)))
@@ -2407,14 +2413,14 @@
                 goto nodesc;
         tail = submit_tx(sde, tx);
         if (wait)
-                iowait_sdma_inc(wait);
+                iowait_sdma_inc(iowait_ioww_to_iow(wait));
         sdma_update_tail(sde, tail);
 unlock:
         spin_unlock_irqrestore(&sde->tail_lock, flags);
         return ret;
 unlock_noconn:
         if (wait)
-                iowait_sdma_inc(wait);
+                iowait_sdma_inc(iowait_ioww_to_iow(wait));
         tx->next_descq_idx = 0;
 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
         tx->sn = sde->tail_sn++;
@@ -2423,10 +2429,7 @@
         spin_lock(&sde->flushlist_lock);
         list_add_tail(&tx->list, &sde->flushlist);
         spin_unlock(&sde->flushlist_lock);
-        if (wait) {
-                wait->tx_count++;
-                wait->count += tx->num_desc;
-        }
+        iowait_inc_wait_count(wait, tx->num_desc);
         queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
         ret = -ECOMM;
         goto unlock;
@@ -2443,9 +2446,9 @@
 /**
  * sdma_send_txlist() - submit a list of tx req to ring
  * @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
  * @tx_list: list of sdma_txreqs to submit
- * @count: pointer to a u32 which, after return will contain the total number of
+ * @count: pointer to a u16 which, after return will contain the total number of
  *      sdma_txreqs removed from the tx_list. This will include sdma_txreqs
  *      whose SDMA descriptors are submitted to the ring and the sdma_txreqs
  *      which are added to SDMA engine flush list if the SDMA engine state is
@@ -2468,8 +2471,8 @@
  *      -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
  * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
  */
-int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
-                     struct list_head *tx_list, u32 *count_out)
+int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
+                     struct list_head *tx_list, u16 *count_out)
 {
         struct sdma_txreq *tx, *tx_next;
         int ret = 0;
@@ -2480,7 +2483,7 @@
         spin_lock_irqsave(&sde->tail_lock, flags);
 retry:
         list_for_each_entry_safe(tx, tx_next, tx_list, list) {
-                tx->wait = wait;
+                tx->wait = iowait_ioww_to_iow(wait);
                 if (unlikely(!__sdma_running(sde)))
                         goto unlock_noconn;
                 if (unlikely(tx->num_desc > sde->desc_avail))
@@ -2501,8 +2504,9 @@
 update_tail:
         total_count = submit_count + flush_count;
         if (wait) {
-                iowait_sdma_add(wait, total_count);
-                iowait_starve_clear(submit_count > 0, wait);
+                iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
+                iowait_starve_clear(submit_count > 0,
+                                    iowait_ioww_to_iow(wait));
         }
         if (tail != INVALID_TAIL)
                 sdma_update_tail(sde, tail);
@@ -2512,7 +2516,7 @@
 unlock_noconn:
         spin_lock(&sde->flushlist_lock);
         list_for_each_entry_safe(tx, tx_next, tx_list, list) {
-                tx->wait = wait;
+                tx->wait = iowait_ioww_to_iow(wait);
                 list_del_init(&tx->list);
                 tx->next_descq_idx = 0;
 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
@@ -2521,10 +2525,7 @@
 #endif
                 list_add_tail(&tx->list, &sde->flushlist);
                 flush_count++;
-                if (wait) {
-                        wait->tx_count++;
-                        wait->count += tx->num_desc;
-                }
+                iowait_inc_wait_count(wait, tx->num_desc);
         }
         spin_unlock(&sde->flushlist_lock);
         queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
@@ -2583,7 +2584,7 @@
                  * 7220, e.g.
                  */
                 ss->go_s99_running = 1;
-                /* fall through -- and start dma engine */
+                fallthrough;    /* and start dma engine */
         case sdma_event_e10_go_hw_start:
                 /* This reference means the state machine is started */
                 sdma_get(&sde->state);
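
fallthrough; is the kernel's pseudo-keyword for a deliberate switch fallthrough: it expands to __attribute__((__fallthrough__)) where the compiler supports it, and to a no-op otherwise, which -Wimplicit-fallthrough can check more reliably than the old /* fall through */ comments. A small self-contained illustration:

    #include <linux/compiler_attributes.h>  /* defines 'fallthrough' */

    /* Illustrative only: annotated, deliberate fallthrough between cases. */
    static int demo_steps_remaining(int state)
    {
            int steps = 0;

            switch (state) {
            case 0:
                    steps++;        /* work specific to state 0 ... */
                    fallthrough;    /* ... then deliberately continue */
            case 1:
                    steps++;
                    break;
            default:
                    break;
            }

            return steps;
    }
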
@@ -2725,7 +2726,6 @@
         case sdma_event_e70_go_idle:
                 break;
         case sdma_event_e85_link_down:
-                /* fall through */
         case sdma_event_e80_hw_freeze:
                 sdma_set_state(sde, sdma_state_s80_hw_freeze);
                 atomic_dec(&sde->dd->sdma_unfreeze_count);
@@ -3006,7 +3006,7 @@
         case sdma_event_e60_hw_halted:
                 need_progress = 1;
                 sdma_err_progress_check_schedule(sde);
-                /* fall through */
+                fallthrough;
         case sdma_event_e90_sw_halted:
                 /*
                  * SW initiated halt does not perform engines
@@ -3020,7 +3020,7 @@
                 break;
         case sdma_event_e85_link_down:
                 ss->go_s99_running = 0;
-                /* fall through */
+                fallthrough;
         case sdma_event_e80_hw_freeze:
                 sdma_set_state(sde, sdma_state_s80_hw_freeze);
                 atomic_dec(&sde->dd->sdma_unfreeze_count);
@@ -3169,7 +3169,7 @@
                 /* Add descriptor for coalesce buffer */
                 tx->desc_limit = MAX_DESC;
                 return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
-                                         addr, tx->tlen);
+                                         addr, tx->tlen, NULL, NULL, NULL);
         }
 
         return 1;
@@ -3200,20 +3200,22 @@
 {
         int rval = 0;
 
-        tx->num_desc++;
-        if ((unlikely(tx->num_desc == tx->desc_limit))) {
+        if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
                 rval = _extend_sdma_tx_descs(dd, tx);
                 if (rval) {
                         __sdma_txclean(dd, tx);
                         return rval;
                 }
         }
+
         /* finish the one just added */
         make_tx_sdma_desc(
                 tx,
                 SDMA_MAP_NONE,
                 dd->sdma_pad_phys,
-                sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
+                sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)),
+                NULL, NULL, NULL);
+        tx->num_desc++;
         _sdma_close_tx(dd, tx);
         return rval;
 }
@@ -3250,7 +3252,7 @@
                 tx->num_desc++;
                 tx->descs[2].qw[0] = 0;
                 tx->descs[2].qw[1] = 0;
-                /* FALLTHROUGH */
+                fallthrough;
         case SDMA_AHG_APPLY_UPDATE2:
                 tx->num_desc++;
                 tx->descs[1].qw[0] = 0;