forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-11 6778948f9de86c3cfaf36725a7c87dcff9ba247f
kernel/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
 /*
 * Copyright (c) 2004-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

 #include <linux/module.h>
@@ -34,6 +23,11 @@
 #include "targaddrs.h"
 #include "trace.h"
 #include "sdio.h"
+#include "coredump.h"
+
+void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);
+
+#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)

 /* inlined helper functions */

@@ -428,6 +422,7 @@
 struct ath10k_htc *htc = &ar->htc;
 struct ath10k_sdio_rx_data *pkt;
 struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
 enum ath10k_htc_ep_id id;
 int ret, i, *n_lookahead_local;
 u32 *lookaheads_local;
@@ -473,10 +468,16 @@
 if (ret)
 goto out;

- if (!pkt->trailer_only)
- ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
- else
+ if (!pkt->trailer_only) {
+ cb = ATH10K_SKB_RXCB(pkt->skb);
+ cb->eid = id;
+
+ skb_queue_tail(&ar_sdio->rx_head, pkt->skb);
+ queue_work(ar->workqueue_aux,
+ &ar_sdio->async_work_rx);
+ } else {
 kfree_skb(pkt->skb);
+ }

 /* The RX complete handler now owns the skb...*/
 pkt->skb = NULL;
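Note on the hunk above: instead of indicating each skb to HTC inline from the SDIO IRQ path, the packet is tagged with its endpoint id and parked on rx_head, and ath10k_rx_indication_async_work (added further down in this diff) delivers it from the auxiliary workqueue. A self-contained userspace analogue of this queue-plus-worker handoff, using pthreads in place of the kernel workqueue; all names here are invented for illustration:

#include <pthread.h>
#include <stdio.h>

/* Toy analogue of skb_queue_tail() + queue_work(): the producer only
 * enqueues; a worker thread delivers. Single-slot "queue" for brevity. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int queued = -1;                 /* -1 means the queue is empty */

static void *rx_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (queued < 0)
		pthread_cond_wait(&cond, &lock);
	printf("delivering packet for endpoint %d\n", queued);
	queued = -1;
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, rx_worker, NULL);

	/* "IRQ path": tag the packet with its endpoint and hand it off. */
	pthread_mutex_lock(&lock);
	queued = 2;                     /* like cb->eid = id */
	pthread_cond_signal(&cond);     /* like queue_work() */
	pthread_mutex_unlock(&lock);

	pthread_join(worker, NULL);
	return 0;
}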
@@ -495,21 +496,22 @@
 return ret;
 }

-static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
- struct ath10k_sdio_rx_data *rx_pkts,
- struct ath10k_htc_hdr *htc_hdr,
- size_t full_len, size_t act_len,
- size_t *bndl_cnt)
+static int ath10k_sdio_mbox_alloc_bundle(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *rx_pkts,
+ struct ath10k_htc_hdr *htc_hdr,
+ size_t full_len, size_t act_len,
+ size_t *bndl_cnt)
 {
 int ret, i;
+ u8 max_msgs = ar->htc.max_msgs_per_htc_bundle;

- *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
+ *bndl_cnt = ath10k_htc_get_bundle_count(max_msgs, htc_hdr->flags);

- if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
+ if (*bndl_cnt > max_msgs) {
 ath10k_warn(ar,
 "HTC bundle length %u exceeds maximum %u\n",
 le16_to_cpu(htc_hdr->len),
- HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
+ max_msgs);
 return -ENOMEM;
 }

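The rewrite above stops reading the bundle count straight out of the HTC flags with FIELD_GET and instead calls ath10k_htc_get_bundle_count(), bounding the result by the max_msgs_per_htc_bundle the target negotiated at HTC setup rather than the compile-time HTC_HOST_MAX_MSG_PER_RX_BUNDLE. A minimal sketch of the flag-decoding idea; the mask value and the absence of any scaling are assumptions for illustration, not the kernel helper's implementation:

#include <stdio.h>

/* Hypothetical stand-in for ATH10K_HTC_FLAG_BUNDLE_MASK: upper nibble
 * of the flags byte carries the number of bundled packets to follow. */
#define BUNDLE_MASK  0xF0
#define BUNDLE_SHIFT 4

static unsigned int bundle_count(unsigned char flags, unsigned char max_msgs)
{
	unsigned int cnt = (flags & BUNDLE_MASK) >> BUNDLE_SHIFT;

	/* The caller rejects counts above the negotiated per-bundle max. */
	return cnt <= max_msgs ? cnt : 0;
}

int main(void)
{
	printf("%u\n", bundle_count(0x30, 8)); /* 3 packets follow in the bundle */
	return 0;
}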
@@ -540,12 +542,11 @@
 size_t full_len, act_len;
 bool last_in_bundle;
 int ret, i;
+ int pkt_cnt = 0;

 if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
- ath10k_warn(ar,
- "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
- n_lookaheads,
- ATH10K_SDIO_MAX_RX_MSGS);
+ ath10k_warn(ar, "the total number of pkts to be fetched (%u) exceeds maximum %u\n",
+ n_lookaheads, ATH10K_SDIO_MAX_RX_MSGS);
 ret = -ENOMEM;
 goto err;
 }
@@ -554,10 +555,8 @@
 htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
 last_in_bundle = false;

- if (le16_to_cpu(htc_hdr->len) >
- ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
- ath10k_warn(ar,
- "payload length %d exceeds max htc length: %zu\n",
+ if (le16_to_cpu(htc_hdr->len) > ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
+ ath10k_warn(ar, "payload length %d exceeds max htc length: %zu\n",
 le16_to_cpu(htc_hdr->len),
 ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
 ret = -ENOMEM;
@@ -572,31 +571,37 @@
 full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);

 if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
- ath10k_warn(ar,
- "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
+ ath10k_warn(ar, "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
 htc_hdr->eid, htc_hdr->flags,
 le16_to_cpu(htc_hdr->len));
 ret = -EINVAL;
 goto err;
 }

- if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
+ if (ath10k_htc_get_bundle_count(
+ ar->htc.max_msgs_per_htc_bundle, htc_hdr->flags)) {
 /* HTC header indicates that every packet to follow
 * has the same padded length so that it can be
 * optimally fetched as a full bundle.
 */
 size_t bndl_cnt;

- ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
- &ar_sdio->rx_pkts[i],
- htc_hdr,
- full_len,
- act_len,
- &bndl_cnt);
+ ret = ath10k_sdio_mbox_alloc_bundle(ar,
+ &ar_sdio->rx_pkts[pkt_cnt],
+ htc_hdr,
+ full_len,
+ act_len,
+ &bndl_cnt);

- n_lookaheads += bndl_cnt;
- i += bndl_cnt;
- /*Next buffer will be the last in the bundle */
+ if (ret) {
+ ath10k_warn(ar, "failed to allocate a bundle: %d\n",
+ ret);
+ goto err;
+ }
+
+ pkt_cnt += bndl_cnt;
+
+ /* next buffer will be the last in the bundle */
 last_in_bundle = true;
 }

@@ -607,7 +612,7 @@
 if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
 full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;

- ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[pkt_cnt],
 act_len,
 full_len,
 last_in_bundle,
@@ -616,9 +621,11 @@
 ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
 goto err;
 }
+
+ pkt_cnt++;
 }

- ar_sdio->n_rx_pkts = i;
+ ar_sdio->n_rx_pkts = pkt_cnt;

 return 0;

@@ -632,10 +639,10 @@
 return ret;
 }

-static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
- struct ath10k_sdio_rx_data *pkt)
+static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
 {
 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt = &ar_sdio->rx_pkts[0];
 struct sk_buff *skb = pkt->skb;
 struct ath10k_htc_hdr *htc_hdr;
 int ret;
@@ -643,47 +650,74 @@
 ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
 skb->data, pkt->alloc_len);
 if (ret)
- goto out;
+ goto err;

- /* Update actual length. The original length may be incorrect,
- * as the FW will bundle multiple packets as long as their sizes
- * fit within the same aligned length (pkt->alloc_len).
- */
 htc_hdr = (struct ath10k_htc_hdr *)skb->data;
 pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
 if (pkt->act_len > pkt->alloc_len) {
- ath10k_warn(ar, "rx packet too large (%zu > %zu)\n",
- pkt->act_len, pkt->alloc_len);
- ret = -EMSGSIZE;
- goto out;
+ ret = -EINVAL;
+ goto err;
 }

 skb_put(skb, pkt->act_len);
+ return 0;

-out:
- pkt->status = ret;
+err:
+ ar_sdio->n_rx_pkts = 0;
+ ath10k_sdio_mbox_free_rx_pkt(pkt);

 return ret;
 }

-static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
+static int ath10k_sdio_mbox_rx_fetch_bundle(struct ath10k *ar)
 {
 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_rx_data *pkt;
+ struct ath10k_htc_hdr *htc_hdr;
 int ret, i;
+ u32 pkt_offset, virt_pkt_len;

+ virt_pkt_len = 0;
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
+ virt_pkt_len += ar_sdio->rx_pkts[i].alloc_len;
+
+ if (virt_pkt_len > ATH10K_SDIO_VSG_BUF_SIZE) {
+ ath10k_warn(ar, "sdio vsg buffer size limit: %d\n", virt_pkt_len);
+ ret = -E2BIG;
+ goto err;
+ }
+
+ ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
+ ar_sdio->vsg_buffer, virt_pkt_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read bundle packets: %d", ret);
+ goto err;
+ }
+
+ pkt_offset = 0;
 for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
- ret = ath10k_sdio_mbox_rx_packet(ar,
- &ar_sdio->rx_pkts[i]);
- if (ret)
+ pkt = &ar_sdio->rx_pkts[i];
+ htc_hdr = (struct ath10k_htc_hdr *)(ar_sdio->vsg_buffer + pkt_offset);
+ pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+
+ if (pkt->act_len > pkt->alloc_len) {
+ ret = -EINVAL;
 goto err;
+ }
+
+ skb_put_data(pkt->skb, htc_hdr, pkt->act_len);
+ pkt_offset += pkt->alloc_len;
 }

 return 0;

 err:
 /* Free all packets that was not successfully fetched. */
- for (; i < ar_sdio->n_rx_pkts; i++)
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++)
 ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+
+ ar_sdio->n_rx_pkts = 0;

 return ret;
 }
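ath10k_sdio_mbox_rx_fetch_bundle() above pulls the whole bundle in one ath10k_sdio_readsb() into vsg_buffer and then walks it: act_len comes from the HTC header at the current offset, while the offset advances by each slot's padded alloc_len. That walk is plain length-prefixed record parsing; a self-contained sketch of the pattern, with record layout and names invented and a little-endian host assumed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec_hdr {              /* stand-in for struct ath10k_htc_hdr */
	uint16_t len;         /* payload length (LE host assumed here) */
};

/* Walk a contiguous buffer of padded records, like the vsg_buffer loop. */
static int split_bundle(const uint8_t *buf, size_t buf_len,
			size_t alloc_len, size_t n_pkts)
{
	size_t off = 0, i;

	for (i = 0; i < n_pkts; i++) {
		struct rec_hdr hdr;
		size_t act_len;

		if (off + sizeof(hdr) > buf_len)
			return -1;
		memcpy(&hdr, buf + off, sizeof(hdr));
		act_len = hdr.len + sizeof(hdr);   /* header + payload */
		if (act_len > alloc_len)
			return -1;                 /* mirrors the -EINVAL check */
		printf("pkt %zu: %zu bytes at offset %zu\n", i, act_len, off);
		off += alloc_len;                  /* records padded to alloc_len */
	}
	return 0;
}

int main(void)
{
	uint8_t buf[64] = { 4, 0, 'a', 'b', 'c', 'd' };

	buf[32] = 2; /* second record: 2-byte payload */
	return split_bundle(buf, sizeof(buf), 32, 2);
}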
@@ -727,7 +761,10 @@
 */
 *done = false;

- ret = ath10k_sdio_mbox_rx_fetch(ar);
+ if (ar_sdio->n_rx_pkts > 1)
+ ret = ath10k_sdio_mbox_rx_fetch_bundle(ar);
+ else
+ ret = ath10k_sdio_mbox_rx_fetch(ar);

 /* Process fetched packets. This will potentially update
 * n_lookaheads depending on if the packets contain lookahead
@@ -882,6 +919,9 @@

 out:
 mutex_unlock(&irq_data->mtx);
+ if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
+ ath10k_sdio_fw_crashed_dump(ar);
+
 return ret;
 }

@@ -919,8 +959,11 @@
 */
 ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
 irq_proc_reg, sizeof(*irq_proc_reg));
- if (ret)
+ if (ret) {
+ queue_work(ar->workqueue, &ar->restart_work);
+ ath10k_warn(ar, "read int status fail, start recovery\n");
 goto out;
+ }

 /* Update only those registers that are enabled */
 *host_int_status = irq_proc_reg->host_int_status &
@@ -1046,10 +1089,10 @@

 mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;

- dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device);
- dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device);
+ dev_id_base = (device & 0x0F00);
+ dev_id_chiprev = (device & 0x00FF);
 switch (dev_id_base) {
- case QCA_MANUFACTURER_ID_AR6005_BASE:
+ case (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00):
 if (dev_id_chiprev < 4)
 mbox_info->ext_info[0].htc_ext_sz =
 ATH10K_HIF_MBOX0_EXT_WIDTH;
@@ -1060,7 +1103,7 @@
 mbox_info->ext_info[0].htc_ext_sz =
 ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
 break;
- case QCA_MANUFACTURER_ID_QCA9377_BASE:
+ case (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00):
 mbox_info->ext_info[0].htc_ext_sz =
 ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
 break;
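Both hunks above drop the private QCA_MANUFACTURER_ID_* constants in favour of masking the standard SDIO device ID directly: bits 11:8 select the chip family and bits 7:0 the chip revision. A quick illustration of the masking; the concrete ID value is made up for the example:

#include <stdio.h>

int main(void)
{
	/* Made-up device ID for illustration; only the masking matters. */
	unsigned int device = 0x050A;                  /* e.g. an AR6005-family part */
	unsigned int dev_id_base = device & 0x0F00;    /* chip family   */
	unsigned int dev_id_chiprev = device & 0x00FF; /* chip revision */

	printf("family 0x%04x rev 0x%02x\n", dev_id_base, dev_id_chiprev);
	return 0;
}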
@@ -1299,23 +1342,145 @@
 ath10k_sdio_free_bus_req(ar, req);
 }

+/* To improve throughput use workqueue to deliver packets to HTC layer,
+ * this way SDIO bus is utilised much better.
+ */
+static void ath10k_rx_indication_async_work(struct work_struct *work)
+{
+ struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+ async_work_rx);
+ struct ath10k *ar = ar_sdio->ar;
+ struct ath10k_htc_ep *ep;
+ struct ath10k_skb_rxcb *cb;
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&ar_sdio->rx_head);
+ if (!skb)
+ break;
+ cb = ATH10K_SKB_RXCB(skb);
+ ep = &ar->htc.endpoint[cb->eid];
+ ep->ep_ops.ep_rx_complete(ar, skb);
+ }
+
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
+ local_bh_disable();
+ napi_schedule(&ar->napi);
+ local_bh_enable();
+ }
+}
+
+static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
+{
+ struct ath10k *ar = ar_sdio->ar;
+ unsigned char rtc_state = 0;
+ int ret = 0;
+
+ rtc_state = sdio_f0_readb(ar_sdio->func, ATH10K_CIS_RTC_STATE_ADDR, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to read rtc state: %d\n", ret);
+ return ret;
+ }
+
+ *state = rtc_state & 0x3;
+
+ return ret;
+}
+
+static int ath10k_sdio_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 val;
+ int retry = ATH10K_CIS_READ_RETRY, ret = 0;
+ unsigned char rtc_state = 0;
+
+ sdio_claim_host(ar_sdio->func);
+
+ ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
+ ret);
+ goto release;
+ }
+
+ if (enable_sleep) {
+ val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
+ ar_sdio->mbox_state = SDIO_MBOX_SLEEP_STATE;
+ } else {
+ val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
+ ar_sdio->mbox_state = SDIO_MBOX_AWAKE_STATE;
+ }
+
+ ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
+ ret);
+ }
+
+ if (!enable_sleep) {
+ do {
+ udelay(ATH10K_CIS_READ_WAIT_4_RTC_CYCLE_IN_US);
+ ret = ath10k_sdio_read_rtc_state(ar_sdio, &rtc_state);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to disable mbox sleep: %d", ret);
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read rtc state: %d\n",
+ rtc_state);
+
+ if (rtc_state == ATH10K_CIS_RTC_STATE_ON)
+ break;
+
+ udelay(ATH10K_CIS_XTAL_SETTLE_DURATION_IN_US);
+ retry--;
+ } while (retry > 0);
+ }
+
+release:
+ sdio_release_host(ar_sdio->func);
+
+ return ret;
+}
+
+static void ath10k_sdio_sleep_timer_handler(struct timer_list *t)
+{
+ struct ath10k_sdio *ar_sdio = from_timer(ar_sdio, t, sleep_timer);
+
+ ar_sdio->mbox_state = SDIO_MBOX_REQUEST_TO_SLEEP_STATE;
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+}
+
 static void ath10k_sdio_write_async_work(struct work_struct *work)
 {
 struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
 wr_async_work);
 struct ath10k *ar = ar_sdio->ar;
 struct ath10k_sdio_bus_request *req, *tmp_req;
+ struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;

 spin_lock_bh(&ar_sdio->wr_async_lock);

 list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
 list_del(&req->list);
 spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ if (req->address >= mbox_info->htc_addr &&
+ ar_sdio->mbox_state == SDIO_MBOX_SLEEP_STATE) {
+ ath10k_sdio_set_mbox_sleep(ar, false);
+ mod_timer(&ar_sdio->sleep_timer, jiffies +
+ msecs_to_jiffies(ATH10K_MIN_SLEEP_INACTIVITY_TIME_MS));
+ }
+
 __ath10k_sdio_write_async(ar, req);
 spin_lock_bh(&ar_sdio->wr_async_lock);
 }

 spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ if (ar_sdio->mbox_state == SDIO_MBOX_REQUEST_TO_SLEEP_STATE)
+ ath10k_sdio_set_mbox_sleep(ar, true);
 }

 static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
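The additions above form a small power-management state machine: a write that targets the mailbox while the target sleeps first wakes it (ath10k_sdio_set_mbox_sleep(ar, false)) and re-arms an inactivity timer; when that timer fires, the handler marks SDIO_MBOX_REQUEST_TO_SLEEP_STATE and kicks the write worker, which puts the mailbox back to sleep once the queue has drained. A compact userspace model of those transitions, with state names shortened and the logic paraphrased from the diff:

#include <stdio.h>

/* Hypothetical three-state model of the mbox sleep logic above. */
enum mbox_state { MBOX_SLEEP, MBOX_AWAKE, MBOX_REQUEST_TO_SLEEP };

static enum mbox_state state = MBOX_SLEEP;

static void on_mbox_write(void)
{
	if (state == MBOX_SLEEP) {
		state = MBOX_AWAKE; /* ath10k_sdio_set_mbox_sleep(ar, false) */
		/* ...re-arm the inactivity timer (mod_timer in the driver)... */
	}
}

static void on_inactivity_timer(void)
{
	state = MBOX_REQUEST_TO_SLEEP; /* the timer handler marks the request */
}

static void on_write_queue_drained(void)
{
	if (state == MBOX_REQUEST_TO_SLEEP)
		state = MBOX_SLEEP; /* ath10k_sdio_set_mbox_sleep(ar, true) */
}

int main(void)
{
	on_mbox_write();
	on_inactivity_timer();
	on_write_queue_drained();
	printf("final state: %d (0 = sleeping)\n", (int)state);
	return 0;
}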
@@ -1382,7 +1547,7 @@

 /* sdio HIF functions */

-static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
+static int ath10k_sdio_disable_intrs(struct ath10k *ar)
 {
 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@@ -1402,7 +1567,8 @@
 return ret;
 }

-static int ath10k_sdio_hif_power_up(struct ath10k *ar)
+static int ath10k_sdio_hif_power_up(struct ath10k *ar,
+ enum ath10k_firmware_mode fw_mode)
 {
 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 struct sdio_func *func = ar_sdio->func;
@@ -1412,6 +1578,12 @@
 return 0;

 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
+
+ ret = ath10k_sdio_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to config sdio: %d\n", ret);
+ return ret;
+ }

 sdio_claim_host(func);

@@ -1431,7 +1603,7 @@

 ar_sdio->is_disabled = false;

- ret = ath10k_sdio_hif_disable_intrs(ar);
+ ret = ath10k_sdio_disable_intrs(ar);
 if (ret)
 return ret;

@@ -1448,13 +1620,24 @@

 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");

+ del_timer_sync(&ar_sdio->sleep_timer);
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
 /* Disable the card */
 sdio_claim_host(ar_sdio->func);
- ret = sdio_disable_func(ar_sdio->func);
- sdio_release_host(ar_sdio->func);

- if (ret)
+ ret = sdio_disable_func(ar_sdio->func);
+ if (ret) {
 ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
+ sdio_release_host(ar_sdio->func);
+ return;
+ }
+
+ ret = mmc_hw_reset(ar_sdio->func->card->host);
+ if (ret)
+ ath10k_warn(ar, "unable to reset sdio: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);

 ar_sdio->is_disabled = true;
 }
@@ -1492,7 +1675,7 @@
 return 0;
 }

-static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
+static int ath10k_sdio_enable_intrs(struct ath10k *ar)
 {
 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
 struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
@@ -1512,8 +1695,10 @@
 regs->int_status_en |=
 FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);

- /* Set up the CPU Interrupt status Register */
- regs->cpu_int_status_en = 0;
+ /* Set up the CPU Interrupt Status Register, enable CPU sourced interrupt #0
+ * #0 is used for report assertion from target
+ */
+ regs->cpu_int_status_en = FIELD_PREP(MBOX_CPU_STATUS_ENABLE_ASSERT_MASK, 1);

 /* Set up the Error Interrupt status Register */
 regs->err_int_status_en =
@@ -1536,33 +1721,6 @@

 mutex_unlock(&irq_data->mtx);
 return ret;
-}
-
-static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
-{
- u32 val;
- int ret;
-
- ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
- if (ret) {
- ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
- ret);
- return ret;
- }
-
- if (enable_sleep)
- val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
- else
- val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
-
- ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
- if (ret) {
- ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
- ret);
- return ret;
- }
-
- return 0;
 }

 /* HIF diagnostics */
@@ -1600,8 +1758,8 @@
 return ret;
 }

-static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
- u32 *value)
+static int ath10k_sdio_diag_read32(struct ath10k *ar, u32 address,
+ u32 *value)
 {
 __le32 *val;
 int ret;
@@ -1646,20 +1804,72 @@
 return 0;
 }

+static int ath10k_sdio_hif_start_post(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 addr, val;
+ int ret = 0;
+
+ addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+ ret = ath10k_sdio_diag_read32(ar, addr, &val);
+ if (ret) {
+ ath10k_warn(ar, "unable to read hi_acs_flags : %d\n", ret);
+ return ret;
+ }
+
+ if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio mailbox swap service enabled\n");
+ ar_sdio->swap_mbox = true;
+ } else {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio mailbox swap service disabled\n");
+ ar_sdio->swap_mbox = false;
+ }
+
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
+ return 0;
+}
+
+static int ath10k_sdio_get_htt_tx_complete(struct ath10k *ar)
+{
+ u32 addr, val;
+ int ret;
+
+ addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+ ret = ath10k_sdio_diag_read32(ar, addr, &val);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to read hi_acs_flags for htt tx comple : %d\n", ret);
+ return ret;
+ }
+
+ ret = (val & HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK);
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio reduce tx complete fw%sack\n",
+ ret ? " " : " not ");
+
+ return ret;
+}
+
 /* HIF start/stop */

 static int ath10k_sdio_hif_start(struct ath10k *ar)
 {
 struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
- u32 addr, val;
 int ret;
+
+ napi_enable(&ar->napi);

 /* Sleep 20 ms before HIF interrupts are disabled.
 * This will give target plenty of time to process the BMI done
 * request before interrupts are disabled.
 */
 msleep(20);
- ret = ath10k_sdio_hif_disable_intrs(ar);
+ ret = ath10k_sdio_disable_intrs(ar);
 if (ret)
 return ret;

@@ -1681,33 +1891,19 @@

 sdio_release_host(ar_sdio->func);

- ret = ath10k_sdio_hif_enable_intrs(ar);
+ ret = ath10k_sdio_enable_intrs(ar);
 if (ret)
 ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);

- addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
-
- ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
- if (ret) {
- ath10k_warn(ar, "unable to read hi_acs_flags address: %d\n", ret);
- return ret;
- }
-
- if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
- ath10k_dbg(ar, ATH10K_DBG_SDIO,
- "sdio mailbox swap service enabled\n");
- ar_sdio->swap_mbox = true;
- }
-
 /* Enable sleep and then disable it again */
- ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
+ ret = ath10k_sdio_set_mbox_sleep(ar, true);
 if (ret)
 return ret;

 /* Wait for 20ms for the written value to take effect */
 msleep(20);

- ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
+ ret = ath10k_sdio_set_mbox_sleep(ar, false);
 if (ret)
 return ret;

@@ -1792,13 +1988,16 @@
 }

 spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
 }

 #ifdef CONFIG_PM

 static int ath10k_sdio_hif_suspend(struct ath10k *ar)
 {
- return -EOPNOTSUPP;
+ return 0;
 }

 static int ath10k_sdio_hif_resume(struct ath10k *ar)
@@ -1911,17 +2110,6 @@
 *dl_pipe = 0;
 }

-/* This op is currently only used by htc_wait_target if the HTC ready
- * message times out. It is not applicable for SDIO since there is nothing
- * we can do if the HTC ready message does not arrive in time.
- * TODO: Make this op non mandatory by introducing a NULL check in the
- * hif op wrapper.
- */
-static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
- u8 pipe, int force)
-{
-}
-
 static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
 .tx_sg = ath10k_sdio_hif_tx_sg,
 .diag_read = ath10k_sdio_hif_diag_read,
@@ -1929,9 +2117,10 @@
 .exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,
 .start = ath10k_sdio_hif_start,
 .stop = ath10k_sdio_hif_stop,
+ .start_post = ath10k_sdio_hif_start_post,
+ .get_htt_tx_complete = ath10k_sdio_get_htt_tx_complete,
 .map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,
 .get_default_pipe = ath10k_sdio_hif_get_default_pipe,
- .send_complete_check = ath10k_sdio_hif_send_complete_check,
 .power_up = ath10k_sdio_hif_power_up,
 .power_down = ath10k_sdio_hif_power_down,
 #ifdef CONFIG_PM
@@ -1947,7 +2136,28 @@
 */
 static int ath10k_sdio_pm_suspend(struct device *device)
 {
- return 0;
+ struct sdio_func *func = dev_to_sdio_func(device);
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+ mmc_pm_flag_t pm_flag, pm_caps;
+ int ret;
+
+ if (!device_may_wakeup(ar->dev))
+ return 0;
+
+ ath10k_sdio_set_mbox_sleep(ar, true);
+
+ pm_flag = MMC_PM_KEEP_POWER;
+
+ ret = sdio_set_host_pm_flags(func, pm_flag);
+ if (ret) {
+ pm_caps = sdio_get_host_pm_caps(func);
+ ath10k_warn(ar, "failed to set sdio host pm flags (0x%x, 0x%x): %d\n",
+ pm_flag, pm_caps, ret);
+ return ret;
+ }
+
+ return ret;
 }

 static int ath10k_sdio_pm_resume(struct device *device)
@@ -1966,13 +2176,345 @@

 #endif /* CONFIG_PM_SLEEP */

+static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done;
+
+ done = ath10k_htt_rx_hl_indication(ar, budget);
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "napi poll: done: %d, budget:%d\n", done, budget);
+
+ if (done < budget)
+ napi_complete_done(ctx, done);
+
+ return done;
+}
+
+static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
+ u32 item_offset,
+ u32 *val)
+{
+ u32 addr;
+ int ret;
+
+ addr = host_interest_item_address(item_offset);
+
+ ret = ath10k_sdio_diag_read32(ar, addr, val);
+
+ if (ret)
+ ath10k_warn(ar, "unable to read host interest offset %d value\n",
+ item_offset);
+
+ return ret;
+}
+
+static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
+ u32 buf_len)
+{
+ u32 val;
+ int i, ret;
+
+ for (i = 0; i < buf_len; i += 4) {
+ ret = ath10k_sdio_diag_read32(ar, address + i, &val);
+ if (ret) {
+ ath10k_warn(ar, "unable to read mem %d value\n", address + i);
+ break;
+ }
+ memcpy(buf + i, &val, 4);
+ }
+
+ return ret;
+}
+
+static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
+{
+ u32 param;
+
+ ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);
+
+ return param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW;
+}
+
+static void ath10k_sdio_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data,
+ bool fast_dump)
+{
+ u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+ int i, ret;
+ u32 reg_dump_area;
+
+ ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
+ &reg_dump_area);
+ if (ret) {
+ ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
+ return;
+ }
+
+ if (fast_dump)
+ ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
+ sizeof(reg_dump_values));
+ else
+ ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
+ sizeof(reg_dump_values));
+
+ if (ret) {
+ ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
+ return;
+ }
+
+ ath10k_err(ar, "firmware register dump:\n");
+ for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
+ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i,
+ reg_dump_values[i],
+ reg_dump_values[i + 1],
+ reg_dump_values[i + 2],
+ reg_dump_values[i + 3]);
+
+ if (!crash_data)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
+ crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
+}
+
+static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
+ const struct ath10k_mem_region *mem_region,
+ u8 *buf, size_t buf_len)
+{
+ const struct ath10k_mem_section *cur_section, *next_section;
+ unsigned int count, section_size, skip_size;
+ int ret, i, j;
+
+ if (!mem_region || !buf)
+ return 0;
+
+ cur_section = &mem_region->section_table.sections[0];
+
+ if (mem_region->start > cur_section->start) {
+ ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+ mem_region->start, cur_section->start);
+ return 0;
+ }
+
+ skip_size = cur_section->start - mem_region->start;
+
+ /* fill the gap between the first register section and register
+ * start address
+ */
+ for (i = 0; i < skip_size; i++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count = 0;
+
+ for (i = 0; cur_section; i++) {
+ section_size = cur_section->end - cur_section->start;
+
+ if (section_size <= 0) {
+ ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+ cur_section->start,
+ cur_section->end);
+ break;
+ }
+
+ if ((i + 1) == mem_region->section_table.size) {
+ /* last section */
+ next_section = NULL;
+ skip_size = 0;
+ } else {
+ next_section = cur_section + 1;
+
+ if (cur_section->end > next_section->start) {
+ ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+ next_section->start,
+ cur_section->end);
+ break;
+ }
+
+ skip_size = next_section->start - cur_section->end;
+ }
+
+ if (buf_len < (skip_size + section_size)) {
+ ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+ break;
+ }
+
+ buf_len -= skip_size + section_size;
+
+ /* read section to dest memory */
+ ret = ath10k_sdio_read_mem(ar, cur_section->start,
+ buf, section_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+ cur_section->start, ret);
+ break;
+ }
+
+ buf += section_size;
+ count += section_size;
+
+ /* fill in the gap between this section and the next */
+ for (j = 0; j < skip_size; j++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count += skip_size;
+
+ if (!next_section)
+ /* this was the last section */
+ break;
+
+ cur_section = next_section;
+ }
+
+ return count;
+}
+
+/* if an error happened returns < 0, otherwise the length */
+static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
+ const struct ath10k_mem_region *current_region,
+ u8 *buf,
+ bool fast_dump)
+{
+ int ret;
+
+ if (current_region->section_table.size > 0)
+ /* Copy each section individually. */
+ return ath10k_sdio_dump_memory_section(ar,
+ current_region,
+ buf,
+ current_region->len);
+
+ /* No individiual memory sections defined so we can
+ * copy the entire memory region.
+ */
+ if (fast_dump)
+ ret = ath10k_bmi_read_memory(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+ else
+ ret = ath10k_sdio_read_mem(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+ current_region->name, ret);
+ return ret;
+ }
+
+ return current_region->len;
+}
+
+static void ath10k_sdio_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data,
+ bool fast_dump)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ u32 count;
+ size_t buf_len;
+ int ret, i;
+ u8 *buf;
+
+ if (!crash_data)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+
+ memset(buf, 0, buf_len);
+
+ for (i = 0; i < mem_layout->region_table.size; i++) {
+ count = 0;
+
+ if (current_region->len > buf_len) {
+ ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
+ current_region->name,
+ current_region->len,
+ buf_len);
+ break;
+ }
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
+ fast_dump);
+ if (ret >= 0)
+ count = ret;
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32(current_region->start);
+ hdr->length = cpu_to_le32(count);
+
+ if (count == 0)
+ /* Note: the header remains, just with zero length. */
+ break;
+
+ buf += count;
+ buf_len -= count;
+
+ current_region++;
+ }
+}
+
+void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data;
+ char guid[UUID_STRING_LEN + 1];
+ bool fast_dump;
+
+ fast_dump = ath10k_sdio_is_fast_dump_supported(ar);
+
+ if (fast_dump)
+ ath10k_bmi_start(ar);
+
+ ar->stats.fw_crash_counter++;
+
+ ath10k_sdio_disable_intrs(ar);
+
+ crash_data = ath10k_coredump_new(ar);
+
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
+
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
+ ath10k_sdio_dump_memory(ar, crash_data, fast_dump);
+
+ ath10k_sdio_enable_intrs(ar);
+
+ queue_work(ar->workqueue, &ar->restart_work);
+}
+
 static int ath10k_sdio_probe(struct sdio_func *func,
 const struct sdio_device_id *id)
 {
 struct ath10k_sdio *ar_sdio;
 struct ath10k *ar;
 enum ath10k_hw_rev hw_rev;
- u32 chip_id, dev_id_base;
+ u32 dev_id_base;
+ struct ath10k_bus_params bus_params = {};
 int ret, i;

 /* Assumption: All SDIO based chipsets (so far) are QCA6174 based.
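The crash-dump code above serialises each memory region as a header followed by its payload: the header slot is reserved first, the region is copied (by BMI when the firmware advertises fast dump support, by diag reads otherwise), and the actual byte count is patched into the header afterwards, so a failed region still leaves a valid zero-length record. A sketch of that reserve-copy-patch pattern; the header layout here is invented, not the real struct ath10k_dump_ram_data_hdr:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative region header; the field layout is made up. */
struct region_hdr {
	uint32_t type;
	uint32_t start;
	uint32_t length;
};

/* Reserve a header, copy the payload, then patch the real length in,
 * mirroring the dump_memory loop above. */
static size_t dump_region(uint8_t *buf, size_t buf_len,
			  uint32_t type, uint32_t start,
			  const uint8_t *data, uint32_t len)
{
	struct region_hdr hdr = { type, start, 0 };

	if (sizeof(hdr) + len > buf_len)
		return 0;
	memcpy(buf + sizeof(hdr), data, len);
	hdr.length = len;  /* in the driver a failed copy leaves length 0 */
	memcpy(buf, &hdr, sizeof(hdr));
	return sizeof(hdr) + len;
}

int main(void)
{
	uint8_t out[64], payload[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	size_t used = dump_region(out, sizeof(out), 1, 0x400000,
				  payload, sizeof(payload));

	printf("wrote %zu bytes\n", used);
	return 0;
}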
@@ -1990,6 +2532,9 @@
 return -ENOMEM;
 }

+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll,
+ ATH10K_NAPI_BUDGET);
+
 ath10k_dbg(ar, ATH10K_DBG_BOOT,
 "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
 func->num, func->vendor, func->device,
@@ -2005,6 +2550,12 @@
 goto err_core_destroy;
 }

+ ar_sdio->vsg_buffer = devm_kmalloc(ar->dev, ATH10K_SDIO_VSG_BUF_SIZE, GFP_KERNEL);
+ if (!ar_sdio->vsg_buffer) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
 ar_sdio->irq_data.irq_en_reg =
 devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
 GFP_KERNEL);
@@ -2013,7 +2564,7 @@
 goto err_core_destroy;
 }

- ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
+ ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_LARGE_CMDBUF_SIZE, GFP_KERNEL);
 if (!ar_sdio->bmi_buf) {
 ret = -ENOMEM;
 goto err_core_destroy;
@@ -2042,40 +2593,38 @@
 for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
 ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);

- dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
- switch (dev_id_base) {
- case QCA_MANUFACTURER_ID_AR6005_BASE:
- case QCA_MANUFACTURER_ID_QCA9377_BASE:
- ar->dev_id = QCA9377_1_0_DEVICE_ID;
- break;
- default:
+ skb_queue_head_init(&ar_sdio->rx_head);
+ INIT_WORK(&ar_sdio->async_work_rx, ath10k_rx_indication_async_work);
+
+ dev_id_base = (id->device & 0x0F00);
+ if (dev_id_base != (SDIO_DEVICE_ID_ATHEROS_AR6005 & 0x0F00) &&
+ dev_id_base != (SDIO_DEVICE_ID_ATHEROS_QCA9377 & 0x0F00)) {
 ret = -ENODEV;
 ath10k_err(ar, "unsupported device id %u (0x%x)\n",
 dev_id_base, id->device);
 goto err_free_wq;
 }

+ ar->dev_id = QCA9377_1_0_DEVICE_ID;
 ar->id.vendor = id->vendor;
 ar->id.device = id->device;

 ath10k_sdio_set_mbox_info(ar);

- ret = ath10k_sdio_config(ar);
- if (ret) {
- ath10k_err(ar, "failed to config sdio: %d\n", ret);
- goto err_free_wq;
- }
-
+ bus_params.dev_type = ATH10K_DEV_TYPE_HL;
 /* TODO: don't know yet how to get chip_id with SDIO */
- chip_id = 0;
- ret = ath10k_core_register(ar, chip_id);
+ bus_params.chip_id = 0;
+ bus_params.hl_msdu_ids = true;
+
+ ar->hw->max_mtu = ETH_DATA_LEN;
+
+ ret = ath10k_core_register(ar, &bus_params);
 if (ret) {
 ath10k_err(ar, "failed to register driver core: %d\n", ret);
 goto err_free_wq;
 }

- /* TODO: remove this once SDIO support is fully implemented */
- ath10k_warn(ar, "WARNING: ath10k SDIO support is incomplete, don't expect anything to work!\n");
+ timer_setup(&ar_sdio->sleep_timer, ath10k_sdio_sleep_timer_handler, 0);

 return 0;

@@ -2096,9 +2645,10 @@
 "sdio removed func %d vendor 0x%x device 0x%x\n",
 func->num, func->vendor, func->device);

- (void)ath10k_sdio_hif_disable_intrs(ar);
- cancel_work_sync(&ar_sdio->wr_async_work);
 ath10k_core_unregister(ar);
+
+ netif_napi_del(&ar->napi);
+
 ath10k_core_destroy(ar);

 flush_workqueue(ar_sdio->workqueue);
@@ -2106,10 +2656,8 @@
 }

 static const struct sdio_device_id ath10k_sdio_devices[] = {
- {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
- (QCA_SDIO_ID_AR6005_BASE | 0xA))},
- {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
- (QCA_SDIO_ID_QCA9377_BASE | 0x1))},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_AR6005)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_ATHEROS, SDIO_DEVICE_ID_ATHEROS_QCA9377)},
 {},
 };

@@ -2120,7 +2668,10 @@
 .id_table = ath10k_sdio_devices,
 .probe = ath10k_sdio_probe,
 .remove = ath10k_sdio_remove,
- .drv.pm = ATH10K_SDIO_PM_OPS,
+ .drv = {
+ .owner = THIS_MODULE,
+ .pm = ATH10K_SDIO_PM_OPS,
+ },
 };

 static int __init ath10k_sdio_init(void)