forked from ~ljy/RK356X_SDK_RELEASE

hc
2023-12-09 95099d4622f8cb224d94e314c7a8e0df60b13f87
kernel/drivers/bluetooth/hci_qca.c
....@@ -1,3 +1,4 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * Bluetooth Software UART Qualcomm protocol
34 *
....@@ -12,34 +13,25 @@
1213 * Written by Ohad Ben-Cohen <ohad@bencohen.org>
1314 * which was in turn based on hci_h4.c, which was written
1415 * by Maxim Krasnyansky and Marcel Holtmann.
15
- *
16
- * This program is free software; you can redistribute it and/or modify
17
- * it under the terms of the GNU General Public License version 2
18
- * as published by the Free Software Foundation
19
- *
20
- * This program is distributed in the hope that it will be useful,
21
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
22
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23
- * GNU General Public License for more details.
24
- *
25
- * You should have received a copy of the GNU General Public License
26
- * along with this program; if not, write to the Free Software
27
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28
- *
2916 */
3017
3118 #include <linux/kernel.h>
3219 #include <linux/clk.h>
20
+#include <linux/completion.h>
3321 #include <linux/debugfs.h>
3422 #include <linux/delay.h>
23
+#include <linux/devcoredump.h>
3524 #include <linux/device.h>
3625 #include <linux/gpio/consumer.h>
3726 #include <linux/mod_devicetable.h>
3827 #include <linux/module.h>
3928 #include <linux/of_device.h>
29
+#include <linux/acpi.h>
4030 #include <linux/platform_device.h>
4131 #include <linux/regulator/consumer.h>
4232 #include <linux/serdev.h>
33
+#include <linux/mutex.h>
34
+#include <asm/unaligned.h>
4335
4436 #include <net/bluetooth/bluetooth.h>
4537 #include <net/bluetooth/hci_core.h>
....@@ -53,15 +45,41 @@
5345 #define HCI_IBS_WAKE_ACK 0xFC
5446 #define HCI_MAX_IBS_SIZE 10
5547
56
-/* Controller states */
57
-#define STATE_IN_BAND_SLEEP_ENABLED 1
58
-
5948 #define IBS_WAKE_RETRANS_TIMEOUT_MS 100
60
-#define IBS_TX_IDLE_TIMEOUT_MS 2000
61
-#define BAUDRATE_SETTLE_TIMEOUT_MS 300
49
+#define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 200
50
+#define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000
51
+#define CMD_TRANS_TIMEOUT_MS 100
52
+#define MEMDUMP_TIMEOUT_MS 8000
6253
6354 /* susclk rate */
6455 #define SUSCLK_RATE_32KHZ 32768
56
+
57
+/* Controller debug log header */
58
+#define QCA_DEBUG_HANDLE 0x2EDC
59
+
60
+/* max retry count when init fails */
61
+#define MAX_INIT_RETRIES 3
62
+
63
+/* Controller dump header */
64
+#define QCA_SSR_DUMP_HANDLE 0x0108
65
+#define QCA_DUMP_PACKET_SIZE 255
66
+#define QCA_LAST_SEQUENCE_NUM 0xFFFF
67
+#define QCA_CRASHBYTE_PACKET_LEN 1096
68
+#define QCA_MEMDUMP_BYTE 0xFB
69
+
70
+enum qca_flags {
71
+ QCA_IBS_ENABLED,
72
+ QCA_DROP_VENDOR_EVENT,
73
+ QCA_SUSPENDING,
74
+ QCA_MEMDUMP_COLLECTION,
75
+ QCA_HW_ERROR_EVENT,
76
+ QCA_SSR_TRIGGERED
77
+};
78
+
79
+enum qca_capabilities {
80
+ QCA_CAP_WIDEBAND_SPEECH = BIT(0),
81
+ QCA_CAP_VALID_LE_STATES = BIT(1),
82
+};
6583
6684 /* HCI_IBS transmit side sleep protocol states */
6785 enum tx_ibs_states {
....@@ -85,11 +103,41 @@
85103 HCI_IBS_RX_VOTE_CLOCK_OFF,
86104 };
87105
106
+/* Controller memory dump states */
107
+enum qca_memdump_states {
108
+ QCA_MEMDUMP_IDLE,
109
+ QCA_MEMDUMP_COLLECTING,
110
+ QCA_MEMDUMP_COLLECTED,
111
+ QCA_MEMDUMP_TIMEOUT,
112
+};
113
+
114
+struct qca_memdump_data {
115
+ char *memdump_buf_head;
116
+ char *memdump_buf_tail;
117
+ u32 current_seq_no;
118
+ u32 received_dump;
119
+ u32 ram_dump_size;
120
+};
121
+
122
+struct qca_memdump_event_hdr {
123
+ __u8 evt;
124
+ __u8 plen;
125
+ __u16 opcode;
126
+ __u16 seq_no;
127
+ __u8 reserved;
128
+} __packed;
129
+
130
+
131
+struct qca_dump_size {
132
+ u32 dump_size;
133
+} __packed;
134
+
88135 struct qca_data {
89136 struct hci_uart *hu;
90137 struct sk_buff *rx_skb;
91138 struct sk_buff_head txq;
92139 struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */
140
+ struct sk_buff_head rx_memdump_q; /* Memdump wait queue */
93141 spinlock_t hci_ibs_lock; /* HCI_IBS state lock */
94142 u8 tx_ibs_state; /* HCI_IBS transmit side power state*/
95143 u8 rx_ibs_state; /* HCI_IBS receive side power state */
....@@ -104,7 +152,14 @@
104152 struct work_struct ws_awake_device;
105153 struct work_struct ws_rx_vote_off;
106154 struct work_struct ws_tx_vote_off;
155
+ struct work_struct ctrl_memdump_evt;
156
+ struct delayed_work ctrl_memdump_timeout;
157
+ struct qca_memdump_data *qca_memdump;
107158 unsigned long flags;
159
+ struct completion drop_ev_comp;
160
+ wait_queue_head_t suspend_wait_q;
161
+ enum qca_memdump_states memdump_state;
162
+ struct mutex hci_memdump_lock;
108163
109164 /* For debugging purpose */
110165 u64 ibs_sent_wacks;
....@@ -135,15 +190,14 @@
135190 */
136191 struct qca_vreg {
137192 const char *name;
138
- unsigned int min_uV;
139
- unsigned int max_uV;
140193 unsigned int load_uA;
141194 };
142195
143
-struct qca_vreg_data {
196
+struct qca_device_data {
144197 enum qca_btsoc_type soc_type;
145198 struct qca_vreg *vregs;
146199 size_t num_vregs;
200
+ uint32_t capabilities;
147201 };
148202
149203 /*
....@@ -151,8 +205,8 @@
151205 */
152206 struct qca_power {
153207 struct device *dev;
154
- const struct qca_vreg_data *vreg_data;
155208 struct regulator_bulk_data *vreg_bulk;
209
+ int num_vregs;
156210 bool vregs_on;
157211 };
158212
....@@ -164,10 +218,40 @@
164218 struct qca_power *bt_power;
165219 u32 init_speed;
166220 u32 oper_speed;
221
+ const char *firmware_name;
167222 };
168223
169
-static int qca_power_setup(struct hci_uart *hu, bool on);
224
+static int qca_regulator_enable(struct qca_serdev *qcadev);
225
+static void qca_regulator_disable(struct qca_serdev *qcadev);
170226 static void qca_power_shutdown(struct hci_uart *hu);
227
+static int qca_power_off(struct hci_dev *hdev);
228
+static void qca_controller_memdump(struct work_struct *work);
229
+
230
+static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu)
231
+{
232
+ enum qca_btsoc_type soc_type;
233
+
234
+ if (hu->serdev) {
235
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
236
+
237
+ soc_type = qsd->btsoc_type;
238
+ } else {
239
+ soc_type = QCA_ROME;
240
+ }
241
+
242
+ return soc_type;
243
+}
244
+
245
+static const char *qca_get_firmware_name(struct hci_uart *hu)
246
+{
247
+ if (hu->serdev) {
248
+ struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev);
249
+
250
+ return qsd->firmware_name;
251
+ } else {
252
+ return NULL;
253
+ }
254
+}
171255
172256 static void __serial_clock_on(struct tty_struct *tty)
173257 {
....@@ -207,31 +291,29 @@
207291 case HCI_IBS_TX_VOTE_CLOCK_ON:
208292 qca->tx_vote = true;
209293 qca->tx_votes_on++;
210
- new_vote = true;
211294 break;
212295
213296 case HCI_IBS_RX_VOTE_CLOCK_ON:
214297 qca->rx_vote = true;
215298 qca->rx_votes_on++;
216
- new_vote = true;
217299 break;
218300
219301 case HCI_IBS_TX_VOTE_CLOCK_OFF:
220302 qca->tx_vote = false;
221303 qca->tx_votes_off++;
222
- new_vote = qca->rx_vote | qca->tx_vote;
223304 break;
224305
225306 case HCI_IBS_RX_VOTE_CLOCK_OFF:
226307 qca->rx_vote = false;
227308 qca->rx_votes_off++;
228
- new_vote = qca->rx_vote | qca->tx_vote;
229309 break;
230310
231311 default:
232312 BT_ERR("Voting irregularity");
233313 return;
234314 }
315
+
316
+ new_vote = qca->rx_vote | qca->tx_vote;
235317
236318 if (new_vote != old_vote) {
237319 if (new_vote)
....@@ -286,13 +368,14 @@
286368 ws_awake_device);
287369 struct hci_uart *hu = qca->hu;
288370 unsigned long retrans_delay;
371
+ unsigned long flags;
289372
290373 BT_DBG("hu %p wq awake device", hu);
291374
292375 /* Vote for serial clock */
293376 serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
294377
295
- spin_lock(&qca->hci_ibs_lock);
378
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
296379
297380 /* Send wake indication to device */
298381 if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
....@@ -304,7 +387,7 @@
304387 retrans_delay = msecs_to_jiffies(qca->wake_retrans);
305388 mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
306389
307
- spin_unlock(&qca->hci_ibs_lock);
390
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
308391
309392 /* Actually send the packets */
310393 hci_uart_tx_wakeup(hu);
....@@ -315,12 +398,13 @@
315398 struct qca_data *qca = container_of(work, struct qca_data,
316399 ws_awake_rx);
317400 struct hci_uart *hu = qca->hu;
401
+ unsigned long flags;
318402
319403 BT_DBG("hu %p wq awake rx", hu);
320404
321405 serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
322406
323
- spin_lock(&qca->hci_ibs_lock);
407
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
324408 qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
325409
326410 /* Always acknowledge device wake up,
....@@ -331,7 +415,7 @@
331415
332416 qca->ibs_sent_wacks++;
333417
334
- spin_unlock(&qca->hci_ibs_lock);
418
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
335419
336420 /* Actually send the packets */
337421 hci_uart_tx_wakeup(hu);
....@@ -390,8 +474,6 @@
390474
391475 case HCI_IBS_TX_ASLEEP:
392476 case HCI_IBS_TX_WAKING:
393
- /* Fall through */
394
-
395477 default:
396478 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
397479 break;
....@@ -413,6 +495,12 @@
413495 spin_lock_irqsave_nested(&qca->hci_ibs_lock,
414496 flags, SINGLE_DEPTH_NESTING);
415497
498
+ /* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */
499
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
500
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
501
+ return;
502
+ }
503
+
416504 switch (qca->tx_ibs_state) {
417505 case HCI_IBS_TX_WAKING:
418506 /* No WAKE_ACK, retransmit WAKE */
....@@ -428,8 +516,6 @@
428516
429517 case HCI_IBS_TX_ASLEEP:
430518 case HCI_IBS_TX_AWAKE:
431
- /* Fall through */
432
-
433519 default:
434520 BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state);
435521 break;
....@@ -441,12 +527,33 @@
441527 hci_uart_tx_wakeup(hu);
442528 }
443529
530
+
531
+static void qca_controller_memdump_timeout(struct work_struct *work)
532
+{
533
+ struct qca_data *qca = container_of(work, struct qca_data,
534
+ ctrl_memdump_timeout.work);
535
+ struct hci_uart *hu = qca->hu;
536
+
537
+ mutex_lock(&qca->hci_memdump_lock);
538
+ if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
539
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
540
+ if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
541
+ /* Inject hw error event to reset the device
542
+ * and driver.
543
+ */
544
+ hci_reset_dev(hu->hdev);
545
+ }
546
+ }
547
+
548
+ mutex_unlock(&qca->hci_memdump_lock);
549
+}
550
+
551
+
444552 /* Initialize protocol */
445553 static int qca_open(struct hci_uart *hu)
446554 {
447555 struct qca_serdev *qcadev;
448556 struct qca_data *qca;
449
- int ret;
450557
451558 BT_DBG("hu %p qca_open", hu);
452559
....@@ -459,7 +566,9 @@
459566
460567 skb_queue_head_init(&qca->txq);
461568 skb_queue_head_init(&qca->tx_wait_q);
569
+ skb_queue_head_init(&qca->rx_memdump_q);
462570 spin_lock_init(&qca->hci_ibs_lock);
571
+ mutex_init(&qca->hci_memdump_lock);
463572 qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
464573 if (!qca->workqueue) {
465574 BT_ERR("QCA Workqueue not initialized properly");
....@@ -471,63 +580,37 @@
471580 INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device);
472581 INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
473582 INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
583
+ INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
584
+ INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
585
+ qca_controller_memdump_timeout);
586
+ init_waitqueue_head(&qca->suspend_wait_q);
474587
475588 qca->hu = hu;
589
+ init_completion(&qca->drop_ev_comp);
476590
477591 /* Assume we start with both sides asleep -- extra wakes OK */
478592 qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
479593 qca->rx_ibs_state = HCI_IBS_RX_ASLEEP;
480594
481
- /* clocks actually on, but we start votes off */
482
- qca->tx_vote = false;
483
- qca->rx_vote = false;
484
- qca->flags = 0;
485
-
486
- qca->ibs_sent_wacks = 0;
487
- qca->ibs_sent_slps = 0;
488
- qca->ibs_sent_wakes = 0;
489
- qca->ibs_recv_wacks = 0;
490
- qca->ibs_recv_slps = 0;
491
- qca->ibs_recv_wakes = 0;
492595 qca->vote_last_jif = jiffies;
493
- qca->vote_on_ms = 0;
494
- qca->vote_off_ms = 0;
495
- qca->votes_on = 0;
496
- qca->votes_off = 0;
497
- qca->tx_votes_on = 0;
498
- qca->tx_votes_off = 0;
499
- qca->rx_votes_on = 0;
500
- qca->rx_votes_off = 0;
501596
502597 hu->priv = qca;
503598
504599 if (hu->serdev) {
505
- serdev_device_open(hu->serdev);
506
-
507600 qcadev = serdev_device_get_drvdata(hu->serdev);
508
- if (qcadev->btsoc_type != QCA_WCN3990) {
509
- gpiod_set_value_cansleep(qcadev->bt_en, 1);
510
- /* Controller needs time to bootup. */
511
- msleep(150);
512
- } else {
601
+
602
+ if (qca_is_wcn399x(qcadev->btsoc_type))
513603 hu->init_speed = qcadev->init_speed;
604
+
605
+ if (qcadev->oper_speed)
514606 hu->oper_speed = qcadev->oper_speed;
515
- ret = qca_power_setup(hu, true);
516
- if (ret) {
517
- destroy_workqueue(qca->workqueue);
518
- kfree_skb(qca->rx_skb);
519
- hu->priv = NULL;
520
- kfree(qca);
521
- return ret;
522
- }
523
- }
524607 }
525608
526609 timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0);
527610 qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS;
528611
529612 timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
530
- qca->tx_idle_delay = IBS_TX_IDLE_TIMEOUT_MS;
613
+ qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
531614
532615 BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
533616 qca->tx_idle_delay, qca->wake_retrans);
....@@ -597,7 +680,6 @@
597680 /* Close protocol */
598681 static int qca_close(struct hci_uart *hu)
599682 {
600
- struct qca_serdev *qcadev;
601683 struct qca_data *qca = hu->priv;
602684
603685 BT_DBG("hu %p qca close", hu);
....@@ -606,20 +688,11 @@
606688
607689 skb_queue_purge(&qca->tx_wait_q);
608690 skb_queue_purge(&qca->txq);
609
- del_timer(&qca->tx_idle_timer);
610
- del_timer(&qca->wake_retrans_timer);
691
+ skb_queue_purge(&qca->rx_memdump_q);
611692 destroy_workqueue(qca->workqueue);
693
+ del_timer_sync(&qca->tx_idle_timer);
694
+ del_timer_sync(&qca->wake_retrans_timer);
612695 qca->hu = NULL;
613
-
614
- if (hu->serdev) {
615
- qcadev = serdev_device_get_drvdata(hu->serdev);
616
- if (qcadev->btsoc_type == QCA_WCN3990)
617
- qca_power_shutdown(hu);
618
- else
619
- gpiod_set_value_cansleep(qcadev->bt_en, 0);
620
-
621
- serdev_device_close(hu->serdev);
622
- }
623696
624697 kfree_skb(qca->rx_skb);
625698
....@@ -642,6 +715,12 @@
642715 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
643716
644717 qca->ibs_recv_wakes++;
718
+
719
+ /* Don't wake the rx up when suspending. */
720
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
721
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
722
+ return;
723
+ }
645724
646725 switch (qca->rx_ibs_state) {
647726 case HCI_IBS_RX_ASLEEP:
....@@ -683,7 +762,7 @@
683762 unsigned long flags;
684763 struct qca_data *qca = hu->priv;
685764
686
- BT_DBG("hu %p want to sleep", hu);
765
+ BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);
687766
688767 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
689768
....@@ -698,7 +777,7 @@
698777 break;
699778
700779 case HCI_IBS_RX_ASLEEP:
701
- /* Fall through */
780
+ break;
702781
703782 default:
704783 /* Any other state is illegal */
....@@ -706,6 +785,8 @@
706785 qca->rx_ibs_state);
707786 break;
708787 }
788
+
789
+ wake_up_interruptible(&qca->suspend_wait_q);
709790
710791 spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
711792 }
....@@ -723,6 +804,12 @@
723804 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
724805
725806 qca->ibs_recv_wacks++;
807
+
808
+ /* Don't react to the wake-up-acknowledgment when suspending. */
809
+ if (test_bit(QCA_SUSPENDING, &qca->flags)) {
810
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
811
+ return;
812
+ }
726813
727814 switch (qca->tx_ibs_state) {
728815 case HCI_IBS_TX_AWAKE:
....@@ -744,8 +831,6 @@
744831 break;
745832
746833 case HCI_IBS_TX_ASLEEP:
747
- /* Fall through */
748
-
749834 default:
750835 BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
751836 qca->tx_ibs_state);
....@@ -769,18 +854,28 @@
769854 BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb,
770855 qca->tx_ibs_state);
771856
772
- /* Prepend skb with frame type */
773
- memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
774
-
775
- /* Don't go to sleep in middle of patch download or
776
- * Out-Of-Band(GPIOs control) sleep is selected.
777
- */
778
- if (!test_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags)) {
779
- skb_queue_tail(&qca->txq, skb);
857
+ if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) {
858
+ /* As SSR is in progress, ignore the packets */
859
+ bt_dev_dbg(hu->hdev, "SSR is in progress");
860
+ kfree_skb(skb);
780861 return 0;
781862 }
782863
864
+ /* Prepend skb with frame type */
865
+ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
866
+
783867 spin_lock_irqsave(&qca->hci_ibs_lock, flags);
868
+
869
+ /* Don't go to sleep in middle of patch download or
870
+ * Out-Of-Band(GPIOs control) sleep is selected.
871
+ * Don't wake the device up when suspending.
872
+ */
873
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags) ||
874
+ test_bit(QCA_SUSPENDING, &qca->flags)) {
875
+ skb_queue_tail(&qca->txq, skb);
876
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
877
+ return 0;
878
+ }
784879
785880 /* Act according to current state */
786881 switch (qca->tx_ibs_state) {
....@@ -855,6 +950,232 @@
855950 return 0;
856951 }
857952
953
+static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb)
954
+{
955
+ /* We receive debug logs from chip as an ACL packets.
956
+ * Instead of sending the data to ACL to decode the
957
+ * received data, we are pushing them to the above layers
958
+ * as a diagnostic packet.
959
+ */
960
+ if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE)
961
+ return hci_recv_diag(hdev, skb);
962
+
963
+ return hci_recv_frame(hdev, skb);
964
+}
965
+
966
+static void qca_controller_memdump(struct work_struct *work)
967
+{
968
+ struct qca_data *qca = container_of(work, struct qca_data,
969
+ ctrl_memdump_evt);
970
+ struct hci_uart *hu = qca->hu;
971
+ struct sk_buff *skb;
972
+ struct qca_memdump_event_hdr *cmd_hdr;
973
+ struct qca_memdump_data *qca_memdump = qca->qca_memdump;
974
+ struct qca_dump_size *dump;
975
+ char *memdump_buf;
976
+ char nullBuff[QCA_DUMP_PACKET_SIZE] = { 0 };
977
+ u16 seq_no;
978
+ u32 dump_size;
979
+ u32 rx_size;
980
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
981
+
982
+ while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
983
+
984
+ mutex_lock(&qca->hci_memdump_lock);
985
+ /* Skip processing the received packets if timeout detected
986
+ * or memdump collection completed.
987
+ */
988
+ if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
989
+ qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
990
+ mutex_unlock(&qca->hci_memdump_lock);
991
+ return;
992
+ }
993
+
994
+ if (!qca_memdump) {
995
+ qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
996
+ GFP_ATOMIC);
997
+ if (!qca_memdump) {
998
+ mutex_unlock(&qca->hci_memdump_lock);
999
+ return;
1000
+ }
1001
+
1002
+ qca->qca_memdump = qca_memdump;
1003
+ }
1004
+
1005
+ qca->memdump_state = QCA_MEMDUMP_COLLECTING;
1006
+ cmd_hdr = (void *) skb->data;
1007
+ seq_no = __le16_to_cpu(cmd_hdr->seq_no);
1008
+ skb_pull(skb, sizeof(struct qca_memdump_event_hdr));
1009
+
1010
+ if (!seq_no) {
1011
+
1012
+ /* This is the first frame of memdump packet from
1013
+	 * the controller, Disable IBS to receive dump
1014
+	 * without any interruption, ideally time required for
1015
+	 * the controller to send the dump is 8 seconds. Let us
1016
+ * start timer to handle this asynchronous activity.
1017
+ */
1018
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
1019
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1020
+ dump = (void *) skb->data;
1021
+ dump_size = __le32_to_cpu(dump->dump_size);
1022
+ if (!(dump_size)) {
1023
+ bt_dev_err(hu->hdev, "Rx invalid memdump size");
1024
+ kfree(qca_memdump);
1025
+ kfree_skb(skb);
1026
+ qca->qca_memdump = NULL;
1027
+ mutex_unlock(&qca->hci_memdump_lock);
1028
+ return;
1029
+ }
1030
+
1031
+ bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
1032
+ dump_size);
1033
+ queue_delayed_work(qca->workqueue,
1034
+ &qca->ctrl_memdump_timeout,
1035
+ msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)
1036
+ );
1037
+
1038
+ skb_pull(skb, sizeof(dump_size));
1039
+ memdump_buf = vmalloc(dump_size);
1040
+ qca_memdump->ram_dump_size = dump_size;
1041
+ qca_memdump->memdump_buf_head = memdump_buf;
1042
+ qca_memdump->memdump_buf_tail = memdump_buf;
1043
+ }
1044
+
1045
+ memdump_buf = qca_memdump->memdump_buf_tail;
1046
+
1047
+ /* If sequence no 0 is missed then there is no point in
1048
+ * accepting the other sequences.
1049
+ */
1050
+ if (!memdump_buf) {
1051
+ bt_dev_err(hu->hdev, "QCA: Discarding other packets");
1052
+ kfree(qca_memdump);
1053
+ kfree_skb(skb);
1054
+ qca->qca_memdump = NULL;
1055
+ mutex_unlock(&qca->hci_memdump_lock);
1056
+ return;
1057
+ }
1058
+
1059
+ /* There could be chance of missing some packets from
1060
+ * the controller. In such cases let us store the dummy
1061
+ * packets in the buffer.
1062
+ */
1063
+	 /* For QCA6390, controller does not lose packets but
1064
+	 * sequence number field of packet sometimes has error
1065
+ * bits, so skip this checking for missing packet.
1066
+ */
1067
+ while ((seq_no > qca_memdump->current_seq_no + 1) &&
1068
+ (soc_type != QCA_QCA6390) &&
1069
+ seq_no != QCA_LAST_SEQUENCE_NUM) {
1070
+ bt_dev_err(hu->hdev, "QCA controller missed packet:%d",
1071
+ qca_memdump->current_seq_no);
1072
+ rx_size = qca_memdump->received_dump;
1073
+ rx_size += QCA_DUMP_PACKET_SIZE;
1074
+ if (rx_size > qca_memdump->ram_dump_size) {
1075
+ bt_dev_err(hu->hdev,
1076
+ "QCA memdump received %d, no space for missed packet",
1077
+ qca_memdump->received_dump);
1078
+ break;
1079
+ }
1080
+ memcpy(memdump_buf, nullBuff, QCA_DUMP_PACKET_SIZE);
1081
+ memdump_buf = memdump_buf + QCA_DUMP_PACKET_SIZE;
1082
+ qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE;
1083
+ qca_memdump->current_seq_no++;
1084
+ }
1085
+
1086
+ rx_size = qca_memdump->received_dump + skb->len;
1087
+ if (rx_size <= qca_memdump->ram_dump_size) {
1088
+ if ((seq_no != QCA_LAST_SEQUENCE_NUM) &&
1089
+ (seq_no != qca_memdump->current_seq_no))
1090
+ bt_dev_err(hu->hdev,
1091
+ "QCA memdump unexpected packet %d",
1092
+ seq_no);
1093
+ bt_dev_dbg(hu->hdev,
1094
+ "QCA memdump packet %d with length %d",
1095
+ seq_no, skb->len);
1096
+ memcpy(memdump_buf, (unsigned char *)skb->data,
1097
+ skb->len);
1098
+ memdump_buf = memdump_buf + skb->len;
1099
+ qca_memdump->memdump_buf_tail = memdump_buf;
1100
+ qca_memdump->current_seq_no = seq_no + 1;
1101
+ qca_memdump->received_dump += skb->len;
1102
+ } else {
1103
+ bt_dev_err(hu->hdev,
1104
+ "QCA memdump received %d, no space for packet %d",
1105
+ qca_memdump->received_dump, seq_no);
1106
+ }
1107
+ qca->qca_memdump = qca_memdump;
1108
+ kfree_skb(skb);
1109
+ if (seq_no == QCA_LAST_SEQUENCE_NUM) {
1110
+ bt_dev_info(hu->hdev,
1111
+ "QCA memdump Done, received %d, total %d",
1112
+ qca_memdump->received_dump,
1113
+ qca_memdump->ram_dump_size);
1114
+ memdump_buf = qca_memdump->memdump_buf_head;
1115
+ dev_coredumpv(&hu->serdev->dev, memdump_buf,
1116
+ qca_memdump->received_dump, GFP_KERNEL);
1117
+ cancel_delayed_work(&qca->ctrl_memdump_timeout);
1118
+ kfree(qca->qca_memdump);
1119
+ qca->qca_memdump = NULL;
1120
+ qca->memdump_state = QCA_MEMDUMP_COLLECTED;
1121
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1122
+ }
1123
+
1124
+ mutex_unlock(&qca->hci_memdump_lock);
1125
+ }
1126
+
1127
+}
1128
+
1129
+static int qca_controller_memdump_event(struct hci_dev *hdev,
1130
+ struct sk_buff *skb)
1131
+{
1132
+ struct hci_uart *hu = hci_get_drvdata(hdev);
1133
+ struct qca_data *qca = hu->priv;
1134
+
1135
+ set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1136
+ skb_queue_tail(&qca->rx_memdump_q, skb);
1137
+ queue_work(qca->workqueue, &qca->ctrl_memdump_evt);
1138
+
1139
+ return 0;
1140
+}
1141
+
1142
+static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
1143
+{
1144
+ struct hci_uart *hu = hci_get_drvdata(hdev);
1145
+ struct qca_data *qca = hu->priv;
1146
+
1147
+ if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) {
1148
+ struct hci_event_hdr *hdr = (void *)skb->data;
1149
+
1150
+ /* For the WCN3990 the vendor command for a baudrate change
1151
+ * isn't sent as synchronous HCI command, because the
1152
+ * controller sends the corresponding vendor event with the
1153
+ * new baudrate. The event is received and properly decoded
1154
+ * after changing the baudrate of the host port. It needs to
1155
+ * be dropped, otherwise it can be misinterpreted as
1156
+ * response to a later firmware download command (also a
1157
+ * vendor command).
1158
+ */
1159
+
1160
+ if (hdr->evt == HCI_EV_VENDOR)
1161
+ complete(&qca->drop_ev_comp);
1162
+
1163
+ kfree_skb(skb);
1164
+
1165
+ return 0;
1166
+ }
1167
+ /* We receive chip memory dump as an event packet, With a dedicated
1168
+ * handler followed by a hardware error event. When this event is
1169
+ * received we store dump into a file before closing hci. This
1170
+ * dump will help in triaging the issues.
1171
+ */
1172
+ if ((skb->data[0] == HCI_VENDOR_PKT) &&
1173
+ (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE))
1174
+ return qca_controller_memdump_event(hdev, skb);
1175
+
1176
+ return hci_recv_frame(hdev, skb);
1177
+}
1178
+
8581179 #define QCA_IBS_SLEEP_IND_EVENT \
8591180 .type = HCI_IBS_SLEEP_IND, \
8601181 .hlen = 0, \
....@@ -877,9 +1198,9 @@
8771198 .maxlen = HCI_MAX_IBS_SIZE
8781199
8791200 static const struct h4_recv_pkt qca_recv_pkts[] = {
880
- { H4_RECV_ACL, .recv = hci_recv_frame },
1201
+ { H4_RECV_ACL, .recv = qca_recv_acl_data },
8811202 { H4_RECV_SCO, .recv = hci_recv_frame },
882
- { H4_RECV_EVENT, .recv = hci_recv_frame },
1203
+ { H4_RECV_EVENT, .recv = qca_recv_event },
8831204 { QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind },
8841205 { QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack },
8851206 { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind },
....@@ -952,7 +1273,6 @@
9521273 struct hci_uart *hu = hci_get_drvdata(hdev);
9531274 struct qca_data *qca = hu->priv;
9541275 struct sk_buff *skb;
955
- struct qca_serdev *qcadev;
9561276 u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
9571277
9581278 if (baudrate > QCA_BAUDRATE_3200000)
....@@ -966,13 +1286,6 @@
9661286 return -ENOMEM;
9671287 }
9681288
969
- /* Disabling hardware flow control is mandatory while
970
- * sending change baudrate request to wcn3990 SoC.
971
- */
972
- qcadev = serdev_device_get_drvdata(hu->serdev);
973
- if (qcadev->btsoc_type == QCA_WCN3990)
974
- hci_uart_set_flow_control(hu, true);
975
-
9761289 /* Assign commands to change baudrate and packet type. */
9771290 skb_put_data(skb, cmd, sizeof(cmd));
9781291 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
....@@ -980,16 +1293,20 @@
9801293 skb_queue_tail(&qca->txq, skb);
9811294 hci_uart_tx_wakeup(hu);
9821295
983
- /* wait 300ms to change new baudrate on controller side
984
- * controller will come back after they receive this HCI command
985
- * then host can communicate with new baudrate to controller
986
- */
987
- set_current_state(TASK_UNINTERRUPTIBLE);
988
- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
989
- set_current_state(TASK_RUNNING);
1296
+ /* Wait for the baudrate change request to be sent */
9901297
991
- if (qcadev->btsoc_type == QCA_WCN3990)
992
- hci_uart_set_flow_control(hu, false);
1298
+ while (!skb_queue_empty(&qca->txq))
1299
+ usleep_range(100, 200);
1300
+
1301
+ if (hu->serdev)
1302
+ serdev_device_wait_until_sent(hu->serdev,
1303
+ msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
1304
+
1305
+ /* Give the controller time to process the request */
1306
+ if (qca_is_wcn399x(qca_soc_type(hu)))
1307
+ msleep(10);
1308
+ else
1309
+ msleep(300);
9931310
9941311 return 0;
9951312 }
....@@ -1002,11 +1319,11 @@
10021319 hci_uart_set_baudrate(hu, speed);
10031320 }
10041321
1005
-static int qca_send_power_pulse(struct hci_dev *hdev, u8 cmd)
1322
+static int qca_send_power_pulse(struct hci_uart *hu, bool on)
10061323 {
1007
- struct hci_uart *hu = hci_get_drvdata(hdev);
1008
- struct qca_data *qca = hu->priv;
1009
- struct sk_buff *skb;
1324
+ int ret;
1325
+ int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
1326
+ u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE;
10101327
10111328 /* These power pulses are single byte command which are sent
10121329 * at required baudrate to wcn3990. On wcn3990, we have an external
....@@ -1018,23 +1335,24 @@
10181335 * save power. Disabling hardware flow control is mandatory while
10191336 * sending power pulses to SoC.
10201337 */
1021
- bt_dev_dbg(hdev, "sending power pulse %02x to SoC", cmd);
1338
+ bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd);
10221339
1023
- skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
1024
- if (!skb)
1025
- return -ENOMEM;
1026
-
1340
+ serdev_device_write_flush(hu->serdev);
10271341 hci_uart_set_flow_control(hu, true);
1342
+ ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
1343
+ if (ret < 0) {
1344
+ bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd);
1345
+ return ret;
1346
+ }
10281347
1029
- skb_put_u8(skb, cmd);
1030
- hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
1031
-
1032
- skb_queue_tail(&qca->txq, skb);
1033
- hci_uart_tx_wakeup(hu);
1034
-
1035
- /* Wait for 100 uS for SoC to settle down */
1036
- usleep_range(100, 200);
1348
+ serdev_device_wait_until_sent(hu->serdev, timeout);
10371349 hci_uart_set_flow_control(hu, false);
1350
+
1351
+ /* Give to controller time to boot/shutdown */
1352
+ if (on)
1353
+ msleep(100);
1354
+ else
1355
+ msleep(10);
10381356
10391357 return 0;
10401358 }
....@@ -1061,10 +1379,7 @@
10611379
10621380 static int qca_check_speeds(struct hci_uart *hu)
10631381 {
1064
- struct qca_serdev *qcadev;
1065
-
1066
- qcadev = serdev_device_get_drvdata(hu->serdev);
1067
- if (qcadev->btsoc_type == QCA_WCN3990) {
1382
+ if (qca_is_wcn399x(qca_soc_type(hu))) {
10681383 if (!qca_get_speed(hu, QCA_INIT_SPEED) &&
10691384 !qca_get_speed(hu, QCA_OPER_SPEED))
10701385 return -EINVAL;
....@@ -1080,47 +1395,209 @@
10801395 static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type)
10811396 {
10821397 unsigned int speed, qca_baudrate;
1083
- int ret;
1398
+ struct qca_data *qca = hu->priv;
1399
+ int ret = 0;
10841400
10851401 if (speed_type == QCA_INIT_SPEED) {
10861402 speed = qca_get_speed(hu, QCA_INIT_SPEED);
10871403 if (speed)
10881404 host_set_baudrate(hu, speed);
10891405 } else {
1406
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
1407
+
10901408 speed = qca_get_speed(hu, QCA_OPER_SPEED);
10911409 if (!speed)
10921410 return 0;
1411
+
1412
+ /* Disable flow control for wcn3990 to deassert RTS while
1413
+ * changing the baudrate of chip and host.
1414
+ */
1415
+ if (qca_is_wcn399x(soc_type))
1416
+ hci_uart_set_flow_control(hu, true);
1417
+
1418
+ if (soc_type == QCA_WCN3990) {
1419
+ reinit_completion(&qca->drop_ev_comp);
1420
+ set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
1421
+ }
10931422
10941423 qca_baudrate = qca_get_baudrate_value(speed);
10951424 bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed);
10961425 ret = qca_set_baudrate(hu->hdev, qca_baudrate);
10971426 if (ret)
1098
- return ret;
1427
+ goto error;
10991428
11001429 host_set_baudrate(hu, speed);
1430
+
1431
+error:
1432
+ if (qca_is_wcn399x(soc_type))
1433
+ hci_uart_set_flow_control(hu, false);
1434
+
1435
+ if (soc_type == QCA_WCN3990) {
1436
+ /* Wait for the controller to send the vendor event
1437
+ * for the baudrate change command.
1438
+ */
1439
+ if (!wait_for_completion_timeout(&qca->drop_ev_comp,
1440
+ msecs_to_jiffies(100))) {
1441
+ bt_dev_err(hu->hdev,
1442
+ "Failed to change controller baudrate\n");
1443
+ ret = -ETIMEDOUT;
1444
+ }
1445
+
1446
+ clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags);
1447
+ }
11011448 }
1449
+
1450
+ return ret;
1451
+}
1452
+
1453
+static int qca_send_crashbuffer(struct hci_uart *hu)
1454
+{
1455
+ struct qca_data *qca = hu->priv;
1456
+ struct sk_buff *skb;
1457
+
1458
+ skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL);
1459
+ if (!skb) {
1460
+ bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet");
1461
+ return -ENOMEM;
1462
+ }
1463
+
1464
+ /* We forcefully crash the controller, by sending 0xfb byte for
1465
+ * 1024 times. We also might have chance of losing data, To be
1466
+ * on safer side we send 1096 bytes to the SoC.
1467
+ */
1468
+ memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE,
1469
+ QCA_CRASHBYTE_PACKET_LEN);
1470
+ hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
1471
+ bt_dev_info(hu->hdev, "crash the soc to collect controller dump");
1472
+ skb_queue_tail(&qca->txq, skb);
1473
+ hci_uart_tx_wakeup(hu);
11021474
11031475 return 0;
11041476 }
11051477
1478
+static void qca_wait_for_dump_collection(struct hci_dev *hdev)
1479
+{
1480
+ struct hci_uart *hu = hci_get_drvdata(hdev);
1481
+ struct qca_data *qca = hu->priv;
1482
+
1483
+ wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
1484
+ TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
1485
+
1486
+ clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1487
+}
1488
+
1489
+static void qca_hw_error(struct hci_dev *hdev, u8 code)
1490
+{
1491
+ struct hci_uart *hu = hci_get_drvdata(hdev);
1492
+ struct qca_data *qca = hu->priv;
1493
+
1494
+ set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1495
+ set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
1496
+ bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
1497
+
1498
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
1499
+ /* If hardware error event received for other than QCA
1500
+ * soc memory dump event, then we need to crash the SOC
1501
+ * and wait here for 8 seconds to get the dump packets.
1502
+ * This will block main thread to be on hold until we
1503
+ * collect dump.
1504
+ */
1505
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1506
+ qca_send_crashbuffer(hu);
1507
+ qca_wait_for_dump_collection(hdev);
1508
+ } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
1509
+ /* Let us wait here until memory dump collected or
1510
+ * memory dump timer expired.
1511
+ */
1512
+ bt_dev_info(hdev, "waiting for dump to complete");
1513
+ qca_wait_for_dump_collection(hdev);
1514
+ }
1515
+
1516
+ mutex_lock(&qca->hci_memdump_lock);
1517
+ if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
1518
+ bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
1519
+ if (qca->qca_memdump) {
1520
+ vfree(qca->qca_memdump->memdump_buf_head);
1521
+ kfree(qca->qca_memdump);
1522
+ qca->qca_memdump = NULL;
1523
+ }
1524
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
1525
+ cancel_delayed_work(&qca->ctrl_memdump_timeout);
1526
+ }
1527
+ mutex_unlock(&qca->hci_memdump_lock);
1528
+
1529
+ if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT ||
1530
+ qca->memdump_state == QCA_MEMDUMP_COLLECTED) {
1531
+ cancel_work_sync(&qca->ctrl_memdump_evt);
1532
+ skb_queue_purge(&qca->rx_memdump_q);
1533
+ }
1534
+
1535
+ clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
1536
+}
1537
+
1538
+static void qca_cmd_timeout(struct hci_dev *hdev)
1539
+{
1540
+ struct hci_uart *hu = hci_get_drvdata(hdev);
1541
+ struct qca_data *qca = hu->priv;
1542
+
1543
+ set_bit(QCA_SSR_TRIGGERED, &qca->flags);
1544
+ if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
1545
+ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
1546
+ qca_send_crashbuffer(hu);
1547
+ qca_wait_for_dump_collection(hdev);
1548
+ } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) {
1549
+ /* Let us wait here until memory dump collected or
1550
+ * memory dump timer expired.
1551
+ */
1552
+ bt_dev_info(hdev, "waiting for dump to complete");
1553
+ qca_wait_for_dump_collection(hdev);
1554
+ }
1555
+
1556
+ mutex_lock(&qca->hci_memdump_lock);
1557
+ if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
1558
+ qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
1559
+ if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
1560
+ /* Inject hw error event to reset the device
1561
+ * and driver.
1562
+ */
1563
+ hci_reset_dev(hu->hdev);
1564
+ }
1565
+ }
1566
+ mutex_unlock(&qca->hci_memdump_lock);
1567
+}
1568
+
11061569 static int qca_wcn3990_init(struct hci_uart *hu)
11071570 {
1108
- struct hci_dev *hdev = hu->hdev;
1571
+ struct qca_serdev *qcadev;
11091572 int ret;
1573
+
1574
+ /* Check for vregs status, may be hci down has turned
1575
+ * off the voltage regulator.
1576
+ */
1577
+ qcadev = serdev_device_get_drvdata(hu->serdev);
1578
+ if (!qcadev->bt_power->vregs_on) {
1579
+ serdev_device_close(hu->serdev);
1580
+ ret = qca_regulator_enable(qcadev);
1581
+ if (ret)
1582
+ return ret;
1583
+
1584
+ ret = serdev_device_open(hu->serdev);
1585
+ if (ret) {
1586
+ bt_dev_err(hu->hdev, "failed to open port");
1587
+ return ret;
1588
+ }
1589
+ }
11101590
11111591 /* Forcefully enable wcn3990 to enter in to boot mode. */
11121592 host_set_baudrate(hu, 2400);
1113
- ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWEROFF_PULSE);
1593
+ ret = qca_send_power_pulse(hu, false);
11141594 if (ret)
11151595 return ret;
11161596
11171597 qca_set_speed(hu, QCA_INIT_SPEED);
1118
- ret = qca_send_power_pulse(hdev, QCA_WCN3990_POWERON_PULSE);
1598
+ ret = qca_send_power_pulse(hu, true);
11191599 if (ret)
11201600 return ret;
1121
-
1122
- /* Wait for 100 ms for SoC to boot */
1123
- msleep(100);
11241601
11251602 /* Now the device is in ready state to communicate with host.
11261603 * To sync host with device we need to reopen port.
....@@ -1139,35 +1616,75 @@
11391616 return 0;
11401617 }
11411618
1619
+static int qca_power_on(struct hci_dev *hdev)
1620
+{
1621
+ struct hci_uart *hu = hci_get_drvdata(hdev);
1622
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
1623
+ struct qca_serdev *qcadev;
1624
+ int ret = 0;
1625
+
1626
+ /* Non-serdev device usually is powered by external power
1627
+ * and don't need additional action in driver for power on
1628
+ */
1629
+ if (!hu->serdev)
1630
+ return 0;
1631
+
1632
+ if (qca_is_wcn399x(soc_type)) {
1633
+ ret = qca_wcn3990_init(hu);
1634
+ } else {
1635
+ qcadev = serdev_device_get_drvdata(hu->serdev);
1636
+ if (qcadev->bt_en) {
1637
+ gpiod_set_value_cansleep(qcadev->bt_en, 1);
1638
+ /* Controller needs time to bootup. */
1639
+ msleep(150);
1640
+ }
1641
+ }
1642
+
1643
+ return ret;
1644
+}
1645
+
11421646 static int qca_setup(struct hci_uart *hu)
11431647 {
11441648 struct hci_dev *hdev = hu->hdev;
11451649 struct qca_data *qca = hu->priv;
11461650 unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200;
1147
- struct qca_serdev *qcadev;
1651
+ unsigned int retries = 0;
1652
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
1653
+ const char *firmware_name = qca_get_firmware_name(hu);
11481654 int ret;
11491655 int soc_ver = 0;
1150
-
1151
- qcadev = serdev_device_get_drvdata(hu->serdev);
11521656
11531657 ret = qca_check_speeds(hu);
11541658 if (ret)
11551659 return ret;
11561660
11571661 /* Patch downloading has to be done without IBS mode */
1158
- clear_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
1662
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
11591663
1160
- if (qcadev->btsoc_type == QCA_WCN3990) {
1161
- bt_dev_info(hdev, "setting up wcn3990");
1162
- ret = qca_wcn3990_init(hu);
1163
- if (ret)
1164
- return ret;
1664
+ /* Enable controller to do both LE scan and BR/EDR inquiry
1665
+ * simultaneously.
1666
+ */
1667
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
11651668
1166
- ret = qca_read_soc_version(hdev, &soc_ver);
1669
+ bt_dev_info(hdev, "setting up %s",
1670
+ qca_is_wcn399x(soc_type) ? "wcn399x" : "ROME/QCA6390");
1671
+
1672
+ qca->memdump_state = QCA_MEMDUMP_IDLE;
1673
+
1674
+retry:
1675
+ ret = qca_power_on(hdev);
1676
+ if (ret)
1677
+ return ret;
1678
+
1679
+ clear_bit(QCA_SSR_TRIGGERED, &qca->flags);
1680
+
1681
+ if (qca_is_wcn399x(soc_type)) {
1682
+ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
1683
+
1684
+ ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
11671685 if (ret)
11681686 return ret;
11691687 } else {
1170
- bt_dev_info(hdev, "ROME setup");
11711688 qca_set_speed(hu, QCA_INIT_SPEED);
11721689 }
11731690
....@@ -1181,19 +1698,22 @@
11811698 qca_baudrate = qca_get_baudrate_value(speed);
11821699 }
11831700
1184
- if (qcadev->btsoc_type != QCA_WCN3990) {
1701
+ if (!qca_is_wcn399x(soc_type)) {
11851702 /* Get QCA version information */
1186
- ret = qca_read_soc_version(hdev, &soc_ver);
1703
+ ret = qca_read_soc_version(hdev, &soc_ver, soc_type);
11871704 if (ret)
11881705 return ret;
11891706 }
11901707
11911708 bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver);
11921709 /* Setup patch / NVM configurations */
1193
- ret = qca_uart_setup(hdev, qca_baudrate, qcadev->btsoc_type, soc_ver);
1710
+ ret = qca_uart_setup(hdev, qca_baudrate, soc_type, soc_ver,
1711
+ firmware_name);
11941712 if (!ret) {
1195
- set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags);
1713
+ set_bit(QCA_IBS_ENABLED, &qca->flags);
11961714 qca_debugfs_init(hdev);
1715
+ hu->hdev->hw_error = qca_hw_error;
1716
+ hu->hdev->cmd_timeout = qca_cmd_timeout;
11971717 } else if (ret == -ENOENT) {
11981718 /* No patch/nvm-config found, run with original fw/config */
11991719 ret = 0;
....@@ -1203,15 +1723,32 @@
12031723 * patch/nvm-config is found, so run with original fw/config.
12041724 */
12051725 ret = 0;
1726
+ } else {
1727
+ if (retries < MAX_INIT_RETRIES) {
1728
+ qca_power_shutdown(hu);
1729
+ if (hu->serdev) {
1730
+ serdev_device_close(hu->serdev);
1731
+ ret = serdev_device_open(hu->serdev);
1732
+ if (ret) {
1733
+ bt_dev_err(hdev, "failed to open port");
1734
+ return ret;
1735
+ }
1736
+ }
1737
+ retries++;
1738
+ goto retry;
1739
+ }
12061740 }
12071741
12081742 /* Setup bdaddr */
1209
- hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
1743
+ if (soc_type == QCA_ROME)
1744
+ hu->hdev->set_bdaddr = qca_set_bdaddr_rome;
1745
+ else
1746
+ hu->hdev->set_bdaddr = qca_set_bdaddr;
12101747
12111748 return ret;
12121749 }
12131750
1214
-static struct hci_uart_proto qca_proto = {
1751
+static const struct hci_uart_proto qca_proto = {
12151752 .id = HCI_UART_QCA,
12161753 .name = "QCA",
12171754 .manufacturer = 29,
....@@ -1226,137 +1763,198 @@
12261763 .dequeue = qca_dequeue,
12271764 };
12281765
1229
-static const struct qca_vreg_data qca_soc_data = {
1766
+static const struct qca_device_data qca_soc_data_wcn3990 = {
12301767 .soc_type = QCA_WCN3990,
12311768 .vregs = (struct qca_vreg []) {
1232
- { "vddio", 1800000, 1900000, 15000 },
1233
- { "vddxo", 1800000, 1900000, 80000 },
1234
- { "vddrf", 1300000, 1350000, 300000 },
1235
- { "vddch0", 3300000, 3400000, 450000 },
1769
+ { "vddio", 15000 },
1770
+ { "vddxo", 80000 },
1771
+ { "vddrf", 300000 },
1772
+ { "vddch0", 450000 },
12361773 },
12371774 .num_vregs = 4,
12381775 };
12391776
1777
+static const struct qca_device_data qca_soc_data_wcn3991 = {
1778
+ .soc_type = QCA_WCN3991,
1779
+ .vregs = (struct qca_vreg []) {
1780
+ { "vddio", 15000 },
1781
+ { "vddxo", 80000 },
1782
+ { "vddrf", 300000 },
1783
+ { "vddch0", 450000 },
1784
+ },
1785
+ .num_vregs = 4,
1786
+ .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES,
1787
+};
1788
+
1789
+static const struct qca_device_data qca_soc_data_wcn3998 = {
1790
+ .soc_type = QCA_WCN3998,
1791
+ .vregs = (struct qca_vreg []) {
1792
+ { "vddio", 10000 },
1793
+ { "vddxo", 80000 },
1794
+ { "vddrf", 300000 },
1795
+ { "vddch0", 450000 },
1796
+ },
1797
+ .num_vregs = 4,
1798
+};
1799
+
1800
+static const struct qca_device_data qca_soc_data_qca6390 = {
1801
+ .soc_type = QCA_QCA6390,
1802
+ .num_vregs = 0,
1803
+};
1804
+
12401805 static void qca_power_shutdown(struct hci_uart *hu)
12411806 {
1242
- struct serdev_device *serdev = hu->serdev;
1243
- unsigned char cmd = QCA_WCN3990_POWEROFF_PULSE;
1244
-
1245
- host_set_baudrate(hu, 2400);
1246
- hci_uart_set_flow_control(hu, true);
1247
- serdev_device_write_buf(serdev, &cmd, sizeof(cmd));
1248
- hci_uart_set_flow_control(hu, false);
1249
- qca_power_setup(hu, false);
1250
-}
1251
-
1252
-static int qca_enable_regulator(struct qca_vreg vregs,
1253
- struct regulator *regulator)
1254
-{
1255
- int ret;
1256
-
1257
- ret = regulator_set_voltage(regulator, vregs.min_uV,
1258
- vregs.max_uV);
1259
- if (ret)
1260
- return ret;
1261
-
1262
- if (vregs.load_uA)
1263
- ret = regulator_set_load(regulator,
1264
- vregs.load_uA);
1265
-
1266
- if (ret)
1267
- return ret;
1268
-
1269
- return regulator_enable(regulator);
1270
-
1271
-}
1272
-
1273
-static void qca_disable_regulator(struct qca_vreg vregs,
1274
- struct regulator *regulator)
1275
-{
1276
- regulator_disable(regulator);
1277
- regulator_set_voltage(regulator, 0, vregs.max_uV);
1278
- if (vregs.load_uA)
1279
- regulator_set_load(regulator, 0);
1280
-
1281
-}
1282
-
1283
-static int qca_power_setup(struct hci_uart *hu, bool on)
1284
-{
1285
- struct qca_vreg *vregs;
1286
- struct regulator_bulk_data *vreg_bulk;
12871807 struct qca_serdev *qcadev;
1288
- int i, num_vregs, ret = 0;
1808
+ struct qca_data *qca = hu->priv;
1809
+ unsigned long flags;
1810
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
1811
+
1812
+ /* From this point we go into power off state. But serial port is
1813
+ * still open, stop queueing the IBS data and flush all the buffered
1814
+ * data in skb's.
1815
+ */
1816
+ spin_lock_irqsave(&qca->hci_ibs_lock, flags);
1817
+ clear_bit(QCA_IBS_ENABLED, &qca->flags);
1818
+ qca_flush(hu);
1819
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
1820
+
1821
+ /* Non-serdev device usually is powered by external power
1822
+ * and don't need additional action in driver for power down
1823
+ */
1824
+ if (!hu->serdev)
1825
+ return;
12891826
12901827 qcadev = serdev_device_get_drvdata(hu->serdev);
1291
- if (!qcadev || !qcadev->bt_power || !qcadev->bt_power->vreg_data ||
1292
- !qcadev->bt_power->vreg_bulk)
1293
- return -EINVAL;
12941828
1295
- vregs = qcadev->bt_power->vreg_data->vregs;
1296
- vreg_bulk = qcadev->bt_power->vreg_bulk;
1297
- num_vregs = qcadev->bt_power->vreg_data->num_vregs;
1298
- BT_DBG("on: %d", on);
1299
- if (on && !qcadev->bt_power->vregs_on) {
1300
- for (i = 0; i < num_vregs; i++) {
1301
- ret = qca_enable_regulator(vregs[i],
1302
- vreg_bulk[i].consumer);
1303
- if (ret)
1304
- break;
1305
- }
1829
+ if (qca_is_wcn399x(soc_type)) {
1830
+ host_set_baudrate(hu, 2400);
1831
+ qca_send_power_pulse(hu, false);
1832
+ qca_regulator_disable(qcadev);
1833
+ } else if (qcadev->bt_en) {
1834
+ gpiod_set_value_cansleep(qcadev->bt_en, 0);
1835
+ }
1836
+}
13061837
1307
- if (ret) {
1308
- BT_ERR("failed to enable regulator:%s", vregs[i].name);
1309
- /* turn off regulators which are enabled */
1310
- for (i = i - 1; i >= 0; i--)
1311
- qca_disable_regulator(vregs[i],
1312
- vreg_bulk[i].consumer);
1313
- } else {
1314
- qcadev->bt_power->vregs_on = true;
1315
- }
1316
- } else if (!on && qcadev->bt_power->vregs_on) {
1317
- /* turn off regulator in reverse order */
1318
- i = qcadev->bt_power->vreg_data->num_vregs - 1;
1319
- for ( ; i >= 0; i--)
1320
- qca_disable_regulator(vregs[i], vreg_bulk[i].consumer);
1838
+static int qca_power_off(struct hci_dev *hdev)
1839
+{
1840
+ struct hci_uart *hu = hci_get_drvdata(hdev);
1841
+ struct qca_data *qca = hu->priv;
1842
+ enum qca_btsoc_type soc_type = qca_soc_type(hu);
13211843
1322
- qcadev->bt_power->vregs_on = false;
1844
+ hu->hdev->hw_error = NULL;
1845
+ hu->hdev->cmd_timeout = NULL;
1846
+
1847
+ del_timer_sync(&qca->wake_retrans_timer);
1848
+ del_timer_sync(&qca->tx_idle_timer);
1849
+
1850
+ /* Stop sending shutdown command if soc crashes. */
1851
+ if (soc_type != QCA_ROME
1852
+ && qca->memdump_state == QCA_MEMDUMP_IDLE) {
1853
+ qca_send_pre_shutdown_cmd(hdev);
1854
+ usleep_range(8000, 10000);
13231855 }
13241856
1857
+ qca_power_shutdown(hu);
1858
+ return 0;
1859
+}
1860
+
1861
+static int qca_regulator_enable(struct qca_serdev *qcadev)
1862
+{
1863
+ struct qca_power *power = qcadev->bt_power;
1864
+ int ret;
1865
+
1866
+ /* Already enabled */
1867
+ if (power->vregs_on)
1868
+ return 0;
1869
+
1870
+ BT_DBG("enabling %d regulators)", power->num_vregs);
1871
+
1872
+ ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk);
1873
+ if (ret)
1874
+ return ret;
1875
+
1876
+ power->vregs_on = true;
1877
+
1878
+ ret = clk_prepare_enable(qcadev->susclk);
1879
+ if (ret)
1880
+ qca_regulator_disable(qcadev);
1881
+
13251882 return ret;
1883
+}
1884
+
1885
+static void qca_regulator_disable(struct qca_serdev *qcadev)
1886
+{
1887
+ struct qca_power *power;
1888
+
1889
+ if (!qcadev)
1890
+ return;
1891
+
1892
+ power = qcadev->bt_power;
1893
+
1894
+ /* Already disabled? */
1895
+ if (!power->vregs_on)
1896
+ return;
1897
+
1898
+ regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
1899
+ power->vregs_on = false;
1900
+
1901
+ clk_disable_unprepare(qcadev->susclk);
13261902 }
13271903
13281904 static int qca_init_regulators(struct qca_power *qca,
13291905 const struct qca_vreg *vregs, size_t num_vregs)
13301906 {
1907
+ struct regulator_bulk_data *bulk;
1908
+ int ret;
13311909 int i;
13321910
1333
- qca->vreg_bulk = devm_kcalloc(qca->dev, num_vregs,
1334
- sizeof(struct regulator_bulk_data),
1335
- GFP_KERNEL);
1336
- if (!qca->vreg_bulk)
1911
+ bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL);
1912
+ if (!bulk)
13371913 return -ENOMEM;
13381914
13391915 for (i = 0; i < num_vregs; i++)
1340
- qca->vreg_bulk[i].supply = vregs[i].name;
1916
+ bulk[i].supply = vregs[i].name;
13411917
1342
- return devm_regulator_bulk_get(qca->dev, num_vregs, qca->vreg_bulk);
1918
+ ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk);
1919
+ if (ret < 0)
1920
+ return ret;
1921
+
1922
+ for (i = 0; i < num_vregs; i++) {
1923
+ ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA);
1924
+ if (ret)
1925
+ return ret;
1926
+ }
1927
+
1928
+ qca->vreg_bulk = bulk;
1929
+ qca->num_vregs = num_vregs;
1930
+
1931
+ return 0;
13431932 }
13441933
13451934 static int qca_serdev_probe(struct serdev_device *serdev)
13461935 {
13471936 struct qca_serdev *qcadev;
1348
- const struct qca_vreg_data *data;
1937
+ struct hci_dev *hdev;
1938
+ const struct qca_device_data *data;
13491939 int err;
1940
+ bool power_ctrl_enabled = true;
13501941
13511942 qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
13521943 if (!qcadev)
13531944 return -ENOMEM;
13541945
13551946 qcadev->serdev_hu.serdev = serdev;
1356
- data = of_device_get_match_data(&serdev->dev);
1947
+ data = device_get_match_data(&serdev->dev);
13571948 serdev_device_set_drvdata(serdev, qcadev);
1358
- if (data && data->soc_type == QCA_WCN3990) {
1359
- qcadev->btsoc_type = QCA_WCN3990;
1949
+ device_property_read_string(&serdev->dev, "firmware-name",
1950
+ &qcadev->firmware_name);
1951
+ device_property_read_u32(&serdev->dev, "max-speed",
1952
+ &qcadev->oper_speed);
1953
+ if (!qcadev->oper_speed)
1954
+ BT_DBG("UART will pick default operating speed");
1955
+
1956
+ if (data && qca_is_wcn399x(data->soc_type)) {
1957
+ qcadev->btsoc_type = data->soc_type;
13601958 qcadev->bt_power = devm_kzalloc(&serdev->dev,
13611959 sizeof(struct qca_power),
13621960 GFP_KERNEL);
....@@ -1364,41 +1962,44 @@
13641962 return -ENOMEM;
13651963
13661964 qcadev->bt_power->dev = &serdev->dev;
1367
- qcadev->bt_power->vreg_data = data;
13681965 err = qca_init_regulators(qcadev->bt_power, data->vregs,
13691966 data->num_vregs);
13701967 if (err) {
13711968 BT_ERR("Failed to init regulators:%d", err);
1372
- goto out;
1969
+ return err;
13731970 }
13741971
13751972 qcadev->bt_power->vregs_on = false;
13761973
1377
- device_property_read_u32(&serdev->dev, "max-speed",
1378
- &qcadev->oper_speed);
1379
- if (!qcadev->oper_speed)
1380
- BT_DBG("UART will pick default operating speed");
1381
-
1382
- err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
1383
- if (err) {
1384
- BT_ERR("wcn3990 serdev registration failed");
1385
- goto out;
1386
- }
1387
- } else {
1388
- qcadev->btsoc_type = QCA_ROME;
1389
- qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
1390
- GPIOD_OUT_LOW);
1391
- if (IS_ERR(qcadev->bt_en)) {
1392
- dev_err(&serdev->dev, "failed to acquire enable gpio\n");
1393
- return PTR_ERR(qcadev->bt_en);
1394
- }
1395
-
1396
- qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
1974
+ qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
13971975 if (IS_ERR(qcadev->susclk)) {
13981976 dev_err(&serdev->dev, "failed to acquire clk\n");
13991977 return PTR_ERR(qcadev->susclk);
14001978 }
14011979
1980
+ err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
1981
+ if (err) {
1982
+ BT_ERR("wcn3990 serdev registration failed");
1983
+ return err;
1984
+ }
1985
+ } else {
1986
+ if (data)
1987
+ qcadev->btsoc_type = data->soc_type;
1988
+ else
1989
+ qcadev->btsoc_type = QCA_ROME;
1990
+
1991
+ qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
1992
+ GPIOD_OUT_LOW);
1993
+ if (IS_ERR_OR_NULL(qcadev->bt_en)) {
1994
+ dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
1995
+ power_ctrl_enabled = false;
1996
+ }
1997
+
1998
+ qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
1999
+ if (IS_ERR(qcadev->susclk)) {
2000
+ dev_warn(&serdev->dev, "failed to acquire clk\n");
2001
+ return PTR_ERR(qcadev->susclk);
2002
+ }
14022003 err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
14032004 if (err)
14042005 return err;
....@@ -1408,39 +2009,211 @@
14082009 return err;
14092010
14102011 err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
1411
- if (err)
2012
+ if (err) {
2013
+ BT_ERR("Rome serdev registration failed");
14122014 clk_disable_unprepare(qcadev->susclk);
2015
+ return err;
2016
+ }
14132017 }
14142018
1415
-out: return err;
2019
+ hdev = qcadev->serdev_hu.hdev;
14162020
2021
+ if (power_ctrl_enabled) {
2022
+ set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
2023
+ hdev->shutdown = qca_power_off;
2024
+ }
2025
+
2026
+ if (data) {
2027
+ /* Wideband speech support must be set per driver since it can't
2028
+ * be queried via hci. Same with the valid le states quirk.
2029
+ */
2030
+ if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH)
2031
+ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
2032
+ &hdev->quirks);
2033
+
2034
+ if (data->capabilities & QCA_CAP_VALID_LE_STATES)
2035
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
2036
+ }
2037
+
2038
+ return 0;
14172039 }
14182040
14192041 static void qca_serdev_remove(struct serdev_device *serdev)
14202042 {
14212043 struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2044
+ struct qca_power *power = qcadev->bt_power;
14222045
1423
- if (qcadev->btsoc_type == QCA_WCN3990)
2046
+ if (qca_is_wcn399x(qcadev->btsoc_type) && power->vregs_on)
14242047 qca_power_shutdown(&qcadev->serdev_hu);
1425
- else
2048
+ else if (qcadev->susclk)
14262049 clk_disable_unprepare(qcadev->susclk);
14272050
14282051 hci_uart_unregister_device(&qcadev->serdev_hu);
14292052 }
14302053
2054
+static void qca_serdev_shutdown(struct device *dev)
2055
+{
2056
+ int ret;
2057
+ int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
2058
+ struct serdev_device *serdev = to_serdev_device(dev);
2059
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2060
+ const u8 ibs_wake_cmd[] = { 0xFD };
2061
+ const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
2062
+
2063
+ if (qcadev->btsoc_type == QCA_QCA6390) {
2064
+ serdev_device_write_flush(serdev);
2065
+ ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
2066
+ sizeof(ibs_wake_cmd));
2067
+ if (ret < 0) {
2068
+ BT_ERR("QCA send IBS_WAKE_IND error: %d", ret);
2069
+ return;
2070
+ }
2071
+ serdev_device_wait_until_sent(serdev, timeout);
2072
+ usleep_range(8000, 10000);
2073
+
2074
+ serdev_device_write_flush(serdev);
2075
+ ret = serdev_device_write_buf(serdev, edl_reset_soc_cmd,
2076
+ sizeof(edl_reset_soc_cmd));
2077
+ if (ret < 0) {
2078
+ BT_ERR("QCA send EDL_RESET_REQ error: %d", ret);
2079
+ return;
2080
+ }
2081
+ serdev_device_wait_until_sent(serdev, timeout);
2082
+ usleep_range(8000, 10000);
2083
+ }
2084
+}
2085
+
2086
+static int __maybe_unused qca_suspend(struct device *dev)
2087
+{
2088
+ struct serdev_device *serdev = to_serdev_device(dev);
2089
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2090
+ struct hci_uart *hu = &qcadev->serdev_hu;
2091
+ struct qca_data *qca = hu->priv;
2092
+ unsigned long flags;
2093
+ bool tx_pending = false;
2094
+ int ret = 0;
2095
+ u8 cmd;
2096
+
2097
+ set_bit(QCA_SUSPENDING, &qca->flags);
2098
+
2099
+ /* Device is downloading patch or doesn't support in-band sleep. */
2100
+ if (!test_bit(QCA_IBS_ENABLED, &qca->flags))
2101
+ return 0;
2102
+
2103
+ cancel_work_sync(&qca->ws_awake_device);
2104
+ cancel_work_sync(&qca->ws_awake_rx);
2105
+
2106
+ spin_lock_irqsave_nested(&qca->hci_ibs_lock,
2107
+ flags, SINGLE_DEPTH_NESTING);
2108
+
2109
+ switch (qca->tx_ibs_state) {
2110
+ case HCI_IBS_TX_WAKING:
2111
+ del_timer(&qca->wake_retrans_timer);
2112
+ fallthrough;
2113
+ case HCI_IBS_TX_AWAKE:
2114
+ del_timer(&qca->tx_idle_timer);
2115
+
2116
+ serdev_device_write_flush(hu->serdev);
2117
+ cmd = HCI_IBS_SLEEP_IND;
2118
+ ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd));
2119
+
2120
+ if (ret < 0) {
2121
+ BT_ERR("Failed to send SLEEP to device");
2122
+ break;
2123
+ }
2124
+
2125
+ qca->tx_ibs_state = HCI_IBS_TX_ASLEEP;
2126
+ qca->ibs_sent_slps++;
2127
+ tx_pending = true;
2128
+ break;
2129
+
2130
+ case HCI_IBS_TX_ASLEEP:
2131
+ break;
2132
+
2133
+ default:
2134
+ BT_ERR("Spurious tx state %d", qca->tx_ibs_state);
2135
+ ret = -EINVAL;
2136
+ break;
2137
+ }
2138
+
2139
+ spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
2140
+
2141
+ if (ret < 0)
2142
+ goto error;
2143
+
2144
+ if (tx_pending) {
2145
+ serdev_device_wait_until_sent(hu->serdev,
2146
+ msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS));
2147
+ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu);
2148
+ }
2149
+
2150
+ /* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going
2151
+ * to sleep, so that the packet does not wake the system later.
2152
+ */
2153
+ ret = wait_event_interruptible_timeout(qca->suspend_wait_q,
2154
+ qca->rx_ibs_state == HCI_IBS_RX_ASLEEP,
2155
+ msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS));
2156
+ if (ret == 0) {
2157
+ ret = -ETIMEDOUT;
2158
+ goto error;
2159
+ }
2160
+
2161
+ return 0;
2162
+
2163
+error:
2164
+ clear_bit(QCA_SUSPENDING, &qca->flags);
2165
+
2166
+ return ret;
2167
+}
2168
+
2169
+static int __maybe_unused qca_resume(struct device *dev)
2170
+{
2171
+ struct serdev_device *serdev = to_serdev_device(dev);
2172
+ struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
2173
+ struct hci_uart *hu = &qcadev->serdev_hu;
2174
+ struct qca_data *qca = hu->priv;
2175
+
2176
+ clear_bit(QCA_SUSPENDING, &qca->flags);
2177
+
2178
+ return 0;
2179
+}
2180
+
2181
+static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume);
2182
+
2183
+#ifdef CONFIG_OF
14312184 static const struct of_device_id qca_bluetooth_of_match[] = {
14322185 { .compatible = "qcom,qca6174-bt" },
1433
- { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data},
2186
+ { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390},
2187
+ { .compatible = "qcom,qca9377-bt" },
2188
+ { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990},
2189
+ { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991},
2190
+ { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998},
14342191 { /* sentinel */ }
14352192 };
14362193 MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match);
2194
+#endif
2195
+
2196
+#ifdef CONFIG_ACPI
2197
+static const struct acpi_device_id qca_bluetooth_acpi_match[] = {
2198
+ { "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2199
+ { "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2200
+ { "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2201
+ { "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 },
2202
+ { },
2203
+};
2204
+MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match);
2205
+#endif
2206
+
14372207
14382208 static struct serdev_device_driver qca_serdev_driver = {
14392209 .probe = qca_serdev_probe,
14402210 .remove = qca_serdev_remove,
14412211 .driver = {
14422212 .name = "hci_uart_qca",
1443
- .of_match_table = qca_bluetooth_of_match,
2213
+ .of_match_table = of_match_ptr(qca_bluetooth_of_match),
2214
+ .acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match),
2215
+ .shutdown = qca_serdev_shutdown,
2216
+ .pm = &qca_pm_ops,
14442217 },
14452218 };
14462219