forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 cde9070d9970eef1f7ec2360586c802a16230ad8
kernel/drivers/net/wireless/rockchip_wlan/cywdhd/bcmdhd/dhd_linux_wq.c
@@ -1,16 +1,17 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Broadcom Dongle Host Driver (DHD), Generic work queue framework
  * Generic interface to handle dhd deferred work events
  *
- * Copyright (C) 1999-2019, Broadcom Corporation
- *
+ * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
+ *
+ * Copyright (C) 1999-2017, Broadcom Corporation
+ *
  * Unless you and Broadcom execute a separate written software license
  * agreement governing use of this software, this software is licensed to you
  * under the terms of the GNU General Public License version 2 (the "GPL"),
  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
  * following added to such license:
- *
+ *
  * As a special exception, the copyright holders of this software give you
  * permission to link this software with independent modules, and to copy and
  * distribute the resulting executable under terms of your choice, provided that
@@ -18,7 +19,7 @@
  * the license of that module. An independent module is a module which is not
  * derived from this software. The special exception does not apply to any
  * modifications of the software.
- *
+ *
  * Notwithstanding the above, under no circumstances may you combine this
  * software in any way with any other Broadcom software provided under a license
  * other than the GPL, without Broadcom's express prior written consent.
@@ -26,7 +27,7 @@
  *
  * <<Broadcom-WL-IPTag/Open:>>
  *
- * $Id: dhd_linux_wq.c 514727 2014-11-12 03:02:48Z $
+ * $Id: dhd_linux_wq.c 675839 2016-12-19 03:07:26Z $
  */

 #include <linux/init.h>
@@ -47,28 +48,37 @@
 #include <dhd_dbg.h>
 #include <dhd_linux_wq.h>

-struct dhd_deferred_event_t {
-	u8 event;		/* holds the event */
-	void *event_data;	/* Holds event specific data */
+typedef struct dhd_deferred_event {
+	u8 event;		/* holds the event */
+	void *event_data;	/* holds event specific data */
 	event_handler_t event_handler;
-};
-#define DEFRD_EVT_SIZE	sizeof(struct dhd_deferred_event_t)
+	unsigned long pad;	/* for memory alignment to power of 2 */
+} dhd_deferred_event_t;
+
+#define DEFRD_EVT_SIZE	(sizeof(dhd_deferred_event_t))
+
+/*
+ * work events may occur simultaneously.
+ * can hold upto 64 low priority events and 16 high priority events
+ */
+#define DHD_PRIO_WORK_FIFO_SIZE	(16 * DEFRD_EVT_SIZE)
+#define DHD_WORK_FIFO_SIZE	(64 * DEFRD_EVT_SIZE)
+
+#define DHD_FIFO_HAS_FREE_SPACE(fifo) \
+	((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE))
+#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \
+	((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE))

 struct dhd_deferred_wq {
-	struct work_struct deferred_work; /* should be the first member */
+	struct work_struct deferred_work;	/* should be the first member */

-	/*
-	 * work events may occur simultaneously.
-	 * Can hold upto 64 low priority events and 4 high priority events
-	 */
-#define DHD_PRIO_WORK_FIFO_SIZE	(4 * sizeof(struct dhd_deferred_event_t))
-#define DHD_WORK_FIFO_SIZE	(64 * sizeof(struct dhd_deferred_event_t))
-	struct kfifo *prio_fifo;
+	struct kfifo *prio_fifo;
 	struct kfifo *work_fifo;
 	u8 *prio_fifo_buf;
 	u8 *work_fifo_buf;
 	spinlock_t work_lock;
 	void *dhd_info; /* review: does it require */
+	u32 event_skip_mask;
 };

 static inline struct kfifo*
@@ -77,15 +87,11 @@
 	struct kfifo *fifo;
 	gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC;

-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33))
-	fifo = kfifo_init(buf, size, flags, lock);
-#else
 	fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags);
 	if (!fifo) {
 		return NULL;
 	}
 	kfifo_init(fifo, buf, size);
-#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */
 	return fifo;
 }

@@ -93,10 +99,7 @@
 dhd_kfifo_free(struct kfifo *fifo)
 {
 	kfifo_free(fifo);
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31))
-	/* FC11 releases the fifo memory */
 	kfree(fifo);
-#endif
 }

 /* deferred work functions */
@@ -117,9 +120,8 @@

 	work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq),
 		flags);
-
 	if (!work) {
-		DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__));
+		DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__));
 		goto return_null;
 	}

@@ -130,10 +132,12 @@

 	/* allocate buffer to hold prio events */
 	fifo_size = DHD_PRIO_WORK_FIFO_SIZE;
-	fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
+	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
+		roundup_pow_of_two(fifo_size);
 	buf = (u8*)kzalloc(fifo_size, flags);
 	if (!buf) {
-		DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__));
+		DHD_ERROR(("%s: prio work fifo allocation failed\n",
+			__FUNCTION__));
 		goto return_null;
 	}

@@ -146,10 +150,11 @@

 	/* allocate buffer to hold work events */
 	fifo_size = DHD_WORK_FIFO_SIZE;
-	fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size);
+	fifo_size = is_power_of_2(fifo_size) ? fifo_size :
+		roundup_pow_of_two(fifo_size);
 	buf = (u8*)kzalloc(fifo_size, flags);
 	if (!buf) {
-		DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__));
+		DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__));
 		goto return_null;
 	}

@@ -161,13 +166,14 @@
 	}

 	work->dhd_info = dhd_info;
-	DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__));
+	work->event_skip_mask = 0;
+	DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__));
 	return work;

 return_null:
-
-	if (work)
+	if (work) {
 		dhd_deferred_work_deinit(work);
+	}

 	return NULL;
 }
@@ -177,9 +183,9 @@
 {
 	struct dhd_deferred_wq *deferred_work = work;

-
 	if (!deferred_work) {
-		DHD_ERROR(("%s: deferred work has been freed alread \n", __FUNCTION__));
+		DHD_ERROR(("%s: deferred work has been freed already\n",
+			__FUNCTION__));
 		return;
 	}

@@ -190,13 +196,29 @@
 	 * free work event fifo.
 	 * kfifo_free frees locally allocated fifo buffer
 	 */
-	if (deferred_work->prio_fifo)
+	if (deferred_work->prio_fifo) {
 		dhd_kfifo_free(deferred_work->prio_fifo);
+	}

-	if (deferred_work->work_fifo)
+	if (deferred_work->work_fifo) {
 		dhd_kfifo_free(deferred_work->work_fifo);
+	}

 	kfree(deferred_work);
+}
+
+/* select kfifo according to priority */
+static inline struct kfifo *
+dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq,
+	u8 priority)
+{
+	if (priority == DHD_WQ_WORK_PRIORITY_HIGH) {
+		return deferred_wq->prio_fifo;
+	} else if (priority == DHD_WQ_WORK_PRIORITY_LOW) {
+		return deferred_wq->work_fifo;
+	} else {
+		return NULL;
+	}
 }

 /*
@@ -207,19 +229,33 @@
 dhd_deferred_schedule_work(void *workq, void *event_data, u8 event,
 	event_handler_t event_handler, u8 priority)
 {
-	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *) workq;
-	struct dhd_deferred_event_t deferred_event;
-	int status;
+	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq;
+	struct kfifo *fifo;
+	dhd_deferred_event_t deferred_event;
+	int bytes_copied = 0;

 	if (!deferred_wq) {
-		DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
 		ASSERT(0);
 		return DHD_WQ_STS_UNINITIALIZED;
 	}

 	if (!event || (event >= DHD_MAX_WQ_EVENTS)) {
-		DHD_ERROR(("%s: Unknown event \n", __FUNCTION__));
+		DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__,
+			event));
 		return DHD_WQ_STS_UNKNOWN_EVENT;
+	}
+
+	if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) {
+		DHD_ERROR(("%s: unknown priority, priority=%d\n",
+			__FUNCTION__, priority));
+		return DHD_WQ_STS_UNKNOWN_PRIORITY;
+	}
+
+	if ((deferred_wq->event_skip_mask & (1 << event))) {
+		DHD_ERROR(("%s: Skip event requested. Mask = 0x%x\n",
+			__FUNCTION__, deferred_wq->event_skip_mask));
+		return DHD_WQ_STS_EVENT_SKIPPED;
 	}

 	/*
@@ -235,28 +271,29 @@
 	deferred_event.event_data = event_data;
 	deferred_event.event_handler = event_handler;

-	if (priority == DHD_WORK_PRIORITY_HIGH) {
-		status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event,
-			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
-	} else {
-		status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event,
+	fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority);
+	if (DHD_FIFO_HAS_FREE_SPACE(fifo)) {
+		bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event,
 			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
 	}
-
-	if (!status) {
+	if (bytes_copied != DEFRD_EVT_SIZE) {
+		DHD_ERROR(("%s: failed to schedule deferred work, "
+			"priority=%d, bytes_copied=%d\n", __FUNCTION__,
+			priority, bytes_copied));
 		return DHD_WQ_STS_SCHED_FAILED;
 	}
 	schedule_work((struct work_struct *)deferred_wq);
 	return DHD_WQ_STS_OK;
 }

-static int
-dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_event_t *event)
+static bool
+dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq,
+	dhd_deferred_event_t *event)
 {
-	int status = 0;
+	int bytes_copied = 0;

 	if (!deferred_wq) {
-		DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__));
+		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
 		return DHD_WQ_STS_UNINITIALIZED;
 	}

@@ -269,17 +306,36 @@
 	ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1);
 	ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1);

-	/* first read priorit event fifo */
-	status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event,
-		DEFRD_EVT_SIZE, &deferred_wq->work_lock);
-
-	if (!status) {
-		/* priority fifo is empty. Now read low prio work fifo */
-		status = kfifo_out_spinlocked(deferred_wq->work_fifo, event,
-			DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	/* handle priority work */
+	if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) {
+		bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo,
+			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
 	}

-	return status;
+	/* handle normal work if priority work doesn't have enough data */
+	if ((bytes_copied != DEFRD_EVT_SIZE) &&
+		DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) {
+		bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo,
+			event, DEFRD_EVT_SIZE, &deferred_wq->work_lock);
+	}
+
+	return (bytes_copied == DEFRD_EVT_SIZE);
+}
+
+static inline void
+dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event)
+{
+	if (!work_event) {
+		DHD_ERROR(("%s: work_event is null\n", __FUNCTION__));
+		return;
+	}
+
+	DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__,
+		work_event->event));
+	DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__,
+		work_event->event_data));
+	DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__,
+		work_event->event_handler));
 }

 /*
@@ -288,9 +344,8 @@
 static void
 dhd_deferred_work_handler(struct work_struct *work)
 {
-	struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
-	struct dhd_deferred_event_t work_event;
-	int status;
+	struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work;
+	dhd_deferred_event_t work_event;

 	if (!deferred_work) {
 		DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__));
@@ -298,24 +353,47 @@
 	}

 	do {
-		status = dhd_get_scheduled_work(deferred_work, &work_event);
-		DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status));
-		if (!status) {
-			DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status));
+		if (!dhd_get_scheduled_work(deferred_work, &work_event)) {
+			DHD_TRACE(("%s: no event to handle\n", __FUNCTION__));
 			break;
 		}

-		if (work_event.event > DHD_MAX_WQ_EVENTS) {
-			DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event));
-			break;
+		if (work_event.event >= DHD_MAX_WQ_EVENTS) {
+			DHD_ERROR(("%s: unknown event\n", __FUNCTION__));
+			dhd_deferred_dump_work_event(&work_event);
+			ASSERT(work_event.event < DHD_MAX_WQ_EVENTS);
+			continue;
 		}

 		if (work_event.event_handler) {
 			work_event.event_handler(deferred_work->dhd_info,
 				work_event.event_data, work_event.event);
 		} else {
-			DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event));
+			DHD_ERROR(("%s: event handler is null\n",
+				__FUNCTION__));
+			dhd_deferred_dump_work_event(&work_event);
+			ASSERT(work_event.event_handler != NULL);
 		}
 	} while (1);
+
 	return;
 }
+
+void
+dhd_deferred_work_set_skip(void *work, u8 event, bool set)
+{
+	struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work;
+
+	if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) {
+		DHD_ERROR(("%s: Invalid!!\n", __FUNCTION__));
+		return;
+	}
+
+	if (set) {
+		/* Set */
+		deferred_wq->event_skip_mask |= (1 << event);
+	} else {
+		/* Clear */
+		deferred_wq->event_skip_mask &= ~(1 << event);
+	}
+}