| .. | .. |
|---|
| 1 | | -/* SPDX-License-Identifier: GPL-2.0 */ |
|---|
| 2 | 1 | /* |
|---|
| 3 | 2 | * Broadcom Dongle Host Driver (DHD), Generic work queue framework |
|---|
| 4 | 3 | * Generic interface to handle dhd deferred work events |
|---|
| 5 | 4 | * |
|---|
| 6 | | - * Copyright (C) 1999-2019, Broadcom Corporation |
|---|
| 7 | | - * |
|---|
| 5 | + * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation |
|---|
| 6 | + * |
|---|
| 7 | + * Copyright (C) 1999-2017, Broadcom Corporation |
|---|
| 8 | + * |
|---|
| 8 | 9 | * Unless you and Broadcom execute a separate written software license |
|---|
| 9 | 10 | * agreement governing use of this software, this software is licensed to you |
|---|
| 10 | 11 | * under the terms of the GNU General Public License version 2 (the "GPL"), |
|---|
| 11 | 12 | * available at http://www.broadcom.com/licenses/GPLv2.php, with the |
|---|
| 12 | 13 | * following added to such license: |
|---|
| 13 | | - * |
|---|
| 14 | + * |
|---|
| 14 | 15 | * As a special exception, the copyright holders of this software give you |
|---|
| 15 | 16 | * permission to link this software with independent modules, and to copy and |
|---|
| 16 | 17 | * distribute the resulting executable under terms of your choice, provided that |
|---|
| .. | .. |
|---|
| 18 | 19 | * the license of that module. An independent module is a module which is not |
|---|
| 19 | 20 | * derived from this software. The special exception does not apply to any |
|---|
| 20 | 21 | * modifications of the software. |
|---|
| 21 | | - * |
|---|
| 22 | + * |
|---|
| 22 | 23 | * Notwithstanding the above, under no circumstances may you combine this |
|---|
| 23 | 24 | * software in any way with any other Broadcom software provided under a license |
|---|
| 24 | 25 | * other than the GPL, without Broadcom's express prior written consent. |
|---|
| .. | .. |
|---|
| 26 | 27 | * |
|---|
| 27 | 28 | * <<Broadcom-WL-IPTag/Open:>> |
|---|
| 28 | 29 | * |
|---|
| 29 | | - * $Id: dhd_linux_wq.c 514727 2014-11-12 03:02:48Z $ |
|---|
| 30 | + * $Id: dhd_linux_wq.c 675839 2016-12-19 03:07:26Z $ |
|---|
| 30 | 31 | */ |
|---|
| 31 | 32 | |
|---|
| 32 | 33 | #include <linux/init.h> |
|---|
| .. | .. |
|---|
| 47 | 48 | #include <dhd_dbg.h> |
|---|
| 48 | 49 | #include <dhd_linux_wq.h> |
|---|
| 49 | 50 | |
|---|
| 50 | | -struct dhd_deferred_event_t { |
|---|
| 51 | | - u8 event; /* holds the event */ |
|---|
| 52 | | - void *event_data; /* Holds event specific data */ |
|---|
| 51 | +typedef struct dhd_deferred_event { |
|---|
| 52 | + u8 event; /* holds the event */ |
|---|
| 53 | + void *event_data; /* holds event specific data */ |
|---|
| 53 | 54 | event_handler_t event_handler; |
|---|
| 54 | | -}; |
|---|
| 55 | | -#define DEFRD_EVT_SIZE sizeof(struct dhd_deferred_event_t) |
|---|
| 55 | + unsigned long pad; /* for memory alignment to power of 2 */ |
|---|
| 56 | +} dhd_deferred_event_t; |
|---|
| 57 | + |
|---|
| 58 | +#define DEFRD_EVT_SIZE (sizeof(dhd_deferred_event_t)) |
|---|
| 59 | + |
|---|
| 60 | +/* |
|---|
| 61 | + * work events may occur simultaneously. |
|---|
| 62 | + * can hold up to 64 low-priority events and 16 high-priority events
|---|
| 63 | + */ |
|---|
| 64 | +#define DHD_PRIO_WORK_FIFO_SIZE (16 * DEFRD_EVT_SIZE) |
|---|
| 65 | +#define DHD_WORK_FIFO_SIZE (64 * DEFRD_EVT_SIZE) |
|---|
| 66 | + |
|---|
| 67 | +#define DHD_FIFO_HAS_FREE_SPACE(fifo) \ |
|---|
| 68 | + ((fifo) && (kfifo_avail(fifo) >= DEFRD_EVT_SIZE)) |
|---|
| 69 | +#define DHD_FIFO_HAS_ENOUGH_DATA(fifo) \ |
|---|
| 70 | + ((fifo) && (kfifo_len(fifo) >= DEFRD_EVT_SIZE)) |
|---|
| 56 | 71 | |
|---|
| 57 | 72 | struct dhd_deferred_wq { |
|---|
| 58 | | - struct work_struct deferred_work; /* should be the first member */ |
|---|
| 73 | + struct work_struct deferred_work; /* should be the first member */ |
|---|
| 59 | 74 | |
|---|
| 60 | | - /* |
|---|
| 61 | | - * work events may occur simultaneously. |
|---|
| 62 | | - * Can hold upto 64 low priority events and 4 high priority events |
|---|
| 63 | | - */ |
|---|
| 64 | | -#define DHD_PRIO_WORK_FIFO_SIZE (4 * sizeof(struct dhd_deferred_event_t)) |
|---|
| 65 | | -#define DHD_WORK_FIFO_SIZE (64 * sizeof(struct dhd_deferred_event_t)) |
|---|
| 66 | | - struct kfifo *prio_fifo; |
|---|
| 75 | + struct kfifo *prio_fifo; |
|---|
| 67 | 76 | struct kfifo *work_fifo; |
|---|
| 68 | 77 | u8 *prio_fifo_buf; |
|---|
| 69 | 78 | u8 *work_fifo_buf; |
|---|
| 70 | 79 | spinlock_t work_lock; |
|---|
| 71 | 80 | void *dhd_info; /* review: does it require */ |
|---|
| 81 | + u32 event_skip_mask; |
|---|
| 72 | 82 | }; |
|---|
| 73 | 83 | |
|---|
| 74 | 84 | static inline struct kfifo* |
|---|
| .. | .. |
|---|
| 77 | 87 | struct kfifo *fifo; |
|---|
| 78 | 88 | gfp_t flags = CAN_SLEEP()? GFP_KERNEL : GFP_ATOMIC; |
|---|
| 79 | 89 | |
|---|
| 80 | | -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) |
|---|
| 81 | | - fifo = kfifo_init(buf, size, flags, lock); |
|---|
| 82 | | -#else |
|---|
| 83 | 90 | fifo = (struct kfifo *)kzalloc(sizeof(struct kfifo), flags); |
|---|
| 84 | 91 | if (!fifo) { |
|---|
| 85 | 92 | return NULL; |
|---|
| 86 | 93 | } |
|---|
| 87 | 94 | kfifo_init(fifo, buf, size); |
|---|
| 88 | | -#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33)) */ |
|---|
| 89 | 95 | return fifo; |
|---|
| 90 | 96 | } |
|---|
| 91 | 97 | |
|---|
| .. | .. |
|---|
| 93 | 99 | dhd_kfifo_free(struct kfifo *fifo) |
|---|
| 94 | 100 | { |
|---|
| 95 | 101 | kfifo_free(fifo); |
|---|
| 96 | | -#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 31)) |
|---|
| 97 | | - /* FC11 releases the fifo memory */ |
|---|
| 98 | 102 | kfree(fifo); |
|---|
| 99 | | -#endif |
|---|
| 100 | 103 | } |
|---|
| 101 | 104 | |
|---|
| 102 | 105 | /* deferred work functions */ |
|---|
| .. | .. |
|---|
| 117 | 120 | |
|---|
| 118 | 121 | work = (struct dhd_deferred_wq *)kzalloc(sizeof(struct dhd_deferred_wq), |
|---|
| 119 | 122 | flags); |
|---|
| 120 | | - |
|---|
| 121 | 123 | if (!work) { |
|---|
| 122 | | - DHD_ERROR(("%s: work queue creation failed \n", __FUNCTION__)); |
|---|
| 124 | + DHD_ERROR(("%s: work queue creation failed\n", __FUNCTION__)); |
|---|
| 123 | 125 | goto return_null; |
|---|
| 124 | 126 | } |
|---|
| 125 | 127 | |
|---|
| .. | .. |
|---|
| 130 | 132 | |
|---|
| 131 | 133 | /* allocate buffer to hold prio events */ |
|---|
| 132 | 134 | fifo_size = DHD_PRIO_WORK_FIFO_SIZE; |
|---|
| 133 | | - fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size); |
|---|
| 135 | + fifo_size = is_power_of_2(fifo_size) ? fifo_size : |
|---|
| 136 | + roundup_pow_of_two(fifo_size); |
|---|
| 134 | 137 | buf = (u8*)kzalloc(fifo_size, flags); |
|---|
| 135 | 138 | if (!buf) { |
|---|
| 136 | | - DHD_ERROR(("%s: prio work fifo allocation failed \n", __FUNCTION__)); |
|---|
| 139 | + DHD_ERROR(("%s: prio work fifo allocation failed\n", |
|---|
| 140 | + __FUNCTION__)); |
|---|
| 137 | 141 | goto return_null; |
|---|
| 138 | 142 | } |
|---|
| 139 | 143 | |
|---|
| .. | .. |
|---|
| 146 | 150 | |
|---|
| 147 | 151 | /* allocate buffer to hold work events */ |
|---|
| 148 | 152 | fifo_size = DHD_WORK_FIFO_SIZE; |
|---|
| 149 | | - fifo_size = is_power_of_2(fifo_size)? fifo_size : roundup_pow_of_two(fifo_size); |
|---|
| 153 | + fifo_size = is_power_of_2(fifo_size) ? fifo_size : |
|---|
| 154 | + roundup_pow_of_two(fifo_size); |
|---|
| 150 | 155 | buf = (u8*)kzalloc(fifo_size, flags); |
|---|
| 151 | 156 | if (!buf) { |
|---|
| 152 | | - DHD_ERROR(("%s: work fifo allocation failed \n", __FUNCTION__)); |
|---|
| 157 | + DHD_ERROR(("%s: work fifo allocation failed\n", __FUNCTION__)); |
|---|
| 153 | 158 | goto return_null; |
|---|
| 154 | 159 | } |
|---|
| 155 | 160 | |
|---|
| .. | .. |
|---|
| 161 | 166 | } |
|---|
| 162 | 167 | |
|---|
| 163 | 168 | work->dhd_info = dhd_info; |
|---|
| 164 | | - DHD_ERROR(("%s: work queue initialized \n", __FUNCTION__)); |
|---|
| 169 | + work->event_skip_mask = 0; |
|---|
| 170 | + DHD_ERROR(("%s: work queue initialized\n", __FUNCTION__)); |
|---|
| 165 | 171 | return work; |
|---|
| 166 | 172 | |
|---|
| 167 | 173 | return_null: |
|---|
| 168 | | - |
|---|
| 169 | | - if (work) |
|---|
| 174 | + if (work) { |
|---|
| 170 | 175 | dhd_deferred_work_deinit(work); |
|---|
| 176 | + } |
|---|
| 171 | 177 | |
|---|
| 172 | 178 | return NULL; |
|---|
| 173 | 179 | } |
|---|
| .. | .. |
|---|
| 177 | 183 | { |
|---|
| 178 | 184 | struct dhd_deferred_wq *deferred_work = work; |
|---|
| 179 | 185 | |
|---|
| 180 | | - |
|---|
| 181 | 186 | if (!deferred_work) { |
|---|
| 182 | | - DHD_ERROR(("%s: deferred work has been freed alread \n", __FUNCTION__)); |
|---|
| 187 | + DHD_ERROR(("%s: deferred work has been freed already\n", |
|---|
| 188 | + __FUNCTION__)); |
|---|
| 183 | 189 | return; |
|---|
| 184 | 190 | } |
|---|
| 185 | 191 | |
|---|
| .. | .. |
|---|
| 190 | 196 | * free work event fifo. |
|---|
| 191 | 197 | * kfifo_free frees locally allocated fifo buffer |
|---|
| 192 | 198 | */ |
|---|
| 193 | | - if (deferred_work->prio_fifo) |
|---|
| 199 | + if (deferred_work->prio_fifo) { |
|---|
| 194 | 200 | dhd_kfifo_free(deferred_work->prio_fifo); |
|---|
| 201 | + } |
|---|
| 195 | 202 | |
|---|
| 196 | | - if (deferred_work->work_fifo) |
|---|
| 203 | + if (deferred_work->work_fifo) { |
|---|
| 197 | 204 | dhd_kfifo_free(deferred_work->work_fifo); |
|---|
| 205 | + } |
|---|
| 198 | 206 | |
|---|
| 199 | 207 | kfree(deferred_work); |
|---|
| 208 | +} |
|---|
| 209 | + |
|---|
| 210 | +/* select kfifo according to priority */ |
|---|
| 211 | +static inline struct kfifo * |
|---|
| 212 | +dhd_deferred_work_select_kfifo(struct dhd_deferred_wq *deferred_wq, |
|---|
| 213 | + u8 priority) |
|---|
| 214 | +{ |
|---|
| 215 | + if (priority == DHD_WQ_WORK_PRIORITY_HIGH) { |
|---|
| 216 | + return deferred_wq->prio_fifo; |
|---|
| 217 | + } else if (priority == DHD_WQ_WORK_PRIORITY_LOW) { |
|---|
| 218 | + return deferred_wq->work_fifo; |
|---|
| 219 | + } else { |
|---|
| 220 | + return NULL; |
|---|
| 221 | + } |
|---|
| 200 | 222 | } |
|---|
| 201 | 223 | |
|---|
| 202 | 224 | /* |
|---|
| .. | .. |
|---|
| 207 | 229 | dhd_deferred_schedule_work(void *workq, void *event_data, u8 event, |
|---|
| 208 | 230 | event_handler_t event_handler, u8 priority) |
|---|
| 209 | 231 | { |
|---|
| 210 | | - struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *) workq; |
|---|
| 211 | | - struct dhd_deferred_event_t deferred_event; |
|---|
| 212 | | - int status; |
|---|
| 232 | + struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)workq; |
|---|
| 233 | + struct kfifo *fifo; |
|---|
| 234 | + dhd_deferred_event_t deferred_event; |
|---|
| 235 | + int bytes_copied = 0; |
|---|
| 213 | 236 | |
|---|
| 214 | 237 | if (!deferred_wq) { |
|---|
| 215 | | - DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__)); |
|---|
| 238 | + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); |
|---|
| 216 | 239 | ASSERT(0); |
|---|
| 217 | 240 | return DHD_WQ_STS_UNINITIALIZED; |
|---|
| 218 | 241 | } |
|---|
| 219 | 242 | |
|---|
| 220 | 243 | if (!event || (event >= DHD_MAX_WQ_EVENTS)) { |
|---|
| 221 | | - DHD_ERROR(("%s: Unknown event \n", __FUNCTION__)); |
|---|
| 244 | + DHD_ERROR(("%s: unknown event, event=%d\n", __FUNCTION__, |
|---|
| 245 | + event)); |
|---|
| 222 | 246 | return DHD_WQ_STS_UNKNOWN_EVENT; |
|---|
| 247 | + } |
|---|
| 248 | + |
|---|
| 249 | + if (!priority || (priority >= DHD_WQ_MAX_PRIORITY)) { |
|---|
| 250 | + DHD_ERROR(("%s: unknown priority, priority=%d\n", |
|---|
| 251 | + __FUNCTION__, priority)); |
|---|
| 252 | + return DHD_WQ_STS_UNKNOWN_PRIORITY; |
|---|
| 253 | + } |
|---|
| 254 | + |
|---|
| 255 | + if ((deferred_wq->event_skip_mask & (1 << event))) { |
|---|
| 256 | + DHD_ERROR(("%s: Skip event requested. Mask = 0x%x\n", |
|---|
| 257 | + __FUNCTION__, deferred_wq->event_skip_mask)); |
|---|
| 258 | + return DHD_WQ_STS_EVENT_SKIPPED; |
|---|
| 223 | 259 | } |
|---|
| 224 | 260 | |
|---|
| 225 | 261 | /* |
|---|
| .. | .. |
|---|
| 235 | 271 | deferred_event.event_data = event_data; |
|---|
| 236 | 272 | deferred_event.event_handler = event_handler; |
|---|
| 237 | 273 | |
|---|
| 238 | | - if (priority == DHD_WORK_PRIORITY_HIGH) { |
|---|
| 239 | | - status = kfifo_in_spinlocked(deferred_wq->prio_fifo, &deferred_event, |
|---|
| 240 | | - DEFRD_EVT_SIZE, &deferred_wq->work_lock); |
|---|
| 241 | | - } else { |
|---|
| 242 | | - status = kfifo_in_spinlocked(deferred_wq->work_fifo, &deferred_event, |
|---|
| 274 | + fifo = dhd_deferred_work_select_kfifo(deferred_wq, priority); |
|---|
| 275 | + if (DHD_FIFO_HAS_FREE_SPACE(fifo)) { |
|---|
| 276 | + bytes_copied = kfifo_in_spinlocked(fifo, &deferred_event, |
|---|
| 243 | 277 | DEFRD_EVT_SIZE, &deferred_wq->work_lock); |
|---|
| 244 | 278 | } |
|---|
| 245 | | - |
|---|
| 246 | | - if (!status) { |
|---|
| 279 | + if (bytes_copied != DEFRD_EVT_SIZE) { |
|---|
| 280 | + DHD_ERROR(("%s: failed to schedule deferred work, " |
|---|
| 281 | + "priority=%d, bytes_copied=%d\n", __FUNCTION__, |
|---|
| 282 | + priority, bytes_copied)); |
|---|
| 247 | 283 | return DHD_WQ_STS_SCHED_FAILED; |
|---|
| 248 | 284 | } |
|---|
| 249 | 285 | schedule_work((struct work_struct *)deferred_wq); |
|---|
| 250 | 286 | return DHD_WQ_STS_OK; |
|---|
| 251 | 287 | } |
|---|
| 252 | 288 | |
|---|
| 253 | | -static int |
|---|
| 254 | | -dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, struct dhd_deferred_event_t *event) |
|---|
| 289 | +static bool |
|---|
| 290 | +dhd_get_scheduled_work(struct dhd_deferred_wq *deferred_wq, |
|---|
| 291 | + dhd_deferred_event_t *event) |
|---|
| 255 | 292 | { |
|---|
| 256 | | - int status = 0; |
|---|
| 293 | + int bytes_copied = 0; |
|---|
| 257 | 294 | |
|---|
| 258 | 295 | if (!deferred_wq) { |
|---|
| 259 | | - DHD_ERROR(("%s: work queue not initialized \n", __FUNCTION__)); |
|---|
| 296 | + DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); |
|---|
| 260 | 297 | return DHD_WQ_STS_UNINITIALIZED; |
|---|
| 261 | 298 | } |
|---|
| 262 | 299 | |
|---|
| .. | .. |
|---|
| 269 | 306 | ASSERT(kfifo_esize(deferred_wq->prio_fifo) == 1); |
|---|
| 270 | 307 | ASSERT(kfifo_esize(deferred_wq->work_fifo) == 1); |
|---|
| 271 | 308 | |
|---|
| 272 | | - /* first read priorit event fifo */ |
|---|
| 273 | | - status = kfifo_out_spinlocked(deferred_wq->prio_fifo, event, |
|---|
| 274 | | - DEFRD_EVT_SIZE, &deferred_wq->work_lock); |
|---|
| 275 | | - |
|---|
| 276 | | - if (!status) { |
|---|
| 277 | | - /* priority fifo is empty. Now read low prio work fifo */ |
|---|
| 278 | | - status = kfifo_out_spinlocked(deferred_wq->work_fifo, event, |
|---|
| 279 | | - DEFRD_EVT_SIZE, &deferred_wq->work_lock); |
|---|
| 309 | + /* handle priority work */ |
|---|
| 310 | + if (DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->prio_fifo)) { |
|---|
| 311 | + bytes_copied = kfifo_out_spinlocked(deferred_wq->prio_fifo, |
|---|
| 312 | + event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); |
|---|
| 280 | 313 | } |
|---|
| 281 | 314 | |
|---|
| 282 | | - return status; |
|---|
| 315 | + /* handle normal work if priority work doesn't have enough data */ |
|---|
| 316 | + if ((bytes_copied != DEFRD_EVT_SIZE) && |
|---|
| 317 | + DHD_FIFO_HAS_ENOUGH_DATA(deferred_wq->work_fifo)) { |
|---|
| 318 | + bytes_copied = kfifo_out_spinlocked(deferred_wq->work_fifo, |
|---|
| 319 | + event, DEFRD_EVT_SIZE, &deferred_wq->work_lock); |
|---|
| 320 | + } |
|---|
| 321 | + |
|---|
| 322 | + return (bytes_copied == DEFRD_EVT_SIZE); |
|---|
| 323 | +} |
|---|
| 324 | + |
|---|
| 325 | +static inline void |
|---|
| 326 | +dhd_deferred_dump_work_event(dhd_deferred_event_t *work_event) |
|---|
| 327 | +{ |
|---|
| 328 | + if (!work_event) { |
|---|
| 329 | + DHD_ERROR(("%s: work_event is null\n", __FUNCTION__)); |
|---|
| 330 | + return; |
|---|
| 331 | + } |
|---|
| 332 | + |
|---|
| 333 | + DHD_ERROR(("%s: work_event->event = %d\n", __FUNCTION__, |
|---|
| 334 | + work_event->event)); |
|---|
| 335 | + DHD_ERROR(("%s: work_event->event_data = %p\n", __FUNCTION__, |
|---|
| 336 | + work_event->event_data)); |
|---|
| 337 | + DHD_ERROR(("%s: work_event->event_handler = %p\n", __FUNCTION__, |
|---|
| 338 | + work_event->event_handler)); |
|---|
| 283 | 339 | } |
|---|
| 284 | 340 | |
|---|
| 285 | 341 | /* |
|---|
| .. | .. |
|---|
| 288 | 344 | static void |
|---|
| 289 | 345 | dhd_deferred_work_handler(struct work_struct *work) |
|---|
| 290 | 346 | { |
|---|
| 291 | | - struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work; |
|---|
| 292 | | - struct dhd_deferred_event_t work_event; |
|---|
| 293 | | - int status; |
|---|
| 347 | + struct dhd_deferred_wq *deferred_work = (struct dhd_deferred_wq *)work; |
|---|
| 348 | + dhd_deferred_event_t work_event; |
|---|
| 294 | 349 | |
|---|
| 295 | 350 | if (!deferred_work) { |
|---|
| 296 | 351 | DHD_ERROR(("%s: work queue not initialized\n", __FUNCTION__)); |
|---|
| .. | .. |
|---|
| 298 | 353 | } |
|---|
| 299 | 354 | |
|---|
| 300 | 355 | do { |
|---|
| 301 | | - status = dhd_get_scheduled_work(deferred_work, &work_event); |
|---|
| 302 | | - DHD_TRACE(("%s: event to handle %d \n", __FUNCTION__, status)); |
|---|
| 303 | | - if (!status) { |
|---|
| 304 | | - DHD_TRACE(("%s: No event to handle %d \n", __FUNCTION__, status)); |
|---|
| 356 | + if (!dhd_get_scheduled_work(deferred_work, &work_event)) { |
|---|
| 357 | + DHD_TRACE(("%s: no event to handle\n", __FUNCTION__)); |
|---|
| 305 | 358 | break; |
|---|
| 306 | 359 | } |
|---|
| 307 | 360 | |
|---|
| 308 | | - if (work_event.event > DHD_MAX_WQ_EVENTS) { |
|---|
| 309 | | - DHD_TRACE(("%s: Unknown event %d \n", __FUNCTION__, work_event.event)); |
|---|
| 310 | | - break; |
|---|
| 361 | + if (work_event.event >= DHD_MAX_WQ_EVENTS) { |
|---|
| 362 | + DHD_ERROR(("%s: unknown event\n", __FUNCTION__)); |
|---|
| 363 | + dhd_deferred_dump_work_event(&work_event); |
|---|
| 364 | + ASSERT(work_event.event < DHD_MAX_WQ_EVENTS); |
|---|
| 365 | + continue; |
|---|
| 311 | 366 | } |
|---|
| 312 | 367 | |
|---|
| 313 | 368 | if (work_event.event_handler) { |
|---|
| 314 | 369 | work_event.event_handler(deferred_work->dhd_info, |
|---|
| 315 | 370 | work_event.event_data, work_event.event); |
|---|
| 316 | 371 | } else { |
|---|
| 317 | | - DHD_ERROR(("%s: event not defined %d\n", __FUNCTION__, work_event.event)); |
|---|
| 372 | + DHD_ERROR(("%s: event handler is null\n", |
|---|
| 373 | + __FUNCTION__)); |
|---|
| 374 | + dhd_deferred_dump_work_event(&work_event); |
|---|
| 375 | + ASSERT(work_event.event_handler != NULL); |
|---|
| 318 | 376 | } |
|---|
| 319 | 377 | } while (1); |
|---|
| 378 | + |
|---|
| 320 | 379 | return; |
|---|
| 321 | 380 | } |
|---|
| 381 | + |
|---|
| 382 | +void |
|---|
| 383 | +dhd_deferred_work_set_skip(void *work, u8 event, bool set) |
|---|
| 384 | +{ |
|---|
| 385 | + struct dhd_deferred_wq *deferred_wq = (struct dhd_deferred_wq *)work; |
|---|
| 386 | + |
|---|
| 387 | + if (!deferred_wq || !event || (event >= DHD_MAX_WQ_EVENTS)) { |
|---|
| 388 | + DHD_ERROR(("%s: Invalid!!\n", __FUNCTION__)); |
|---|
| 389 | + return; |
|---|
| 390 | + } |
|---|
| 391 | + |
|---|
| 392 | + if (set) { |
|---|
| 393 | + /* Set */ |
|---|
| 394 | + deferred_wq->event_skip_mask |= (1 << event); |
|---|
| 395 | + } else { |
|---|
| 396 | + /* Clear */ |
|---|
| 397 | + deferred_wq->event_skip_mask &= ~(1 << event); |
|---|
| 398 | + } |
|---|
| 399 | +} |
|---|