.. | ..
| 1 | +// SPDX-License-Identifier: GPL-2.0
1 | 2 | /*
2 | 3 | * Copyright (C) 2012 Intel, Inc.
3 | 4 | * Copyright (C) 2013 Intel, Inc.
.. | ..
46 | 47 | * exchange is properly mapped during a transfer.
47 | 48 | */
48 | 49 |
49 | | -
50 | 50 | #include <linux/module.h>
51 | 51 | #include <linux/mod_devicetable.h>
52 | 52 | #include <linux/interrupt.h>
.. | ..
59 | 59 | #include <linux/bitops.h>
60 | 60 | #include <linux/slab.h>
61 | 61 | #include <linux/io.h>
62 | | -#include <linux/goldfish.h>
63 | 62 | #include <linux/dma-mapping.h>
64 | 63 | #include <linux/mm.h>
65 | 64 | #include <linux/acpi.h>
| 65 | +#include <linux/bug.h>
| 66 | +#include "goldfish_pipe_qemu.h"
66 | 67 |
67 | 68 | /*
68 | 69 | * Update this when something changes in the driver's behavior so the host
.. | ..
73 | 74 | PIPE_CURRENT_DEVICE_VERSION = 2
74 | 75 | };
75 | 76 |
76 | | -/*
77 | | - * IMPORTANT: The following constants must match the ones used and defined
78 | | - * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
79 | | - */
80 | | -
81 | | -/* List of bitflags returned in status of CMD_POLL command */
82 | | -enum PipePollFlags {
83 | | - PIPE_POLL_IN = 1 << 0,
84 | | - PIPE_POLL_OUT = 1 << 1,
85 | | - PIPE_POLL_HUP = 1 << 2
86 | | -};
87 | | -
88 | | -/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
89 | | -enum PipeErrors {
90 | | - PIPE_ERROR_INVAL = -1,
91 | | - PIPE_ERROR_AGAIN = -2,
92 | | - PIPE_ERROR_NOMEM = -3,
93 | | - PIPE_ERROR_IO = -4
94 | | -};
95 | | -
96 | | -/* Bit-flags used to signal events from the emulator */
97 | | -enum PipeWakeFlags {
98 | | - PIPE_WAKE_CLOSED = 1 << 0, /* emulator closed pipe */
99 | | - PIPE_WAKE_READ = 1 << 1, /* pipe can now be read from */
100 | | - PIPE_WAKE_WRITE = 1 << 2 /* pipe can now be written to */
101 | | -};
102 | | -
103 | | -/* Bit flags for the 'flags' field */
104 | | -enum PipeFlagsBits {
105 | | - BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
106 | | - BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
107 | | - BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
108 | | -};
109 | | -
110 | | -enum PipeRegs {
111 | | - PIPE_REG_CMD = 0,
112 | | -
113 | | - PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
114 | | - PIPE_REG_SIGNAL_BUFFER = 8,
115 | | - PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
116 | | -
117 | | - PIPE_REG_OPEN_BUFFER_HIGH = 20,
118 | | - PIPE_REG_OPEN_BUFFER = 24,
119 | | -
120 | | - PIPE_REG_VERSION = 36,
121 | | -
122 | | - PIPE_REG_GET_SIGNALLED = 48,
123 | | -};
124 | | -
125 | | -enum PipeCmdCode {
126 | | - PIPE_CMD_OPEN = 1, /* to be used by the pipe device itself */
127 | | - PIPE_CMD_CLOSE,
128 | | - PIPE_CMD_POLL,
129 | | - PIPE_CMD_WRITE,
130 | | - PIPE_CMD_WAKE_ON_WRITE,
131 | | - PIPE_CMD_READ,
132 | | - PIPE_CMD_WAKE_ON_READ,
133 | | -
134 | | - /*
135 | | - * TODO(zyy): implement a deferred read/write execution to allow
136 | | - * parallel processing of pipe operations on the host.
137 | | - */
138 | | - PIPE_CMD_WAKE_ON_DONE_IO,
139 | | -};
140 | | -
141 | 77 | enum {
142 | 78 | MAX_BUFFERS_PER_COMMAND = 336,
143 | 79 | MAX_SIGNALLED_PIPES = 64,
.. | ..
145 | 81 | };
146 | 82 |
147 | 83 | struct goldfish_pipe_dev;
148 | | -struct goldfish_pipe;
149 | | -struct goldfish_pipe_command;
150 | 84 |
151 | 85 | /* A per-pipe command structure, shared with the host */
152 | 86 | struct goldfish_pipe_command {
153 | | - s32 cmd; /* PipeCmdCode, guest -> host */
154 | | - s32 id; /* pipe id, guest -> host */
155 | | - s32 status; /* command execution status, host -> guest */
| 87 | + s32 cmd; /* PipeCmdCode, guest -> host */
| 88 | + s32 id; /* pipe id, guest -> host */
| 89 | + s32 status; /* command execution status, host -> guest */
156 | 90 | s32 reserved; /* to pad to 64-bit boundary */
157 | 91 | union {
158 | 92 | /* Parameters for PIPE_CMD_{READ,WRITE} */
.. | ..
184 | 118 | /* Device-level set of buffers shared with the host */
185 | 119 | struct goldfish_pipe_dev_buffers {
186 | 120 | struct open_command_param open_command_params;
187 | | - struct signalled_pipe_buffer signalled_pipe_buffers[
188 | | - MAX_SIGNALLED_PIPES];
| 121 | + struct signalled_pipe_buffer
| 122 | + signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
189 | 123 | };
190 | 124 |
191 | 125 | /* This data type models a given pipe instance */
192 | 126 | struct goldfish_pipe {
193 | 127 | /* pipe ID - index into goldfish_pipe_dev::pipes array */
194 | 128 | u32 id;
| 129 | +
195 | 130 | /* The wake flags pipe is waiting for
196 | 131 | * Note: not protected with any lock, uses atomic operations
197 | 132 | * and barriers to make it thread-safe.
198 | 133 | */
199 | 134 | unsigned long flags;
| 135 | +
200 | 136 | /* wake flags host have signalled,
201 | 137 | * - protected by goldfish_pipe_dev::lock
202 | 138 | */
.. | ..
220 | 156 |
221 | 157 | /* A wake queue for sleeping until host signals an event */
222 | 158 | wait_queue_head_t wake_queue;
| 159 | +
223 | 160 | /* Pointer to the parent goldfish_pipe_dev instance */
224 | 161 | struct goldfish_pipe_dev *dev;
| 162 | +
| 163 | + /* A buffer of pages, too large to fit into a stack frame */
| 164 | + struct page *pages[MAX_BUFFERS_PER_COMMAND];
225 | 165 | };
226 | 166 |
227 | 167 | /* The global driver data. Holds a reference to the i/o page used to
.. | ..
229 | 169 | * waiting to be awoken.
230 | 170 | */
231 | 171 | struct goldfish_pipe_dev {
| 172 | + /* A magic number to check if this is an instance of this struct */
| 173 | + void *magic;
| 174 | +
232 | 175 | /*
233 | 176 | * Global device spinlock. Protects the following members:
234 | 177 | * - pipes, pipes_capacity
.. | ..
261 | 204 | /* Head of a doubly linked list of signalled pipes */
262 | 205 | struct goldfish_pipe *first_signalled_pipe;
263 | 206 |
| 207 | + /* ptr to platform device's device struct */
| 208 | + struct device *pdev_dev;
| 209 | +
264 | 210 | /* Some device-specific data */
265 | 211 | int irq;
266 | 212 | int version;
267 | 213 | unsigned char __iomem *base;
| 214 | +
| 215 | + /* an irq tasklet to run goldfish_interrupt_task */
| 216 | + struct tasklet_struct irq_tasklet;
| 217 | +
| 218 | + struct miscdevice miscdev;
268 | 219 | };
269 | 220 |
270 | | -static struct goldfish_pipe_dev pipe_dev[1] = {};
271 | | -
272 | | -static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
| 221 | +static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
| 222 | + enum PipeCmdCode cmd)
273 | 223 | {
274 | 224 | pipe->command_buffer->cmd = cmd;
275 | 225 | /* failure by default */
.. | ..
278 | 228 | return pipe->command_buffer->status;
279 | 229 | }
280 | 230 |
281 | | -static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
| 231 | +static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
282 | 232 | {
283 | 233 | int status;
284 | 234 |
285 | 235 | if (mutex_lock_interruptible(&pipe->lock))
286 | 236 | return PIPE_ERROR_IO;
287 | | - status = goldfish_cmd_locked(pipe, cmd);
| 237 | + status = goldfish_pipe_cmd_locked(pipe, cmd);
288 | 238 | mutex_unlock(&pipe->lock);
289 | 239 | return status;
290 | 240 | }
.. | ..
307 | 257 | }
308 | 258 | }
309 | 259 |
310 | | -static int pin_user_pages(unsigned long first_page, unsigned long last_page,
311 | | - unsigned int last_page_size, int is_write,
312 | | - struct page *pages[MAX_BUFFERS_PER_COMMAND],
313 | | - unsigned int *iter_last_page_size)
| 260 | +static int goldfish_pin_pages(unsigned long first_page,
| 261 | + unsigned long last_page,
| 262 | + unsigned int last_page_size,
| 263 | + int is_write,
| 264 | + struct page *pages[MAX_BUFFERS_PER_COMMAND],
| 265 | + unsigned int *iter_last_page_size)
314 | 266 | {
315 | 267 | int ret;
316 | 268 | int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
.. | ..
322 | 274 | *iter_last_page_size = last_page_size;
323 | 275 | }
324 | 276 |
325 | | - ret = get_user_pages_fast(
326 | | - first_page, requested_pages, !is_write, pages);
| 277 | + ret = pin_user_pages_fast(first_page, requested_pages,
| 278 | + !is_write ? FOLL_WRITE : 0,
| 279 | + pages);
327 | 280 | if (ret <= 0)
328 | 281 | return -EFAULT;
329 | 282 | if (ret < requested_pages)
330 | 283 | *iter_last_page_size = PAGE_SIZE;
| 284 | +
331 | 285 | return ret;
332 | | -
333 | | -}
334 | | -
335 | | -static void release_user_pages(struct page **pages, int pages_count,
336 | | - int is_write, s32 consumed_size)
337 | | -{
338 | | - int i;
339 | | -
340 | | - for (i = 0; i < pages_count; i++) {
341 | | - if (!is_write && consumed_size > 0)
342 | | - set_page_dirty(pages[i]);
343 | | - put_page(pages[i]);
344 | | - }
345 | 286 | }
346 | 287 |
347 | 288 | /* Populate the call parameters, merging adjacent pages together */
348 | | -static void populate_rw_params(
349 | | - struct page **pages, int pages_count,
350 | | - unsigned long address, unsigned long address_end,
351 | | - unsigned long first_page, unsigned long last_page,
352 | | - unsigned int iter_last_page_size, int is_write,
353 | | - struct goldfish_pipe_command *command)
| 289 | +static void populate_rw_params(struct page **pages,
| 290 | + int pages_count,
| 291 | + unsigned long address,
| 292 | + unsigned long address_end,
| 293 | + unsigned long first_page,
| 294 | + unsigned long last_page,
| 295 | + unsigned int iter_last_page_size,
| 296 | + int is_write,
| 297 | + struct goldfish_pipe_command *command)
354 | 298 | {
355 | 299 | /*
356 | 300 | * Process the first page separately - it's the only page that
.. | ..
382 | 326 | }
383 | 327 |
384 | 328 | static int transfer_max_buffers(struct goldfish_pipe *pipe,
385 | | - unsigned long address, unsigned long address_end, int is_write,
386 | | - unsigned long last_page, unsigned int last_page_size,
387 | | - s32 *consumed_size, int *status)
| 329 | + unsigned long address,
| 330 | + unsigned long address_end,
| 331 | + int is_write,
| 332 | + unsigned long last_page,
| 333 | + unsigned int last_page_size,
| 334 | + s32 *consumed_size,
| 335 | + int *status)
388 | 336 | {
389 | | - static struct page *pages[MAX_BUFFERS_PER_COMMAND];
390 | 337 | unsigned long first_page = address & PAGE_MASK;
391 | 338 | unsigned int iter_last_page_size;
392 | | - int pages_count = pin_user_pages(first_page, last_page,
393 | | - last_page_size, is_write,
394 | | - pages, &iter_last_page_size);
395 | | -
396 | | - if (pages_count < 0)
397 | | - return pages_count;
| 339 | + int pages_count;
398 | 340 |
399 | 341 | /* Serialize access to the pipe command buffers */
400 | 342 | if (mutex_lock_interruptible(&pipe->lock))
401 | 343 | return -ERESTARTSYS;
402 | 344 |
403 | | - populate_rw_params(pages, pages_count, address, address_end,
404 | | - first_page, last_page, iter_last_page_size, is_write,
405 | | - pipe->command_buffer);
| 345 | + pages_count = goldfish_pin_pages(first_page, last_page,
| 346 | + last_page_size, is_write,
| 347 | + pipe->pages, &iter_last_page_size);
| 348 | + if (pages_count < 0) {
| 349 | + mutex_unlock(&pipe->lock);
| 350 | + return pages_count;
| 351 | + }
| 352 | +
| 353 | + populate_rw_params(pipe->pages, pages_count, address, address_end,
| 354 | + first_page, last_page, iter_last_page_size, is_write,
| 355 | + pipe->command_buffer);
406 | 356 |
407 | 357 | /* Transfer the data */
408 | | - *status = goldfish_cmd_locked(pipe,
| 358 | + *status = goldfish_pipe_cmd_locked(pipe,
409 | 359 | is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
410 | 360 |
411 | 361 | *consumed_size = pipe->command_buffer->rw_params.consumed_size;
412 | 362 |
413 | | - release_user_pages(pages, pages_count, is_write, *consumed_size);
| 363 | + unpin_user_pages_dirty_lock(pipe->pages, pages_count,
| 364 | + !is_write && *consumed_size > 0);
414 | 365 |
415 | 366 | mutex_unlock(&pipe->lock);
416 | | -
417 | 367 | return 0;
418 | 368 | }
419 | 369 |
420 | 370 | static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
421 | 371 | {
422 | | - u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
| 372 | + u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
423 | 373 |
424 | | - set_bit(wakeBit, &pipe->flags);
| 374 | + set_bit(wake_bit, &pipe->flags);
425 | 375 |
426 | 376 | /* Tell the emulator we're going to wait for a wake event */
427 | | - (void)goldfish_cmd(pipe,
| 377 | + goldfish_pipe_cmd(pipe,
428 | 378 | is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
429 | 379 |
430 | | - while (test_bit(wakeBit, &pipe->flags)) {
431 | | - if (wait_event_interruptible(
432 | | - pipe->wake_queue,
433 | | - !test_bit(wakeBit, &pipe->flags)))
| 380 | + while (test_bit(wake_bit, &pipe->flags)) {
| 381 | + if (wait_event_interruptible(pipe->wake_queue,
| 382 | + !test_bit(wake_bit, &pipe->flags)))
434 | 383 | return -ERESTARTSYS;
435 | 384 |
436 | 385 | if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
.. | ..
441 | 390 | }
442 | 391 |
443 | 392 | static ssize_t goldfish_pipe_read_write(struct file *filp,
444 | | - char __user *buffer, size_t bufflen, int is_write)
| 393 | + char __user *buffer,
| 394 | + size_t bufflen,
| 395 | + int is_write)
445 | 396 | {
446 | 397 | struct goldfish_pipe *pipe = filp->private_data;
447 | 398 | int count = 0, ret = -EINVAL;
.. | ..
455 | 406 | if (unlikely(bufflen == 0))
456 | 407 | return 0;
457 | 408 | /* Check the buffer range for access */
458 | | - if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
459 | | - buffer, bufflen)))
| 409 | + if (unlikely(!access_ok(buffer, bufflen)))
460 | 410 | return -EFAULT;
461 | 411 |
462 | 412 | address = (unsigned long)buffer;
.. | ..
469 | 419 | int status;
470 | 420 |
471 | 421 | ret = transfer_max_buffers(pipe, address, address_end, is_write,
472 | | - last_page, last_page_size, &consumed_size,
473 | | - &status);
| 422 | + last_page, last_page_size,
| 423 | + &consumed_size, &status);
474 | 424 | if (ret < 0)
475 | 425 | break;
476 | 426 |
.. | ..
496 | 446 | * err.
497 | 447 | */
498 | 448 | if (status != PIPE_ERROR_AGAIN)
499 | | - pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
| 449 | + dev_err_ratelimited(pipe->dev->pdev_dev,
| 450 | + "backend error %d on %s\n",
500 | 451 | status, is_write ? "write" : "read");
501 | 452 | break;
502 | 453 | }
.. | ..
522 | 473 | }
523 | 474 |
524 | 475 | static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
525 | | - size_t bufflen, loff_t *ppos)
| 476 | + size_t bufflen, loff_t *ppos)
526 | 477 | {
527 | 478 | return goldfish_pipe_read_write(filp, buffer, bufflen,
528 | | - /* is_write */ 0);
| 479 | + /* is_write */ 0);
529 | 480 | }
530 | 481 |
531 | 482 | static ssize_t goldfish_pipe_write(struct file *filp,
532 | | - const char __user *buffer, size_t bufflen,
533 | | - loff_t *ppos)
| 483 | + const char __user *buffer, size_t bufflen,
| 484 | + loff_t *ppos)
534 | 485 | {
535 | | - return goldfish_pipe_read_write(filp,
536 | | - /* cast away the const */(char __user *)buffer, bufflen,
537 | | - /* is_write */ 1);
| 486 | + /* cast away the const */
| 487 | + char __user *no_const_buffer = (char __user *)buffer;
| 488 | +
| 489 | + return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
| 490 | + /* is_write */ 1);
538 | 491 | }
539 | 492 |
540 | 493 | static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
.. | ..
545 | 498 |
546 | 499 | poll_wait(filp, &pipe->wake_queue, wait);
547 | 500 |
548 | | - status = goldfish_cmd(pipe, PIPE_CMD_POLL);
| 501 | + status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
549 | 502 | if (status < 0)
550 | 503 | return -ERESTARTSYS;
551 | 504 |
.. | ..
562 | 515 | }
563 | 516 |
564 | 517 | static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
565 | | - u32 id, u32 flags)
| 518 | + u32 id, u32 flags)
566 | 519 | {
567 | 520 | struct goldfish_pipe *pipe;
568 | 521 |
.. | ..
574 | 527 | return;
575 | 528 | pipe->signalled_flags |= flags;
576 | 529 |
577 | | - if (pipe->prev_signalled || pipe->next_signalled
578 | | - || dev->first_signalled_pipe == pipe)
| 530 | + if (pipe->prev_signalled || pipe->next_signalled ||
| 531 | + dev->first_signalled_pipe == pipe)
579 | 532 | return; /* already in the list */
580 | 533 | pipe->next_signalled = dev->first_signalled_pipe;
581 | 534 | if (dev->first_signalled_pipe)
.. | ..
584 | 537 | }
585 | 538 |
586 | 539 | static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
587 | | - struct goldfish_pipe *pipe) {
| 540 | + struct goldfish_pipe *pipe)
| 541 | +{
588 | 542 | if (pipe->prev_signalled)
589 | 543 | pipe->prev_signalled->next_signalled = pipe->next_signalled;
590 | 544 | if (pipe->next_signalled)
.. | ..
623 | 577 | return pipe;
624 | 578 | }
625 | 579 |
626 | | -static void goldfish_interrupt_task(unsigned long unused)
| 580 | +static void goldfish_interrupt_task(unsigned long dev_addr)
627 | 581 | {
628 | | - struct goldfish_pipe_dev *dev = pipe_dev;
629 | 582 | /* Iterate over the signalled pipes and wake them one by one */
| 583 | + struct goldfish_pipe_dev *dev = (struct goldfish_pipe_dev *)dev_addr;
630 | 584 | struct goldfish_pipe *pipe;
631 | 585 | int wakes;
632 | 586 |
.. | ..
646 | 600 | wake_up_interruptible(&pipe->wake_queue);
647 | 601 | }
648 | 602 | }
649 | | -static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
| 603 | +
| 604 | +static void goldfish_pipe_device_deinit(struct platform_device *pdev,
| 605 | + struct goldfish_pipe_dev *dev);
650 | 606 |
651 | 607 | /*
652 | 608 | * The general idea of the interrupt handling:
.. | ..
668 | 624 | unsigned long flags;
669 | 625 | struct goldfish_pipe_dev *dev = dev_id;
670 | 626 |
671 | | - if (dev != pipe_dev)
| 627 | + if (dev->magic != &goldfish_pipe_device_deinit)
672 | 628 | return IRQ_NONE;
673 | 629 |
674 | 630 | /* Request the signalled pipes from the device */
.. | ..
689 | 645 |
690 | 646 | spin_unlock_irqrestore(&dev->lock, flags);
691 | 647 |
692 | | - tasklet_schedule(&goldfish_interrupt_tasklet);
| 648 | + tasklet_schedule(&dev->irq_tasklet);
693 | 649 | return IRQ_HANDLED;
694 | 650 | }
695 | 651 |
.. | ..
702 | 658 | return id;
703 | 659 |
704 | 660 | {
705 | | - /* Reallocate the array */
| 661 | + /* Reallocate the array.
| 662 | + * Since get_free_pipe_id_locked runs with interrupts disabled,
| 663 | + * we don't want to make calls that could lead to sleep.
| 664 | + */
706 | 665 | u32 new_capacity = 2 * dev->pipes_capacity;
707 | 666 | struct goldfish_pipe **pipes =
708 | 667 | kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
.. | ..
715 | 674 | dev->pipes_capacity = new_capacity;
716 | 675 | }
717 | 676 | return id;
| 677 | +}
| 678 | +
| 679 | +/* A helper function to get the instance of goldfish_pipe_dev from file */
| 680 | +static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
| 681 | +{
| 682 | + struct miscdevice *miscdev = file->private_data;
| 683 | +
| 684 | + return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
718 | 685 | }
719 | 686 |
720 | 687 | /**
.. | ..
730 | 697 | */
731 | 698 | static int goldfish_pipe_open(struct inode *inode, struct file *file)
732 | 699 | {
733 | | - struct goldfish_pipe_dev *dev = pipe_dev;
| 700 | + struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
734 | 701 | unsigned long flags;
735 | 702 | int id;
736 | 703 | int status;
737 | 704 |
738 | 705 | /* Allocate new pipe kernel object */
739 | 706 | struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
740 | | - if (pipe == NULL)
| 707 | +
| 708 | + if (!pipe)
741 | 709 | return -ENOMEM;
742 | 710 |
743 | 711 | pipe->dev = dev;
.. | ..
748 | 716 | * Command buffer needs to be allocated on its own page to make sure
749 | 717 | * it is physically contiguous in host's address space.
750 | 718 | */
| 719 | + BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
751 | 720 | pipe->command_buffer =
752 | 721 | (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
753 | 722 | if (!pipe->command_buffer) {
.. | ..
772 | 741 | MAX_BUFFERS_PER_COMMAND;
773 | 742 | dev->buffers->open_command_params.command_buffer_ptr =
774 | 743 | (u64)(unsigned long)__pa(pipe->command_buffer);
775 | | - status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
| 744 | + status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
776 | 745 | spin_unlock_irqrestore(&dev->lock, flags);
777 | 746 | if (status < 0)
778 | 747 | goto err_cmd;
.. | ..
798 | 767 | struct goldfish_pipe_dev *dev = pipe->dev;
799 | 768 |
800 | 769 | /* The guest is closing the channel, so tell the emulator right now */
801 | | - (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);
| 770 | + goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
802 | 771 |
803 | 772 | spin_lock_irqsave(&dev->lock, flags);
804 | 773 | dev->pipes[pipe->id] = NULL;
.. | ..
820 | 789 | .release = goldfish_pipe_release,
821 | 790 | };
822 | 791 |
823 | | -static struct miscdevice goldfish_pipe_dev = {
824 | | - .minor = MISC_DYNAMIC_MINOR,
825 | | - .name = "goldfish_pipe",
826 | | - .fops = &goldfish_pipe_fops,
827 | | -};
828 | | -
829 | | -static int goldfish_pipe_device_init(struct platform_device *pdev)
| 792 | +static void init_miscdevice(struct miscdevice *miscdev)
830 | 793 | {
831 | | - char *page;
832 | | - struct goldfish_pipe_dev *dev = pipe_dev;
833 | | - int err = devm_request_irq(&pdev->dev, dev->irq,
834 | | - goldfish_pipe_interrupt,
835 | | - IRQF_SHARED, "goldfish_pipe", dev);
| 794 | + memset(miscdev, 0, sizeof(*miscdev));
| 795 | +
| 796 | + miscdev->minor = MISC_DYNAMIC_MINOR;
| 797 | + miscdev->name = "goldfish_pipe";
| 798 | + miscdev->fops = &goldfish_pipe_fops;
| 799 | +}
| 800 | +
| 801 | +static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
| 802 | +{
| 803 | + const unsigned long paddr = __pa(addr);
| 804 | +
| 805 | + writel(upper_32_bits(paddr), porth);
| 806 | + writel(lower_32_bits(paddr), portl);
| 807 | +}
| 808 | +
| 809 | +static int goldfish_pipe_device_init(struct platform_device *pdev,
| 810 | + struct goldfish_pipe_dev *dev)
| 811 | +{
| 812 | + int err;
| 813 | +
| 814 | + tasklet_init(&dev->irq_tasklet, &goldfish_interrupt_task,
| 815 | + (unsigned long)dev);
| 816 | +
| 817 | + err = devm_request_irq(&pdev->dev, dev->irq,
| 818 | + goldfish_pipe_interrupt,
| 819 | + IRQF_SHARED, "goldfish_pipe", dev);
836 | 820 | if (err) {
837 | 821 | dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
838 | 822 | return err;
839 | 823 | }
840 | 824 |
841 | | - err = misc_register(&goldfish_pipe_dev);
| 825 | + init_miscdevice(&dev->miscdev);
| 826 | + err = misc_register(&dev->miscdev);
842 | 827 | if (err) {
843 | 828 | dev_err(&pdev->dev, "unable to register v2 device\n");
844 | 829 | return err;
845 | 830 | }
846 | 831 |
| 832 | + dev->pdev_dev = &pdev->dev;
847 | 833 | dev->first_signalled_pipe = NULL;
848 | 834 | dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
849 | 835 | dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
850 | | - GFP_KERNEL);
851 | | - if (!dev->pipes)
| 836 | + GFP_KERNEL);
| 837 | + if (!dev->pipes) {
| 838 | + misc_deregister(&dev->miscdev);
852 | 839 | return -ENOMEM;
| 840 | + }
853 | 841 |
854 | 842 | /*
855 | 843 | * We're going to pass two buffers, open_command_params and
.. | ..
857 | 845 | * needs to be contained in a single physical page. The easiest choice
858 | 846 | * is to just allocate a page and place the buffers in it.
859 | 847 | */
860 | | - if (WARN_ON(sizeof(*dev->buffers) > PAGE_SIZE))
861 | | - return -ENOMEM;
862 | | -
863 | | - page = (char *)__get_free_page(GFP_KERNEL);
864 | | - if (!page) {
| 848 | + BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
| 849 | + dev->buffers = (struct goldfish_pipe_dev_buffers *)
| 850 | + __get_free_page(GFP_KERNEL);
| 851 | + if (!dev->buffers) {
865 | 852 | kfree(dev->pipes);
| 853 | + misc_deregister(&dev->miscdev);
866 | 854 | return -ENOMEM;
867 | 855 | }
868 | | - dev->buffers = (struct goldfish_pipe_dev_buffers *)page;
869 | 856 |
870 | 857 | /* Send the buffer addresses to the host */
871 | | - {
872 | | - u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);
| 858 | + write_pa_addr(&dev->buffers->signalled_pipe_buffers,
| 859 | + dev->base + PIPE_REG_SIGNAL_BUFFER,
| 860 | + dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
873 | 861 |
874 | | - writel((u32)(unsigned long)(paddr >> 32),
875 | | - dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
876 | | - writel((u32)(unsigned long)paddr,
877 | | - dev->base + PIPE_REG_SIGNAL_BUFFER);
878 | | - writel((u32)MAX_SIGNALLED_PIPES,
879 | | - dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
| 862 | + writel(MAX_SIGNALLED_PIPES,
| 863 | + dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
880 | 864 |
881 | | - paddr = __pa(&dev->buffers->open_command_params);
882 | | - writel((u32)(unsigned long)(paddr >> 32),
883 | | - dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
884 | | - writel((u32)(unsigned long)paddr,
885 | | - dev->base + PIPE_REG_OPEN_BUFFER);
886 | | - }
| 865 | + write_pa_addr(&dev->buffers->open_command_params,
| 866 | + dev->base + PIPE_REG_OPEN_BUFFER,
| 867 | + dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
| 868 | +
| 869 | + platform_set_drvdata(pdev, dev);
887 | 870 | return 0;
888 | 871 | }
889 | 872 |
890 | | -static void goldfish_pipe_device_deinit(struct platform_device *pdev)
| 873 | +static void goldfish_pipe_device_deinit(struct platform_device *pdev,
| 874 | + struct goldfish_pipe_dev *dev)
891 | 875 | {
892 | | - struct goldfish_pipe_dev *dev = pipe_dev;
893 | | -
894 | | - misc_deregister(&goldfish_pipe_dev);
| 876 | + misc_deregister(&dev->miscdev);
| 877 | + tasklet_kill(&dev->irq_tasklet);
895 | 878 | kfree(dev->pipes);
896 | 879 | free_page((unsigned long)dev->buffers);
897 | 880 | }
898 | 881 |
899 | 882 | static int goldfish_pipe_probe(struct platform_device *pdev)
900 | 883 | {
901 | | - int err;
902 | 884 | struct resource *r;
903 | | - struct goldfish_pipe_dev *dev = pipe_dev;
| 885 | + struct goldfish_pipe_dev *dev;
904 | 886 |
905 | | - if (WARN_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE))
| 887 | + dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
| 888 | + if (!dev)
906 | 889 | return -ENOMEM;
907 | 890 |
908 | | - /* not thread safe, but this should not happen */
909 | | - WARN_ON(dev->base != NULL);
910 | | -
| 891 | + dev->magic = &goldfish_pipe_device_deinit;
911 | 892 | spin_lock_init(&dev->lock);
912 | 893 |
913 | 894 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
914 | | - if (r == NULL || resource_size(r) < PAGE_SIZE) {
| 895 | + if (!r || resource_size(r) < PAGE_SIZE) {
915 | 896 | dev_err(&pdev->dev, "can't allocate i/o page\n");
916 | 897 | return -EINVAL;
917 | 898 | }
918 | 899 | dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
919 | | - if (dev->base == NULL) {
| 900 | + if (!dev->base) {
920 | 901 | dev_err(&pdev->dev, "ioremap failed\n");
921 | 902 | return -EINVAL;
922 | 903 | }
923 | 904 |
924 | 905 | r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
925 | | - if (r == NULL) {
926 | | - err = -EINVAL;
927 | | - goto error;
928 | | - }
| 906 | + if (!r)
| 907 | + return -EINVAL;
| 908 | +
929 | 909 | dev->irq = r->start;
930 | 910 |
931 | 911 | /*
.. | ..
935 | 915 | * reading device version back: this allows the host implementation to
936 | 916 | * detect the old driver (if there was no version write before read).
937 | 917 | */
938 | | - writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
| 918 | + writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
939 | 919 | dev->version = readl(dev->base + PIPE_REG_VERSION);
940 | 920 | if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
941 | 921 | return -EINVAL;
942 | 922 |
943 | | - err = goldfish_pipe_device_init(pdev);
944 | | - if (!err)
945 | | - return 0;
946 | | -
947 | | -error:
948 | | - dev->base = NULL;
949 | | - return err;
| 923 | + return goldfish_pipe_device_init(pdev, dev);
950 | 924 | }
951 | 925 |
952 | 926 | static int goldfish_pipe_remove(struct platform_device *pdev)
953 | 927 | {
954 | | - struct goldfish_pipe_dev *dev = pipe_dev;
955 | | - goldfish_pipe_device_deinit(pdev);
956 | | - dev->base = NULL;
| 928 | + struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);
| 929 | +
| 930 | + goldfish_pipe_device_deinit(pdev, dev);
957 | 931 | return 0;
958 | 932 | }
959 | 933 |
.. | ..
981 | 955 |
982 | 956 | module_platform_driver(goldfish_pipe_driver);
983 | 957 | MODULE_AUTHOR("David Turner <digit@google.com>");
984 | | -MODULE_LICENSE("GPL");
| 958 | +MODULE_LICENSE("GPL v2");
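Editor's note on usage (not part of the patch above): the diff reworks the driver's internal state handling but leaves the character-device interface untouched, so guest user space still talks to the driver through /dev/goldfish_pipe with ordinary open/write/read calls. The sketch below is only a minimal illustration of that flow under stated assumptions: the service name "pipe:ping" is a hypothetical placeholder, and the convention that the first write names the host-side service is assumed rather than defined by this patch.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical service name; real hosts expose their own set. */
	static const char service[] = "pipe:ping";
	char reply[64];
	ssize_t n;
	int fd = open("/dev/goldfish_pipe", O_RDWR);

	if (fd < 0) {
		perror("open /dev/goldfish_pipe");
		return 1;
	}

	/* Assumed convention: the first write names the host-side service. */
	if (write(fd, service, sizeof(service)) != (ssize_t)sizeof(service)) {
		perror("connect");
		close(fd);
		return 1;
	}

	/* Blocks until the host signals data, mirroring the driver's wake path. */
	n = read(fd, reply, sizeof(reply));
	if (n > 0)
		printf("host sent %zd bytes\n", n);

	close(fd);
	return 0;
}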
---|