.. | .. |
---|
23 | 23 | * |
---|
24 | 24 | */ |
---|
25 | 25 | |
---|
26 | | -#include <drm/drmP.h> |
---|
27 | | - |
---|
28 | 26 | #include "dm_services_types.h" |
---|
29 | 27 | #include "dc.h" |
---|
30 | 28 | |
---|
.. | .. |
---|
32 | 30 | #include "amdgpu_dm.h" |
---|
33 | 31 | #include "amdgpu_dm_irq.h" |
---|
34 | 32 | |
---|
| 33 | +/** |
---|
| 34 | + * DOC: overview |
---|
| 35 | + * |
---|
| 36 | + * DM provides another layer of IRQ management on top of what the base driver |
---|
| 37 | + * already provides. This is something that could be cleaned up, and is a |
---|
| 38 | + * future TODO item. |
---|
| 39 | + * |
---|
| 40 | + * The base driver provides IRQ source registration with DRM, handler |
---|
| 41 | + * registration into the base driver's IRQ table, and a handler callback |
---|
| 42 | + * amdgpu_irq_handler(), with which DRM calls on interrupts. This generic |
---|
| 43 | + * handler looks up the IRQ table, and calls the respective |
---|
| 44 | + * &amdgpu_irq_src_funcs.process hookups. |
---|
| 45 | + * |
---|
| 46 | + * What DM provides on top are two IRQ tables specifically for top-half and |
---|
| 47 | + * bottom-half IRQ handling, with the bottom-half implementing workqueues: |
---|
| 48 | + * |
---|
| 49 | + * - &amdgpu_display_manager.irq_handler_list_high_tab |
---|
| 50 | + * - &amdgpu_display_manager.irq_handler_list_low_tab |
---|
| 51 | + * |
---|
| 52 | + * They override the base driver's IRQ table, and the effect can be seen |
---|
| 53 | + * in the hooks that DM provides for &amdgpu_irq_src_funcs.process. They |
---|
| 54 | + * are all set to the DM generic handler amdgpu_dm_irq_handler(), which looks up |
---|
| 55 | + * DM's IRQ tables. However, in order for base driver to recognize this hook, DM |
---|
| 56 | + * still needs to register the IRQ with the base driver. See |
---|
| 57 | + * dce110_register_irq_handlers() and dcn10_register_irq_handlers(). |
---|
| 58 | + * |
---|
| 59 | + * To expose DC's hardware interrupt toggle to the base driver, DM implements |
---|
| 60 | + * &amdgpu_irq_src_funcs.set hooks. Base driver calls it through |
---|
| 61 | + * amdgpu_irq_update() to enable or disable the interrupt. |
---|
| 62 | + */ |
---|
| 63 | + |
---|
35 | 64 | /****************************************************************************** |
---|
36 | 65 | * Private declarations. |
---|
37 | 66 | *****************************************************************************/ |
---|
38 | 67 | |
---|
39 | | -struct handler_common_data { |
---|
/**
 * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
 *
 * @list: Linked list entry referencing the next/previous handler
 * @handler: Handler function
 * @handler_arg: Argument passed to the handler when triggered
 * @dm: DM which this handler belongs to
 * @irq_source: DC interrupt source that this handler is registered for
 * @work: work struct used to defer this handler to a workqueue; initialized
 *        (INIT_WORK with dm_irq_work_func) only for low-context handlers
 */
struct amdgpu_dm_irq_handler_data {
	struct list_head list;
	interrupt_handler handler;
	void *handler_arg;

	struct amdgpu_display_manager *dm;
	/* DAL irq source which registered for this interrupt. */
	enum dc_irq_source irq_source;
	struct work_struct work;
};
---|
53 | 87 | |
---|
54 | 88 | #define DM_IRQ_TABLE_LOCK(adev, flags) \ |
---|
.. | .. |
---|
61 | 95 | * Private functions. |
---|
62 | 96 | *****************************************************************************/ |
---|
63 | 97 | |
---|
64 | | -static void init_handler_common_data(struct handler_common_data *hcd, |
---|
| 98 | +static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, |
---|
65 | 99 | void (*ih)(void *), |
---|
66 | 100 | void *args, |
---|
67 | 101 | struct amdgpu_display_manager *dm) |
---|
.. | .. |
---|
72 | 106 | } |
---|
73 | 107 | |
---|
74 | 108 | /** |
---|
75 | | - * dm_irq_work_func - Handle an IRQ outside of the interrupt handler proper. |
---|
| 109 | + * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper. |
---|
76 | 110 | * |
---|
77 | 111 | * @work: work struct |
---|
78 | 112 | */ |
---|
79 | 113 | static void dm_irq_work_func(struct work_struct *work) |
---|
80 | 114 | { |
---|
81 | | - struct list_head *entry; |
---|
82 | | - struct irq_list_head *irq_list_head = |
---|
83 | | - container_of(work, struct irq_list_head, work); |
---|
84 | | - struct list_head *handler_list = &irq_list_head->head; |
---|
85 | | - struct amdgpu_dm_irq_handler_data *handler_data; |
---|
| 115 | + struct amdgpu_dm_irq_handler_data *handler_data = |
---|
| 116 | + container_of(work, struct amdgpu_dm_irq_handler_data, work); |
---|
86 | 117 | |
---|
87 | | - list_for_each(entry, handler_list) { |
---|
88 | | - handler_data = |
---|
89 | | - list_entry( |
---|
90 | | - entry, |
---|
91 | | - struct amdgpu_dm_irq_handler_data, |
---|
92 | | - hcd.list); |
---|
93 | | - |
---|
94 | | - DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n", |
---|
95 | | - handler_data->irq_source); |
---|
96 | | - |
---|
97 | | - DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n", |
---|
98 | | - handler_data->irq_source); |
---|
99 | | - |
---|
100 | | - handler_data->hcd.handler(handler_data->hcd.handler_arg); |
---|
101 | | - } |
---|
| 118 | + handler_data->handler(handler_data->handler_arg); |
---|
102 | 119 | |
---|
103 | 120 | /* Call a DAL subcomponent which registered for interrupt notification |
---|
104 | 121 | * at INTERRUPT_LOW_IRQ_CONTEXT. |
---|
105 | 122 | * (The most common use is HPD interrupt) */ |
---|
106 | 123 | } |
---|
107 | 124 | |
---|
108 | | -/** |
---|
109 | | - * Remove a handler and return a pointer to hander list from which the |
---|
| 125 | +/* |
---|
| 126 | + * Remove a handler and return a pointer to handler list from which the |
---|
110 | 127 | * handler was removed. |
---|
111 | 128 | */ |
---|
112 | 129 | static struct list_head *remove_irq_handler(struct amdgpu_device *adev, |
---|
.. | .. |
---|
130 | 147 | break; |
---|
131 | 148 | case INTERRUPT_LOW_IRQ_CONTEXT: |
---|
132 | 149 | default: |
---|
133 | | - hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head; |
---|
| 150 | + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; |
---|
134 | 151 | break; |
---|
135 | 152 | } |
---|
136 | 153 | |
---|
137 | 154 | list_for_each_safe(entry, tmp, hnd_list) { |
---|
138 | 155 | |
---|
139 | 156 | handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, |
---|
140 | | - hcd.list); |
---|
| 157 | + list); |
---|
141 | 158 | |
---|
142 | 159 | if (ih == handler) { |
---|
143 | 160 | /* Found our handler. Remove it from the list. */ |
---|
144 | | - list_del(&handler->hcd.list); |
---|
| 161 | + list_del(&handler->list); |
---|
145 | 162 | handler_removed = true; |
---|
146 | 163 | break; |
---|
147 | 164 | } |
---|
.. | .. |
---|
209 | 226 | * Note: caller is responsible for input validation. |
---|
210 | 227 | *****************************************************************************/ |
---|
211 | 228 | |
---|
| 229 | +/** |
---|
| 230 | + * amdgpu_dm_irq_register_interrupt() - Register a handler within DM. |
---|
| 231 | + * @adev: The base driver device containing the DM device. |
---|
| 232 | + * @int_params: Interrupt parameters containing the source, and handler context |
---|
| 233 | + * @ih: Function pointer to the interrupt handler to register |
---|
| 234 | + * @handler_args: Arguments passed to the handler when the interrupt occurs |
---|
| 235 | + * |
---|
| 236 | + * Register an interrupt handler for the given IRQ source, under the given |
---|
| 237 | + * context. The context can either be high or low. High context handlers are |
---|
| 238 | + * executed directly within ISR context, while low context is executed within a |
---|
| 239 | + * workqueue, thereby allowing operations that sleep. |
---|
| 240 | + * |
---|
 * Registered handlers are called in a FIFO manner, i.e. the earliest
 * registered handler will be called first.
---|
| 243 | + * |
---|
| 244 | + * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ |
---|
| 245 | + * source, handler function, and args |
---|
| 246 | + */ |
---|
212 | 247 | void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, |
---|
213 | 248 | struct dc_interrupt_params *int_params, |
---|
214 | 249 | void (*ih)(void *), |
---|
.. | .. |
---|
228 | 263 | return DAL_INVALID_IRQ_HANDLER_IDX; |
---|
229 | 264 | } |
---|
230 | 265 | |
---|
231 | | - memset(handler_data, 0, sizeof(*handler_data)); |
---|
232 | | - |
---|
233 | | - init_handler_common_data(&handler_data->hcd, ih, handler_args, |
---|
234 | | - &adev->dm); |
---|
| 266 | + init_handler_common_data(handler_data, ih, handler_args, &adev->dm); |
---|
235 | 267 | |
---|
236 | 268 | irq_source = int_params->irq_source; |
---|
237 | 269 | |
---|
.. | .. |
---|
246 | 278 | break; |
---|
247 | 279 | case INTERRUPT_LOW_IRQ_CONTEXT: |
---|
248 | 280 | default: |
---|
249 | | - hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head; |
---|
| 281 | + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; |
---|
| 282 | + INIT_WORK(&handler_data->work, dm_irq_work_func); |
---|
250 | 283 | break; |
---|
251 | 284 | } |
---|
252 | 285 | |
---|
253 | | - list_add_tail(&handler_data->hcd.list, hnd_list); |
---|
| 286 | + list_add_tail(&handler_data->list, hnd_list); |
---|
254 | 287 | |
---|
255 | 288 | DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); |
---|
256 | 289 | |
---|
.. | .. |
---|
268 | 301 | return handler_data; |
---|
269 | 302 | } |
---|
270 | 303 | |
---|
| 304 | +/** |
---|
| 305 | + * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table |
---|
| 306 | + * @adev: The base driver device containing the DM device |
---|
| 307 | + * @irq_source: IRQ source to remove the given handler from |
---|
| 308 | + * @ih: Function pointer to the interrupt handler to unregister |
---|
| 309 | + * |
---|
| 310 | + * Go through both low and high context IRQ tables, and find the given handler |
---|
| 311 | + * for the given irq source. If found, remove it. Otherwise, do nothing. |
---|
| 312 | + */ |
---|
271 | 313 | void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, |
---|
272 | 314 | enum dc_irq_source irq_source, |
---|
273 | 315 | void *ih) |
---|
.. | .. |
---|
302 | 344 | } |
---|
303 | 345 | } |
---|
304 | 346 | |
---|
| 347 | +/** |
---|
| 348 | + * amdgpu_dm_irq_init() - Initialize DM IRQ management |
---|
| 349 | + * @adev: The base driver device containing the DM device |
---|
| 350 | + * |
---|
| 351 | + * Initialize DM's high and low context IRQ tables. |
---|
| 352 | + * |
---|
| 353 | + * The N by M table contains N IRQ sources, with M |
---|
| 354 | + * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The |
---|
| 355 | + * list_heads are initialized here. When an interrupt n is triggered, all m |
---|
| 356 | + * handlers are called in sequence, FIFO according to registration order. |
---|
| 357 | + * |
---|
| 358 | + * The low context table requires special steps to initialize, since handlers |
---|
 * will be deferred to a workqueue. See &amdgpu_dm_irq_handler_data.work.
---|
| 360 | + */ |
---|
305 | 361 | int amdgpu_dm_irq_init(struct amdgpu_device *adev) |
---|
306 | 362 | { |
---|
307 | 363 | int src; |
---|
308 | | - struct irq_list_head *lh; |
---|
| 364 | + struct list_head *lh; |
---|
309 | 365 | |
---|
310 | 366 | DRM_DEBUG_KMS("DM_IRQ\n"); |
---|
311 | 367 | |
---|
.. | .. |
---|
314 | 370 | for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { |
---|
315 | 371 | /* low context handler list init */ |
---|
316 | 372 | lh = &adev->dm.irq_handler_list_low_tab[src]; |
---|
317 | | - INIT_LIST_HEAD(&lh->head); |
---|
318 | | - INIT_WORK(&lh->work, dm_irq_work_func); |
---|
319 | | - |
---|
| 373 | + INIT_LIST_HEAD(lh); |
---|
320 | 374 | /* high context handler init */ |
---|
321 | 375 | INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]); |
---|
322 | 376 | } |
---|
.. | .. |
---|
324 | 378 | return 0; |
---|
325 | 379 | } |
---|
326 | 380 | |
---|
327 | | -/* DM IRQ and timer resource release */ |
---|
| 381 | +/** |
---|
| 382 | + * amdgpu_dm_irq_fini() - Tear down DM IRQ management |
---|
| 383 | + * @adev: The base driver device containing the DM device |
---|
| 384 | + * |
---|
| 385 | + * Flush all work within the low context IRQ table. |
---|
| 386 | + */ |
---|
328 | 387 | void amdgpu_dm_irq_fini(struct amdgpu_device *adev) |
---|
329 | 388 | { |
---|
330 | 389 | int src; |
---|
331 | | - struct irq_list_head *lh; |
---|
| 390 | + struct list_head *lh; |
---|
| 391 | + struct list_head *entry, *tmp; |
---|
| 392 | + struct amdgpu_dm_irq_handler_data *handler; |
---|
332 | 393 | unsigned long irq_table_flags; |
---|
| 394 | + |
---|
333 | 395 | DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); |
---|
334 | 396 | for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { |
---|
335 | 397 | DM_IRQ_TABLE_LOCK(adev, irq_table_flags); |
---|
.. | .. |
---|
338 | 400 | * (because no code can schedule a new one). */ |
---|
339 | 401 | lh = &adev->dm.irq_handler_list_low_tab[src]; |
---|
340 | 402 | DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); |
---|
341 | | - flush_work(&lh->work); |
---|
| 403 | + |
---|
| 404 | + if (!list_empty(lh)) { |
---|
| 405 | + list_for_each_safe(entry, tmp, lh) { |
---|
| 406 | + handler = list_entry( |
---|
| 407 | + entry, |
---|
| 408 | + struct amdgpu_dm_irq_handler_data, |
---|
| 409 | + list); |
---|
| 410 | + flush_work(&handler->work); |
---|
| 411 | + } |
---|
| 412 | + } |
---|
342 | 413 | } |
---|
343 | 414 | } |
---|
344 | 415 | |
---|
.. | .. |
---|
348 | 419 | struct list_head *hnd_list_h; |
---|
349 | 420 | struct list_head *hnd_list_l; |
---|
350 | 421 | unsigned long irq_table_flags; |
---|
| 422 | + struct list_head *entry, *tmp; |
---|
| 423 | + struct amdgpu_dm_irq_handler_data *handler; |
---|
351 | 424 | |
---|
352 | 425 | DM_IRQ_TABLE_LOCK(adev, irq_table_flags); |
---|
353 | 426 | |
---|
.. | .. |
---|
358 | 431 | * will be disabled from manage_dm_interrupts on disable CRTC. |
---|
359 | 432 | */ |
---|
360 | 433 | for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) { |
---|
361 | | - hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; |
---|
| 434 | + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; |
---|
362 | 435 | hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; |
---|
363 | 436 | if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) |
---|
364 | 437 | dc_interrupt_set(adev->dm.dc, src, false); |
---|
365 | 438 | |
---|
366 | 439 | DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); |
---|
367 | | - flush_work(&adev->dm.irq_handler_list_low_tab[src].work); |
---|
368 | 440 | |
---|
| 441 | + if (!list_empty(hnd_list_l)) { |
---|
| 442 | + list_for_each_safe (entry, tmp, hnd_list_l) { |
---|
| 443 | + handler = list_entry( |
---|
| 444 | + entry, |
---|
| 445 | + struct amdgpu_dm_irq_handler_data, |
---|
| 446 | + list); |
---|
| 447 | + flush_work(&handler->work); |
---|
| 448 | + } |
---|
| 449 | + } |
---|
369 | 450 | DM_IRQ_TABLE_LOCK(adev, irq_table_flags); |
---|
370 | 451 | } |
---|
371 | 452 | |
---|
.. | .. |
---|
385 | 466 | |
---|
386 | 467 | /* re-enable short pulse interrupts HW interrupt */ |
---|
387 | 468 | for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) { |
---|
388 | | - hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; |
---|
| 469 | + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; |
---|
389 | 470 | hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; |
---|
390 | 471 | if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) |
---|
391 | 472 | dc_interrupt_set(adev->dm.dc, src, true); |
---|
.. | .. |
---|
411 | 492 | * will be enabled from manage_dm_interrupts on enable CRTC. |
---|
412 | 493 | */ |
---|
413 | 494 | for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) { |
---|
414 | | - hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head; |
---|
| 495 | + hnd_list_l = &adev->dm.irq_handler_list_low_tab[src]; |
---|
415 | 496 | hnd_list_h = &adev->dm.irq_handler_list_high_tab[src]; |
---|
416 | 497 | if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h)) |
---|
417 | 498 | dc_interrupt_set(adev->dm.dc, src, true); |
---|
.. | .. |
---|
421 | 502 | return 0; |
---|
422 | 503 | } |
---|
423 | 504 | |
---|
424 | | -/** |
---|
/*
 * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
 * "irq_source".
 *
 * Each low-context handler carries its own work_struct, so a handler whose
 * work is still pending cannot be queued a second time.
 *
 * NOTE(review): the handler list is walked here without DM_IRQ_TABLE_LOCK,
 * while registration/unregistration modify the same list under the lock —
 * confirm this cannot race with (un)registration.
 */
static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
					enum dc_irq_source irq_source)
{
	struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
	struct amdgpu_dm_irq_handler_data *handler_data;
	bool work_queued = false;

	/* No low-context handlers registered for this source. */
	if (list_empty(handler_list))
		return;

	/* Queue the first handler whose work item is not already pending. */
	list_for_each_entry (handler_data, handler_list, list) {
		if (!queue_work(system_highpri_wq, &handler_data->work)) {
			continue;
		} else {
			work_queued = true;
			break;
		}
	}

	if (!work_queued) {
		struct amdgpu_dm_irq_handler_data *handler_data_add;
		/* get the amdgpu_dm_irq_handler_data of first item pointed by handler_list */
		handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);

		/*
		 * Every existing work item is still pending; clone the first
		 * handler so this interrupt is not dropped.
		 * NOTE(review): the clone is linked into handler_list and
		 * never freed on this path, so the list grows by one entry
		 * each time this fallback fires — verify this is intended.
		 */
		handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC);
		if (!handler_data_add) {
			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
			return;
		}

		/* copy new amdgpu_dm_irq_handler_data members from handler_data */
		handler_data_add->handler = handler_data->handler;
		handler_data_add->handler_arg = handler_data->handler_arg;
		handler_data_add->dm = handler_data->dm;
		handler_data_add->irq_source = irq_source;

		list_add_tail(&handler_data_add->list, handler_list);

		INIT_WORK(&handler_data_add->work, dm_irq_work_func);

		if (queue_work(system_highpri_wq, &handler_data_add->work))
			DRM_DEBUG("Queued work for handling interrupt from "
				  "display for IRQ source %d\n",
				  irq_source);
		else
			DRM_ERROR("Failed to queue work for handling interrupt "
				  "from display for IRQ source %d\n",
				  irq_source);
	}
}
---|
448 | 560 | |
---|
449 | | -/** amdgpu_dm_irq_immediate_work |
---|
450 | | - * Callback high irq work immediately, don't send to work queue |
---|
| 561 | +/* |
---|
| 562 | + * amdgpu_dm_irq_immediate_work |
---|
| 563 | + * Callback high irq work immediately, don't send to work queue |
---|
451 | 564 | */ |
---|
452 | 565 | static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, |
---|
453 | 566 | enum dc_irq_source irq_source) |
---|
454 | 567 | { |
---|
455 | 568 | struct amdgpu_dm_irq_handler_data *handler_data; |
---|
456 | | - struct list_head *entry; |
---|
457 | 569 | unsigned long irq_table_flags; |
---|
458 | 570 | |
---|
459 | 571 | DM_IRQ_TABLE_LOCK(adev, irq_table_flags); |
---|
460 | 572 | |
---|
461 | | - list_for_each( |
---|
462 | | - entry, |
---|
463 | | - &adev->dm.irq_handler_list_high_tab[irq_source]) { |
---|
464 | | - |
---|
465 | | - handler_data = |
---|
466 | | - list_entry( |
---|
467 | | - entry, |
---|
468 | | - struct amdgpu_dm_irq_handler_data, |
---|
469 | | - hcd.list); |
---|
470 | | - |
---|
| 573 | + list_for_each_entry(handler_data, |
---|
| 574 | + &adev->dm.irq_handler_list_high_tab[irq_source], |
---|
| 575 | + list) { |
---|
471 | 576 | /* Call a subcomponent which registered for immediate |
---|
472 | 577 | * interrupt notification */ |
---|
473 | | - handler_data->hcd.handler(handler_data->hcd.handler_arg); |
---|
| 578 | + handler_data->handler(handler_data->handler_arg); |
---|
474 | 579 | } |
---|
475 | 580 | |
---|
476 | 581 | DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); |
---|
477 | 582 | } |
---|
478 | 583 | |
---|
479 | | -/* |
---|
480 | | - * amdgpu_dm_irq_handler |
---|
| 584 | +/** |
---|
| 585 | + * amdgpu_dm_irq_handler - Generic DM IRQ handler |
---|
| 586 | + * @adev: amdgpu base driver device containing the DM device |
---|
| 587 | + * @source: Unused |
---|
| 588 | + * @entry: Data about the triggered interrupt |
---|
481 | 589 | * |
---|
482 | | - * Generic IRQ handler, calls all registered high irq work immediately, and |
---|
483 | | - * schedules work for low irq |
---|
| 590 | + * Calls all registered high irq work immediately, and schedules work for low |
---|
| 591 | + * irq. The DM IRQ table is used to find the corresponding handlers. |
---|
484 | 592 | */ |
---|
485 | 593 | static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, |
---|
486 | 594 | struct amdgpu_irq_src *source, |
---|
.. | .. |
---|
594 | 702 | __func__); |
---|
595 | 703 | } |
---|
596 | 704 | |
---|
| 705 | +static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev, |
---|
| 706 | + struct amdgpu_irq_src *source, |
---|
| 707 | + unsigned int crtc_id, |
---|
| 708 | + enum amdgpu_interrupt_state state) |
---|
| 709 | +{ |
---|
| 710 | + return dm_irq_state( |
---|
| 711 | + adev, |
---|
| 712 | + source, |
---|
| 713 | + crtc_id, |
---|
| 714 | + state, |
---|
| 715 | + IRQ_TYPE_VUPDATE, |
---|
| 716 | + __func__); |
---|
| 717 | +} |
---|
| 718 | + |
---|
/* CRTC interrupt source hooks; installed as adev->crtc_irq.funcs. */
static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
	.set = amdgpu_dm_set_crtc_irq_state,
	.process = amdgpu_dm_irq_handler,
};

/* VUPDATE interrupt source hooks; installed as adev->vupdate_irq.funcs. */
static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
	.set = amdgpu_dm_set_vupdate_irq_state,
	.process = amdgpu_dm_irq_handler,
};
---|
601 | 728 | |
---|
.. | .. |
---|
615 | 742 | adev->crtc_irq.num_types = adev->mode_info.num_crtc; |
---|
616 | 743 | adev->crtc_irq.funcs = &dm_crtc_irq_funcs; |
---|
617 | 744 | |
---|
| 745 | + adev->vupdate_irq.num_types = adev->mode_info.num_crtc; |
---|
| 746 | + adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs; |
---|
| 747 | + |
---|
618 | 748 | adev->pageflip_irq.num_types = adev->mode_info.num_crtc; |
---|
619 | 749 | adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs; |
---|
620 | 750 | |
---|
.. | .. |
---|
622 | 752 | adev->hpd_irq.funcs = &dm_hpd_irq_funcs; |
---|
623 | 753 | } |
---|
624 | 754 | |
---|
625 | | -/* |
---|
| 755 | +/** |
---|
626 | 756 | * amdgpu_dm_hpd_init - hpd setup callback. |
---|
627 | 757 | * |
---|
628 | 758 | * @adev: amdgpu_device pointer |
---|
.. | .. |
---|
632 | 762 | */ |
---|
633 | 763 | void amdgpu_dm_hpd_init(struct amdgpu_device *adev) |
---|
634 | 764 | { |
---|
635 | | - struct drm_device *dev = adev->ddev; |
---|
| 765 | + struct drm_device *dev = adev_to_drm(adev); |
---|
636 | 766 | struct drm_connector *connector; |
---|
| 767 | + struct drm_connector_list_iter iter; |
---|
637 | 768 | |
---|
638 | | - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
---|
| 769 | + drm_connector_list_iter_begin(dev, &iter); |
---|
| 770 | + drm_for_each_connector_iter(connector, &iter) { |
---|
639 | 771 | struct amdgpu_dm_connector *amdgpu_dm_connector = |
---|
640 | 772 | to_amdgpu_dm_connector(connector); |
---|
641 | 773 | |
---|
.. | .. |
---|
653 | 785 | true); |
---|
654 | 786 | } |
---|
655 | 787 | } |
---|
| 788 | + drm_connector_list_iter_end(&iter); |
---|
656 | 789 | } |
---|
657 | 790 | |
---|
658 | 791 | /** |
---|
.. | .. |
---|
665 | 798 | */ |
---|
666 | 799 | void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) |
---|
667 | 800 | { |
---|
668 | | - struct drm_device *dev = adev->ddev; |
---|
| 801 | + struct drm_device *dev = adev_to_drm(adev); |
---|
669 | 802 | struct drm_connector *connector; |
---|
| 803 | + struct drm_connector_list_iter iter; |
---|
670 | 804 | |
---|
671 | | - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
---|
| 805 | + drm_connector_list_iter_begin(dev, &iter); |
---|
| 806 | + drm_for_each_connector_iter(connector, &iter) { |
---|
672 | 807 | struct amdgpu_dm_connector *amdgpu_dm_connector = |
---|
673 | 808 | to_amdgpu_dm_connector(connector); |
---|
674 | 809 | const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; |
---|
.. | .. |
---|
681 | 816 | false); |
---|
682 | 817 | } |
---|
683 | 818 | } |
---|
| 819 | + drm_connector_list_iter_end(&iter); |
---|
684 | 820 | } |
---|