| .. | .. |
| 43 | 43 | */ |
| 44 | 44 | |
| 45 | 45 | #include <linux/irq.h> |
| 46 | | -#include <drm/drmP.h> |
| | 46 | +#include <linux/pci.h> |
| | 47 | + |
| 47 | 48 | #include <drm/drm_crtc_helper.h> |
| | 49 | +#include <drm/drm_irq.h> |
| | 50 | +#include <drm/drm_vblank.h> |
| 48 | 51 | #include <drm/amdgpu_drm.h> |
| 49 | 52 | #include "amdgpu.h" |
| 50 | 53 | #include "amdgpu_ih.h" |
| 51 | 54 | #include "atom.h" |
| 52 | 55 | #include "amdgpu_connectors.h" |
| 53 | 56 | #include "amdgpu_trace.h" |
| | 57 | +#include "amdgpu_amdkfd.h" |
| | 58 | +#include "amdgpu_ras.h" |
| 54 | 59 | |
| 55 | 60 | #include <linux/pm_runtime.h> |
| 56 | 61 | |
| .. | .. |
| 80 | 85 | { |
| 81 | 86 | struct amdgpu_device *adev = container_of(work, struct amdgpu_device, |
| 82 | 87 | hotplug_work); |
| 83 | | - struct drm_device *dev = adev->ddev; |
| | 88 | + struct drm_device *dev = adev_to_drm(adev); |
| 84 | 89 | struct drm_mode_config *mode_config = &dev->mode_config; |
| 85 | 90 | struct drm_connector *connector; |
| | 91 | + struct drm_connector_list_iter iter; |
| 86 | 92 | |
| 87 | 93 | mutex_lock(&mode_config->mutex); |
| 88 | | - list_for_each_entry(connector, &mode_config->connector_list, head) |
| | 94 | + drm_connector_list_iter_begin(dev, &iter); |
| | 95 | + drm_for_each_connector_iter(connector, &iter) |
| 89 | 96 | amdgpu_connector_hotplug(connector); |
| | 97 | + drm_connector_list_iter_end(&iter); |
| 90 | 98 | mutex_unlock(&mode_config->mutex); |
| 91 | 99 | /* Just fire off a uevent and let userspace tell us what to do */ |
| 92 | 100 | drm_helper_hpd_irq_event(dev); |
| 93 | | -} |
| 94 | | - |
| 95 | | -/** |
| 96 | | - * amdgpu_irq_reset_work_func - execute GPU reset |
| 97 | | - * |
| 98 | | - * @work: work struct pointer |
| 99 | | - * |
| 100 | | - * Execute scheduled GPU reset (Cayman+). |
| 101 | | - * This function is called when the IRQ handler thinks we need a GPU reset. |
| 102 | | - */ |
| 103 | | -static void amdgpu_irq_reset_work_func(struct work_struct *work) |
| 104 | | -{ |
| 105 | | - struct amdgpu_device *adev = container_of(work, struct amdgpu_device, |
| 106 | | - reset_work); |
| 107 | | - |
| 108 | | - if (!amdgpu_sriov_vf(adev)) |
| 109 | | - amdgpu_device_gpu_recover(adev, NULL, false); |
| 110 | 101 | } |
| 111 | 102 | |
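The hotplug worker now walks connectors through `drm_connector_list_iter` instead of iterating `mode_config->connector_list` directly; the iterator holds its own reference on each connector, so the list may change underneath it safely. A minimal sketch of the pattern, assuming a hypothetical `my_handle_connector()` callback (not an amdgpu function):

```c
#include <drm/drm_connector.h>
#include <drm/drm_device.h>

/* Hypothetical per-connector work, e.g. probing HPD state. */
static void my_handle_connector(struct drm_connector *connector)
{
}

/* Hedged sketch: safely walk every connector on the device. */
static void my_walk_connectors(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		my_handle_connector(connector);
	drm_connector_list_iter_end(&iter);
}
```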
| 112 | 103 | /** |
| .. | .. |
| 123 | 114 | int r; |
| 124 | 115 | |
| 125 | 116 | spin_lock_irqsave(&adev->irq.lock, irqflags); |
| 126 | | - for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) { |
| | 117 | + for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { |
| 127 | 118 | if (!adev->irq.client[i].sources) |
| 128 | 119 | continue; |
| 129 | 120 | |
| .. | .. |
| 160 | 151 | irqreturn_t amdgpu_irq_handler(int irq, void *arg) |
| 161 | 152 | { |
| 162 | 153 | struct drm_device *dev = (struct drm_device *) arg; |
| 163 | | - struct amdgpu_device *adev = dev->dev_private; |
| | 154 | + struct amdgpu_device *adev = drm_to_adev(dev); |
| 164 | 155 | irqreturn_t ret; |
| 165 | 156 | |
| 166 | | - ret = amdgpu_ih_process(adev); |
| | 157 | + ret = amdgpu_ih_process(adev, &adev->irq.ih); |
| 167 | 158 | if (ret == IRQ_HANDLED) |
| 168 | 159 | pm_runtime_mark_last_busy(dev->dev); |
| | 160 | + |
| | 161 | + /* For hardware that cannot enable the BIF ring for both the ras_controller_irq |
| | 162 | + * and ras_err_event_athub_irq IH cookies, the driver has to poll the status |
| | 163 | + * register to check whether the interrupt has been triggered, and properly |
| | 164 | + * ack the interrupt if it is there. |
| | 165 | + */ |
| | 166 | + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) { |
| | 167 | + if (adev->nbio.funcs && |
| | 168 | + adev->nbio.funcs->handle_ras_controller_intr_no_bifring) |
| | 169 | + adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev); |
| | 170 | + |
| | 171 | + if (adev->nbio.funcs && |
| | 172 | + adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring) |
| | 173 | + adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev); |
| | 174 | + } |
| | 175 | + |
| 169 | 176 | return ret; |
| | 177 | +} |
| | 178 | + |
| | 179 | +/** |
| | 180 | + * amdgpu_irq_handle_ih1 - kick off processing for IH1 |
| | 181 | + * |
| | 182 | + * @work: work structure in struct amdgpu_irq |
| | 183 | + * |
| | 184 | + * Kick off processing of IH ring 1. |
| | 185 | + */ |
| | 186 | +static void amdgpu_irq_handle_ih1(struct work_struct *work) |
| | 187 | +{ |
| | 188 | + struct amdgpu_device *adev = container_of(work, struct amdgpu_device, |
| | 189 | + irq.ih1_work); |
| | 190 | + |
| | 191 | + amdgpu_ih_process(adev, &adev->irq.ih1); |
| | 192 | +} |
| | 193 | + |
| | 194 | +/** |
| | 195 | + * amdgpu_irq_handle_ih2 - kick off processing for IH2 |
| | 196 | + * |
| | 197 | + * @work: work structure in struct amdgpu_irq |
| | 198 | + * |
| | 199 | + * Kick off processing of IH ring 2. |
| | 200 | + */ |
| | 201 | +static void amdgpu_irq_handle_ih2(struct work_struct *work) |
| | 202 | +{ |
| | 203 | + struct amdgpu_device *adev = container_of(work, struct amdgpu_device, |
| | 204 | + irq.ih2_work); |
| | 205 | + |
| | 206 | + amdgpu_ih_process(adev, &adev->irq.ih2); |
| 170 | 207 | } |
| 171 | 208 | |
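IH ring 0 is still drained directly in the hard-IRQ handler, while rings 1 and 2 are deferred to work items. That is the classic top-half/bottom-half split; a hedged, driver-agnostic sketch of the shape (all `my_*` names are illustrative, not amdgpu's):

```c
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct ring1_work;	/* bottom half for the slower ring */
};

/* Bottom half: runs in process context, may sleep. */
static void my_drain_ring1(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, ring1_work);

	/* walk the ring up to its write pointer and dispatch entries */
	(void)dev;
}

/* Top half: do the minimum in hard-IRQ context, defer the rest. */
static irqreturn_t my_irq_handler(int irq, void *arg)
{
	struct my_dev *dev = arg;

	schedule_work(&dev->ring1_work);
	return IRQ_HANDLED;
}
```

Setup pairs with this the same way the patch does: `INIT_WORK(&dev->ring1_work, my_drain_ring1)` at init time, and a flush on teardown.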
| 172 | 209 | /** |
| .. | .. |
| 211 | 248 | adev->irq.msi_enabled = false; |
| 212 | 249 | |
| 213 | 250 | if (amdgpu_msi_ok(adev)) { |
| 214 | | - int ret = pci_enable_msi(adev->pdev); |
| 215 | | - if (!ret) { |
| | 251 | + int nvec = pci_msix_vec_count(adev->pdev); |
| | 252 | + unsigned int flags; |
| | 253 | + |
| | 254 | + if (nvec <= 0) { |
| | 255 | + flags = PCI_IRQ_MSI; |
| | 256 | + } else { |
| | 257 | + flags = PCI_IRQ_MSI | PCI_IRQ_MSIX; |
| | 258 | + } |
| | 259 | + /* we only need one vector */ |
| | 260 | + nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); |
| | 261 | + if (nvec > 0) { |
| 216 | 262 | adev->irq.msi_enabled = true; |
| 217 | | - dev_dbg(adev->dev, "amdgpu: using MSI.\n"); |
| | 263 | + dev_dbg(adev->dev, "using MSI/MSI-X.\n"); |
| 218 | 264 | } |
| 219 | 265 | } |
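`pci_alloc_irq_vectors()` replaces the old `pci_enable_msi()` call: the driver asks for exactly one vector and lets the PCI core pick MSI-X when the function exposes a vector table, falling back to plain MSI otherwise. A hedged sketch of the same allocation pattern in a generic probe path:

```c
#include <linux/pci.h>

/* Hedged sketch: allocate a single MSI/MSI-X vector for a PCI device
 * and return the Linux IRQ number for it, or a negative errno. */
static int my_setup_one_vector(struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_MSI;
	int nvec;

	/* Prefer MSI-X when the device exposes a vector table. */
	if (pci_msix_vec_count(pdev) > 0)
		flags |= PCI_IRQ_MSIX;

	/* min_vecs == max_vecs == 1: we only need one vector. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (nvec < 0)
		return nvec;

	/* Vector 0 maps to an ordinary Linux IRQ number. */
	return pci_irq_vector(pdev, 0);
}
```

The matching teardown is `pci_free_irq_vectors(pdev)`, which is exactly what the fini hunk below switches to.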
| 220 | 266 | |
| 221 | 267 | if (!amdgpu_device_has_dc_support(adev)) { |
| 222 | 268 | if (!adev->enable_virtual_display) |
| 223 | 269 | /* Disable vblank IRQs aggressively for power-saving */ |
| 224 | | - /* XXX: can this be enabled for DC? */ |
| 225 | | - adev->ddev->vblank_disable_immediate = true; |
| | 270 | + adev_to_drm(adev)->vblank_disable_immediate = true; |
| 226 | 271 | |
| 227 | | - r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc); |
| | 272 | + r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); |
| 228 | 273 | if (r) |
| 229 | 274 | return r; |
| 230 | 275 | |
| .. | .. |
| 233 | 278 | amdgpu_hotplug_work_func); |
| 234 | 279 | } |
| 235 | 280 | |
| 236 | | - INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func); |
| | 281 | + INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1); |
| | 282 | + INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2); |
| 237 | 283 | |
| 238 | 284 | adev->irq.installed = true; |
| 239 | | - r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); |
| | 285 | + /* Use vector 0 for MSI-X */ |
| | 286 | + r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0)); |
| 240 | 287 | if (r) { |
| 241 | 288 | adev->irq.installed = false; |
| 242 | 289 | if (!amdgpu_device_has_dc_support(adev)) |
| 243 | 290 | flush_work(&adev->hotplug_work); |
| 244 | | - cancel_work_sync(&adev->reset_work); |
| 245 | 291 | return r; |
| 246 | 292 | } |
| 247 | | - adev->ddev->max_vblank_count = 0x00ffffff; |
| | 293 | + adev_to_drm(adev)->max_vblank_count = 0x00ffffff; |
| 248 | 294 | |
| 249 | 295 | DRM_DEBUG("amdgpu: irq initialized.\n"); |
| 250 | 296 | return 0; |
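Note the error path: once `INIT_WORK` has run and the IRQ handler could have fired, the unwind must flush any work that may already be queued before the state backing it goes away, which is why `flush_work()` stays on the failure branch above. A hedged sketch of that lifecycle with hypothetical names (`my_install_irq()` stands in for `drm_irq_install()`):

```c
#include <linux/errno.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct work_struct work;
	bool installed;
};

static void my_work_fn(struct work_struct *work)
{
	/* deferred interrupt handling goes here */
}

/* Hypothetical IRQ hookup; 0 on success, nonzero on failure. */
static int my_install_irq(struct my_ctx *ctx)
{
	return 0;
}

static int my_init(struct my_ctx *ctx)
{
	INIT_WORK(&ctx->work, my_work_fn);
	ctx->installed = true;

	if (my_install_irq(ctx)) {
		ctx->installed = false;
		/* the IRQ may already have queued the work: wait it out */
		flush_work(&ctx->work);
		return -EIO;
	}
	return 0;
}
```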
| .. | .. |
| 264 | 310 | unsigned i, j; |
| 265 | 311 | |
| 266 | 312 | if (adev->irq.installed) { |
| 267 | | - drm_irq_uninstall(adev->ddev); |
| | 313 | + drm_irq_uninstall(adev_to_drm(adev)); |
| 268 | 314 | adev->irq.installed = false; |
| 269 | 315 | if (adev->irq.msi_enabled) |
| 270 | | - pci_disable_msi(adev->pdev); |
| | 316 | + pci_free_irq_vectors(adev->pdev); |
| 271 | 317 | if (!amdgpu_device_has_dc_support(adev)) |
| 272 | 318 | flush_work(&adev->hotplug_work); |
| 273 | | - cancel_work_sync(&adev->reset_work); |
| 274 | 319 | } |
| 275 | 320 | |
| 276 | | - for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) { |
| | 321 | + for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { |
| 277 | 322 | if (!adev->irq.client[i].sources) |
| 278 | 323 | continue; |
| 279 | 324 | |
| .. | .. |
| 313 | 358 | unsigned client_id, unsigned src_id, |
| 314 | 359 | struct amdgpu_irq_src *source) |
| 315 | 360 | { |
| 316 | | - if (client_id >= AMDGPU_IH_CLIENTID_MAX) |
| | 361 | + if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) |
| 317 | 362 | return -EINVAL; |
| 318 | 363 | |
| 319 | 364 | if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) |
| .. | .. |
| 353 | 398 | * amdgpu_irq_dispatch - dispatch IRQ to IP blocks |
| 354 | 399 | * |
| 355 | 400 | * @adev: amdgpu device pointer |
| 356 | | - * @entry: interrupt vector pointer |
| | 401 | + * @ih: interrupt ring instance |
| 357 | 402 | * |
| 358 | 403 | * Dispatches IRQ to IP blocks. |
| 359 | 404 | */ |
| 360 | 405 | void amdgpu_irq_dispatch(struct amdgpu_device *adev, |
| 361 | | - struct amdgpu_iv_entry *entry) |
| | 406 | + struct amdgpu_ih_ring *ih) |
| 362 | 407 | { |
| 363 | | - unsigned client_id = entry->client_id; |
| 364 | | - unsigned src_id = entry->src_id; |
| | 408 | + u32 ring_index = ih->rptr >> 2; |
| | 409 | + struct amdgpu_iv_entry entry; |
| | 410 | + unsigned client_id, src_id; |
| 365 | 411 | struct amdgpu_irq_src *src; |
| | 412 | + bool handled = false; |
| 366 | 413 | int r; |
| 367 | 414 | |
| 368 | | - trace_amdgpu_iv(entry); |
| | 415 | + entry.iv_entry = (const uint32_t *)&ih->ring[ring_index]; |
| | 416 | + amdgpu_ih_decode_iv(adev, &entry); |
| 369 | 417 | |
| 370 | | - if (client_id >= AMDGPU_IH_CLIENTID_MAX) { |
| | 418 | + trace_amdgpu_iv(ih - &adev->irq.ih, &entry); |
| | 419 | + |
| | 420 | + client_id = entry.client_id; |
| | 421 | + src_id = entry.src_id; |
| | 422 | + |
| | 423 | + if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) { |
| 371 | 424 | DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); |
| 372 | | - return; |
| 373 | | - } |
| 374 | 425 | |
| 375 | | - if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { |
| | 426 | + } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { |
| 376 | 427 | DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); |
| 377 | | - return; |
| 378 | | - } |
| 379 | 428 | |
| 380 | | - if (adev->irq.virq[src_id]) { |
| | 429 | + } else if (adev->irq.virq[src_id]) { |
| 381 | 430 | generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id)); |
| 382 | | - } else { |
| 383 | | - if (!adev->irq.client[client_id].sources) { |
| 384 | | - DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", |
| 385 | | - client_id, src_id); |
| 386 | | - return; |
| 387 | | - } |
| 388 | 431 | |
| 389 | | - src = adev->irq.client[client_id].sources[src_id]; |
| 390 | | - if (!src) { |
| 391 | | - DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); |
| 392 | | - return; |
| 393 | | - } |
| | 432 | + } else if (!adev->irq.client[client_id].sources) { |
| | 433 | + DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", |
| | 434 | + client_id, src_id); |
| 394 | 435 | |
| 395 | | - r = src->funcs->process(adev, src, entry); |
| 396 | | - if (r) |
| | 436 | + } else if ((src = adev->irq.client[client_id].sources[src_id])) { |
| | 437 | + r = src->funcs->process(adev, src, &entry); |
| | 438 | + if (r < 0) |
| 397 | 439 | DRM_ERROR("error processing interrupt (%d)\n", r); |
| | 440 | + else if (r) |
| | 441 | + handled = true; |
| | 442 | + |
| | 443 | + } else { |
| | 444 | + DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id); |
| 398 | 445 | } |
| | 446 | + |
| | 447 | + /* Send it to amdkfd as well if it isn't already handled */ |
| | 448 | + if (!handled) |
| | 449 | + amdgpu_amdkfd_interrupt(adev, entry.iv_entry); |
| 399 | 450 | } |
| 400 | 451 | |
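`amdgpu_irq_dispatch()` now owns decoding: it reads the raw IV words at the ring's read pointer, decodes them into an on-stack entry, walks a (client, source) table, and forwards anything unhandled to amdkfd. A hedged sketch of the table-dispatch core with illustrative `my_*` types and sizes:

```c
#include <linux/kernel.h>
#include <linux/types.h>

#define MY_CLIENT_MAX	8
#define MY_SRC_MAX	64

struct my_iv_entry {
	unsigned int client_id, src_id;
};

/* ->process() returns <0 on error and >0 when it consumed the event. */
struct my_irq_src {
	int (*process)(struct my_iv_entry *entry);
};

static struct my_irq_src *my_sources[MY_CLIENT_MAX][MY_SRC_MAX];

static void my_dispatch(struct my_iv_entry *entry)
{
	struct my_irq_src *src;
	bool handled = false;
	int r;

	if (entry->client_id >= MY_CLIENT_MAX) {
		pr_debug("invalid client_id %u\n", entry->client_id);
	} else if (entry->src_id >= MY_SRC_MAX) {
		pr_debug("invalid src_id %u\n", entry->src_id);
	} else if ((src = my_sources[entry->client_id][entry->src_id])) {
		r = src->process(entry);
		if (r < 0)
			pr_err("error processing interrupt (%d)\n", r);
		else if (r)
			handled = true;
	}

	if (!handled)
		pr_debug("forward entry to a secondary consumer here\n");
}
```

The single if/else-if chain mirrors the rewritten function: every entry falls through to exactly one branch, and only a positive `process()` return marks it as consumed.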
| 401 | 452 | /** |
| .. | .. |
| 440 | 491 | { |
| 441 | 492 | int i, j, k; |
| 442 | 493 | |
| 443 | | - for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) { |
| | 494 | + for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { |
| 444 | 495 | if (!adev->irq.client[i].sources) |
| 445 | 496 | continue; |
| 446 | 497 | |
| .. | .. |
| 470 | 521 | int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, |
| 471 | 522 | unsigned type) |
| 472 | 523 | { |
| 473 | | - if (!adev->ddev->irq_enabled) |
| | 524 | + if (!adev_to_drm(adev)->irq_enabled) |
| 474 | 525 | return -ENOENT; |
| 475 | 526 | |
| 476 | 527 | if (type >= src->num_types) |
| .. | .. |
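`amdgpu_irq_get()`/`amdgpu_irq_put()` keep a per-type reference count and only touch the hardware enable state on the 0↔1 transitions; the counting itself sits outside the hunks shown here. A hedged sketch of that refcounting pattern, with a hypothetical `my_hw_set_enabled()` hardware hook:

```c
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/types.h>

struct my_irq_src {
	unsigned int num_types;
	atomic_t *enabled_refs;		/* one counter per interrupt type */
};

/* Hypothetical hardware hook: program the enable bit for one type. */
static void my_hw_set_enabled(struct my_irq_src *src, unsigned int type,
			      bool enable)
{
}

static int my_irq_get(struct my_irq_src *src, unsigned int type)
{
	if (type >= src->num_types)
		return -EINVAL;

	/* Enable the hardware only on the 0 -> 1 transition. */
	if (atomic_inc_return(&src->enabled_refs[type]) == 1)
		my_hw_set_enabled(src, type, true);
	return 0;
}

static int my_irq_put(struct my_irq_src *src, unsigned int type)
{
	if (type >= src->num_types)
		return -EINVAL;

	/* Disable the hardware only on the 1 -> 0 transition. */
	if (atomic_dec_and_test(&src->enabled_refs[type]))
		my_hw_set_enabled(src, type, false);
	return 0;
}
```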
| 500 | 551 | int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, |
| 501 | 552 | unsigned type) |
| 502 | 553 | { |
| 503 | | - if (!adev->ddev->irq_enabled) |
| | 554 | + if (!adev_to_drm(adev)->irq_enabled) |
| 504 | 555 | return -ENOENT; |
| 505 | 556 | |
| 506 | 557 | if (type >= src->num_types) |
| .. | .. |
| 531 | 582 | bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, |
| 532 | 583 | unsigned type) |
| 533 | 584 | { |
| 534 | | - if (!adev->ddev->irq_enabled) |
| | 585 | + if (!adev_to_drm(adev)->irq_enabled) |
| 535 | 586 | return false; |
| 536 | 587 | |
| 537 | 588 | if (type >= src->num_types) |
|---|