@@ -75,7 +75,7 @@
 /* Array of package pointers */
 static struct pkg_device **packages;
 /* Serializes interrupt notification, work and hotplug */
-static DEFINE_SPINLOCK(pkg_temp_lock);
+static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
 /* Protects zone operation in the work function against hotplug removal */
 static DEFINE_MUTEX(thermal_zone_mutex);
 
@@ -291,12 +291,12 @@
 	u64 msr_val, wr_val;
 
 	mutex_lock(&thermal_zone_mutex);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	++pkg_work_cnt;
 
 	pkgdev = pkg_temp_thermal_get_dev(cpu);
 	if (!pkgdev) {
-		spin_unlock_irq(&pkg_temp_lock);
+		raw_spin_unlock_irq(&pkg_temp_lock);
 		mutex_unlock(&thermal_zone_mutex);
 		return;
 	}
@@ -310,7 +310,7 @@
 	}
 
 	enable_pkg_thres_interrupt();
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/*
 	 * If tzone is not NULL, then thermal_zone_mutex will prevent the
@@ -335,7 +335,7 @@
 	struct pkg_device *pkgdev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&pkg_temp_lock, flags);
+	raw_spin_lock_irqsave(&pkg_temp_lock, flags);
 	++pkg_interrupt_cnt;
 
 	disable_pkg_thres_interrupt();
@@ -347,7 +347,7 @@
 		pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
 	}
 
-	spin_unlock_irqrestore(&pkg_temp_lock, flags);
+	raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
 	return 0;
 }
 
@@ -393,9 +393,9 @@
 		      pkgdev->msr_pkg_therm_high);
 
 	cpumask_set_cpu(cpu, &pkgdev->cpumask);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	packages[pkgid] = pkgdev;
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	return 0;
 }
 
@@ -432,7 +432,7 @@
 	}
 
 	/* Protect against work and interrupts */
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 
 	/*
 	 * Check whether this cpu was the current target and store the new
@@ -464,9 +464,9 @@
 	 * To cancel the work we need to drop the lock, otherwise
 	 * we might deadlock if the work needs to be flushed.
 	 */
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 	cancel_delayed_work_sync(&pkgdev->work);
-	spin_lock_irq(&pkg_temp_lock);
+	raw_spin_lock_irq(&pkg_temp_lock);
 	/*
 	 * If this is not the last cpu in the package and the work
 	 * did not run after we dropped the lock above, then we
@@ -477,7 +477,7 @@
 		pkg_thermal_schedule_work(target, &pkgdev->work);
 	}
 
-	spin_unlock_irq(&pkg_temp_lock);
+	raw_spin_unlock_irq(&pkg_temp_lock);
 
 	/* Final cleanup if this is the last cpu */
 	if (lastcpu)
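
For reference, the raw_spin_*() calls above are the raw spinlock API: unlike a regular spinlock_t, a raw_spinlock_t keeps busy-waiting even on PREEMPT_RT, so it stays safe to take from contexts that must not sleep, which is presumably why the interrupt-notification path serialized by pkg_temp_lock wants it. The sketch below is a minimal illustration of that locking pattern under this assumption; the names (example_lock, example_notify, example_work_fn, example_event_cnt, example_last_msr) are hypothetical and not part of the driver.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical sketch (not driver code): the raw spinlock pattern the
 * patch converts pkg_temp_lock to.  A raw_spinlock_t never becomes a
 * sleeping lock on PREEMPT_RT, so it can protect state shared with a
 * hard-interrupt notification handler.
 */
static DEFINE_RAW_SPINLOCK(example_lock);
static unsigned int example_event_cnt;
static u64 example_last_msr;

/* Interrupt-context caller: save and restore the interrupt state. */
static int example_notify(u64 msr_val)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	++example_event_cnt;
	example_last_msr = msr_val;
	raw_spin_unlock_irqrestore(&example_lock, flags);
	return 0;
}

/* Thread/work-context caller running with interrupts enabled: the _irq
 * variants disable and re-enable local interrupts unconditionally.
 */
static void example_work_fn(void)
{
	raw_spin_lock_irq(&example_lock);
	/* ... operate on state shared with example_notify() ... */
	raw_spin_unlock_irq(&example_lock);
}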