```diff
@@ -110,8 +110,10 @@
          * remember the state so we can allow user space to enable it later.
          */

+        spin_lock(&priv->lock);
         if (!test_and_set_bit(0, &priv->flags))
                 disable_irq_nosync(irq);
+        spin_unlock(&priv->lock);

         return IRQ_HANDLED;
 }
```
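For context, here is a sketch of the whole interrupt handler after this hunk is applied. The surrounding names (`uio_dmem_genirq_handler`, `struct uio_dmem_genirq_platdata`, the `dev_info->priv` lookup) are assumptions based on the usual layout of a UIO driver of this kind; only the lock/flag logic and `disable_irq_nosync()` are confirmed by the diff. A plain `spin_lock()` suffices because the handler already runs in hard-irq context, and the `_nosync` variant is mandatory here: the synchronous `disable_irq()` waits for any running handler to finish, so the handler would end up waiting for itself.

```c
/* Sketch only: names outside the diff hunk are assumptions. */
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/uio_driver.h>

struct uio_dmem_genirq_platdata {
        spinlock_t lock;
        unsigned long flags;
        /* ... other members elided ... */
};

static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
{
        struct uio_dmem_genirq_platdata *priv = dev_info->priv;

        /* Just disable the interrupt in the interrupt controller, and
         * remember the state so we can allow user space to enable it later.
         */

        spin_lock(&priv->lock);         /* hard-irq context: no _irqsave needed */
        if (!test_and_set_bit(0, &priv->flags))
                disable_irq_nosync(irq); /* _nosync: disable_irq() would wait for us */
        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
}
```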
```diff
@@ -125,20 +127,19 @@
          * in the interrupt controller, but keep track of the
          * state to prevent per-irq depth damage.
          *
-         * Serialize this operation to support multiple tasks.
+         * Serialize this operation to support multiple tasks and concurrency
+         * with irq handler on SMP systems.
          */

         spin_lock_irqsave(&priv->lock, flags);
         if (irq_on) {
                 if (test_and_clear_bit(0, &priv->flags))
                         enable_irq(dev_info->irq);
-                spin_unlock_irqrestore(&priv->lock, flags);
         } else {
-                if (!test_and_set_bit(0, &priv->flags)) {
-                        spin_unlock_irqrestore(&priv->lock, flags);
-                        disable_irq(dev_info->irq);
-                }
+                if (!test_and_set_bit(0, &priv->flags))
+                        disable_irq_nosync(dev_info->irq);
         }
+        spin_unlock_irqrestore(&priv->lock, flags);

         return 0;
 }
```
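And a sketch of the irqcontrol hook after the second hunk. The signature is the standard UIO `irqcontrol` callback and the `priv` lookup is assumed, while the body follows the diff. The key change is `disable_irq()` becoming `disable_irq_nosync()`: `disable_irq()` synchronously waits for a running handler, and since the handler now takes `priv->lock`, calling it with the lock held could deadlock on SMP. With the non-waiting variant, the lock can simply be held across the whole operation, which also removes the unbalanced early-unlock paths of the old code.

```c
/* Sketch only: names outside the diff hunks are assumptions;
 * struct uio_dmem_genirq_platdata as in the handler sketch above. */
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uio_driver.h>

static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
        struct uio_dmem_genirq_platdata *priv = dev_info->priv;
        unsigned long flags;

        /* Allow user space to enable and disable the interrupt
         * in the interrupt controller, but keep track of the
         * state to prevent per-irq depth damage.
         *
         * Serialize this operation to support multiple tasks and concurrency
         * with irq handler on SMP systems.
         */

        spin_lock_irqsave(&priv->lock, flags);
        if (irq_on) {
                if (test_and_clear_bit(0, &priv->flags))
                        enable_irq(dev_info->irq);
        } else {
                if (!test_and_set_bit(0, &priv->flags))
                        /* _nosync: the handler takes priv->lock, so
                         * waiting for it with the lock held could deadlock */
                        disable_irq_nosync(dev_info->irq);
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}
```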