.. | .. |
---|
19 | 19 | struct list_head node; |
---|
20 | 20 | }; |
---|
21 | 21 | |
---|
| 22 | +struct virt_dma_lockops; |
---|
| 23 | + |
---|
22 | 24 | struct virt_dma_chan { |
---|
23 | 25 | struct dma_chan chan; |
---|
24 | 26 | struct tasklet_struct task; |
---|
25 | 27 | void (*desc_free)(struct virt_dma_desc *); |
---|
26 | 28 | |
---|
| 29 | +#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB |
---|
| 30 | + struct virt_dma_lockops *lock_ops; |
---|
| 31 | + union { |
---|
| 32 | + spinlock_t lock; |
---|
| 33 | + hard_spinlock_t oob_lock; |
---|
| 34 | + }; |
---|
| 35 | +#else |
---|
27 | 36 | spinlock_t lock; |
---|
| 37 | +#endif |
---|
28 | 38 | |
---|
29 | 39 | /* protected by vc.lock */ |
---|
30 | 40 | struct list_head desc_allocated; |
---|
.. | .. |
---|
40 | 50 | { |
---|
41 | 51 | return container_of(chan, struct virt_dma_chan, chan); |
---|
42 | 52 | } |
---|
| 53 | + |
---|
| 54 | +#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB |
---|
| 55 | + |
---|
/*
 * Per-channel lock operations: lets a virt_dma_chan be protected either
 * by a regular spinlock or by a hard (out-of-band capable) spinlock,
 * selected at channel setup via vc->lock_ops.
 *
 * NOTE(review): each hook is assumed to mirror the corresponding
 * spin_lock_* primitive on the union member in struct virt_dma_chan —
 * confirm against the in-band/oob implementations, which are not
 * visible in this chunk.
 */
struct virt_dma_lockops {
	void (*init)(struct virt_dma_chan *vc);
	void (*lock)(struct virt_dma_chan *vc);
	void (*unlock)(struct virt_dma_chan *vc);
	void (*lock_irq)(struct virt_dma_chan *vc);
	void (*unlock_irq)(struct virt_dma_chan *vc);
	/* Returns the saved flags word instead of writing through a pointer. */
	unsigned long (*lock_irqsave)(struct virt_dma_chan *vc);
	void (*unlock_irqrestore)(struct virt_dma_chan *vc,
				  unsigned long flags);
};
---|
| 66 | + |
---|
/* Initialize @vc's channel lock through the installed lock operations. */
static inline void vchan_lock_init(struct virt_dma_chan *vc)
{
	vc->lock_ops->init(vc);
}
---|
| 71 | + |
---|
/* Acquire @vc's channel lock (no irq state change). */
static inline void vchan_lock(struct virt_dma_chan *vc)
{
	vc->lock_ops->lock(vc);
}
---|
| 76 | + |
---|
/* Release @vc's channel lock (counterpart of vchan_lock()). */
static inline void vchan_unlock(struct virt_dma_chan *vc)
{
	vc->lock_ops->unlock(vc);
}
---|
| 81 | + |
---|
/* Acquire @vc's channel lock with interrupts disabled. */
static inline void vchan_lock_irq(struct virt_dma_chan *vc)
{
	vc->lock_ops->lock_irq(vc);
}
---|
| 86 | + |
---|
/* Release @vc's channel lock and re-enable interrupts. */
static inline void vchan_unlock_irq(struct virt_dma_chan *vc)
{
	vc->lock_ops->unlock_irq(vc);
}
---|
| 91 | + |
---|
/*
 * Acquire @vc's channel lock, saving the current irq state.
 * Returns the saved flags by value; vchan_lock_irqsave() turns this
 * back into the conventional (lock, flags) statement form.
 */
static inline
unsigned long __vchan_lock_irqsave(struct virt_dma_chan *vc)
{
	return vc->lock_ops->lock_irqsave(vc);
}
---|
| 97 | + |
---|
/*
 * Statement-shaped wrapper so callers keep the familiar
 * spin_lock_irqsave(lock, flags) usage: @__flags is written, not read,
 * hence the out-of-line helper returning the saved state by value.
 */
#define vchan_lock_irqsave(__vc, __flags)		\
	do {						\
		(__flags) = __vchan_lock_irqsave(__vc);	\
	} while (0)
---|
| 102 | + |
---|
/*
 * Release @vc's channel lock and restore the irq state previously
 * saved by vchan_lock_irqsave() into @flags.
 */
static inline
void vchan_unlock_irqrestore(struct virt_dma_chan *vc,
			     unsigned long flags)
{
	vc->lock_ops->unlock_irqrestore(vc, flags);
}
---|
| 109 | + |
---|
| 110 | +static inline bool vchan_oob_handled(struct virt_dma_desc *vd) |
---|
| 111 | +{ |
---|
| 112 | + return !!(vd->tx.flags & DMA_OOB_INTERRUPT); |
---|
| 113 | +} |
---|
| 114 | + |
---|
| 115 | +static inline bool vchan_oob_pulsed(struct virt_dma_desc *vd) |
---|
| 116 | +{ |
---|
| 117 | + return !!(vd->tx.flags & DMA_OOB_PULSE); |
---|
| 118 | +} |
---|
| 119 | + |
---|
| 120 | +#else |
---|
| 121 | + |
---|
/*
 * CONFIG_DMA_VIRTUAL_CHANNELS_OOB disabled: the vchan_lock_* helpers
 * map 1:1 onto the regular spinlock API protecting vc->lock, so this
 * configuration adds no overhead over the historical open-coded
 * spin_lock_*(&vc->lock) calls.
 */
#define vchan_lock_init(__vc) \
	spin_lock_init(&(__vc)->lock)

#define vchan_lock(__vc) \
	spin_lock(&(__vc)->lock)

#define vchan_unlock(__vc) \
	spin_unlock(&(__vc)->lock)

#define vchan_lock_irq(__vc) \
	spin_lock_irq(&(__vc)->lock)

#define vchan_unlock_irq(__vc) \
	spin_unlock_irq(&(__vc)->lock)

#define vchan_lock_irqsave(__vc, __flags) \
	spin_lock_irqsave(&(__vc)->lock, __flags)

#define vchan_unlock_irqrestore(__vc, __flags) \
	spin_unlock_irqrestore(&(__vc)->lock, __flags)
---|
| 142 | + |
---|
/* No oob stage in this configuration: descriptors never carry oob flags. */
static inline bool vchan_oob_handled(struct virt_dma_desc *vd)
{
	return false;
}
---|
| 147 | + |
---|
/* No oob stage in this configuration: oob pulse completion never applies. */
static inline bool vchan_oob_pulsed(struct virt_dma_desc *vd)
{
	return false;
}
---|
| 152 | + |
---|
| 153 | +#endif /* !CONFIG_DMA_VIRTUAL_CHANNELS_OOB */ |
---|
43 | 154 | |
---|
44 | 155 | void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); |
---|
45 | 156 | void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); |
---|
.. | .. |
---|
66 | 177 | vd->tx_result.result = DMA_TRANS_NOERROR; |
---|
67 | 178 | vd->tx_result.residue = 0; |
---|
68 | 179 | |
---|
69 | | - spin_lock_irqsave(&vc->lock, flags); |
---|
| 180 | + vchan_lock_irqsave(vc, flags); |
---|
70 | 181 | list_add_tail(&vd->node, &vc->desc_allocated); |
---|
71 | | - spin_unlock_irqrestore(&vc->lock, flags); |
---|
| 182 | + vchan_unlock_irqrestore(vc, flags); |
---|
72 | 183 | |
---|
73 | 184 | return &vd->tx; |
---|
74 | 185 | } |
---|
.. | .. |
---|
116 | 227 | if (dmaengine_desc_test_reuse(&vd->tx)) { |
---|
117 | 228 | unsigned long flags; |
---|
118 | 229 | |
---|
119 | | - spin_lock_irqsave(&vc->lock, flags); |
---|
| 230 | + vchan_lock_irqsave(vc, flags); |
---|
120 | 231 | list_add(&vd->node, &vc->desc_allocated); |
---|
121 | | - spin_unlock_irqrestore(&vc->lock, flags); |
---|
| 232 | + vchan_unlock_irqrestore(vc, flags); |
---|
122 | 233 | } else { |
---|
123 | 234 | vc->desc_free(vd); |
---|
124 | 235 | } |
---|
.. | .. |
---|
190 | 301 | unsigned long flags; |
---|
191 | 302 | LIST_HEAD(head); |
---|
192 | 303 | |
---|
193 | | - spin_lock_irqsave(&vc->lock, flags); |
---|
| 304 | + vchan_lock_irqsave(vc, flags); |
---|
194 | 305 | vchan_get_all_descriptors(vc, &head); |
---|
195 | 306 | list_for_each_entry(vd, &head, node) |
---|
196 | 307 | dmaengine_desc_clear_reuse(&vd->tx); |
---|
197 | | - spin_unlock_irqrestore(&vc->lock, flags); |
---|
| 308 | + vchan_unlock_irqrestore(vc, flags); |
---|
198 | 309 | |
---|
199 | 310 | vchan_dma_desc_free_list(vc, &head); |
---|
200 | 311 | } |
---|
.. | .. |
---|
215 | 326 | |
---|
216 | 327 | tasklet_kill(&vc->task); |
---|
217 | 328 | |
---|
218 | | - spin_lock_irqsave(&vc->lock, flags); |
---|
| 329 | + vchan_lock_irqsave(vc, flags); |
---|
219 | 330 | |
---|
220 | 331 | list_splice_tail_init(&vc->desc_terminated, &head); |
---|
221 | 332 | |
---|
222 | | - spin_unlock_irqrestore(&vc->lock, flags); |
---|
| 333 | + vchan_unlock_irqrestore(vc, flags); |
---|
223 | 334 | |
---|
224 | 335 | vchan_dma_desc_free_list(vc, &head); |
---|
225 | 336 | } |
---|