@@ -65,184 +65,22 @@
 	clk_disable_unprepare(dev->sclk);
 }
 
-static int check_alignment(struct scatterlist *sg_src,
-			   struct scatterlist *sg_dst,
-			   int align_mask)
-{
-	int in, out, align;
-
-	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
-	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
-	if (!sg_dst)
-		return in;
-	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
-	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
-	align = in && out;
-
-	return (align && (sg_src->length == sg_dst->length));
-}
-
-static int rk_load_data(struct rk_crypto_info *dev,
-			struct scatterlist *sg_src,
-			struct scatterlist *sg_dst)
-{
-	unsigned int count;
-
-	dev->aligned = dev->aligned ?
-		check_alignment(sg_src, sg_dst, dev->align_size) :
-		dev->aligned;
-	if (dev->aligned) {
-		count = min(dev->left_bytes, sg_src->length);
-		dev->left_bytes -= count;
-
-		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
-			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
-				__func__, __LINE__);
-			return -EINVAL;
-		}
-		dev->addr_in = sg_dma_address(sg_src);
-
-		if (sg_dst) {
-			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
-				dev_err(dev->dev,
-					"[%s:%d] dma_map_sg(dst) error\n",
-					__func__, __LINE__);
-				dma_unmap_sg(dev->dev, sg_src, 1,
-					     DMA_TO_DEVICE);
-				return -EINVAL;
-			}
-			dev->addr_out = sg_dma_address(sg_dst);
-		}
-	} else {
-		count = (dev->left_bytes > PAGE_SIZE) ?
-			PAGE_SIZE : dev->left_bytes;
-
-		if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
-					dev->addr_vir, count,
-					dev->total - dev->left_bytes)) {
-			dev_err(dev->dev, "[%s:%d] pcopy err\n",
-				__func__, __LINE__);
-			return -EINVAL;
-		}
-		dev->left_bytes -= count;
-		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
-		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
-			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
-				__func__, __LINE__);
-			return -ENOMEM;
-		}
-		dev->addr_in = sg_dma_address(&dev->sg_tmp);
-
-		if (sg_dst) {
-			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
-					DMA_FROM_DEVICE)) {
-				dev_err(dev->dev,
-					"[%s:%d] dma_map_sg(sg_tmp) error\n",
-					__func__, __LINE__);
-				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
-					     DMA_TO_DEVICE);
-				return -ENOMEM;
-			}
-			dev->addr_out = sg_dma_address(&dev->sg_tmp);
-		}
-	}
-	dev->count = count;
-	return 0;
-}
-
-static void rk_unload_data(struct rk_crypto_info *dev)
-{
-	struct scatterlist *sg_in, *sg_out;
-
-	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
-	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);
-
-	if (dev->sg_dst) {
-		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
-		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
-	}
-}
-
 static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
 {
 	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
 	u32 interrupt_status;
 
-	spin_lock(&dev->lock);
 	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
 	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
 
+	dev->status = 1;
 	if (interrupt_status & 0x0a) {
 		dev_warn(dev->dev, "DMA Error\n");
-		dev->err = -EFAULT;
+		dev->status = 0;
 	}
-	tasklet_schedule(&dev->done_task);
+	complete(&dev->complete);
 
-	spin_unlock(&dev->lock);
 	return IRQ_HANDLED;
-}
-
-static int rk_crypto_enqueue(struct rk_crypto_info *dev,
-			     struct crypto_async_request *async_req)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&dev->lock, flags);
-	ret = crypto_enqueue_request(&dev->queue, async_req);
-	if (dev->busy) {
-		spin_unlock_irqrestore(&dev->lock, flags);
-		return ret;
-	}
-	dev->busy = true;
-	spin_unlock_irqrestore(&dev->lock, flags);
-	tasklet_schedule(&dev->queue_task);
-
-	return ret;
-}
-
-static void rk_crypto_queue_task_cb(unsigned long data)
-{
-	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
-	struct crypto_async_request *async_req, *backlog;
-	unsigned long flags;
-	int err = 0;
-
-	dev->err = 0;
-	spin_lock_irqsave(&dev->lock, flags);
-	backlog = crypto_get_backlog(&dev->queue);
-	async_req = crypto_dequeue_request(&dev->queue);
-
-	if (!async_req) {
-		dev->busy = false;
-		spin_unlock_irqrestore(&dev->lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&dev->lock, flags);
-
-	if (backlog) {
-		backlog->complete(backlog, -EINPROGRESS);
-		backlog = NULL;
-	}
-
-	dev->async_req = async_req;
-	err = dev->start(dev);
-	if (err)
-		dev->complete(dev->async_req, err);
-}
-
-static void rk_crypto_done_task_cb(unsigned long data)
-{
-	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
-
-	if (dev->err) {
-		dev->complete(dev->async_req, dev->err);
-		return;
-	}
-
-	dev->err = dev->update(dev);
-	if (dev->err)
-		dev->complete(dev->async_req, dev->err);
 }
 
 static struct rk_crypto_tmp *rk_cipher_algs[] = {
@@ -337,8 +175,6 @@
 	if (err)
 		goto err_crypto;
 
-	spin_lock_init(&crypto_info->lock);
-
 	crypto_info->reg = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(crypto_info->reg)) {
 		err = PTR_ERR(crypto_info->reg);
@@ -389,18 +225,11 @@
 	crypto_info->dev = &pdev->dev;
 	platform_set_drvdata(pdev, crypto_info);
 
-	tasklet_init(&crypto_info->queue_task,
-		     rk_crypto_queue_task_cb, (unsigned long)crypto_info);
-	tasklet_init(&crypto_info->done_task,
-		     rk_crypto_done_task_cb, (unsigned long)crypto_info);
-	crypto_init_queue(&crypto_info->queue, 50);
+	crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true);
+	crypto_engine_start(crypto_info->engine);
+	init_completion(&crypto_info->complete);
 
-	crypto_info->enable_clk = rk_crypto_enable_clk;
-	crypto_info->disable_clk = rk_crypto_disable_clk;
-	crypto_info->load_data = rk_load_data;
-	crypto_info->unload_data = rk_unload_data;
-	crypto_info->enqueue = rk_crypto_enqueue;
-	crypto_info->busy = false;
+	rk_crypto_enable_clk(crypto_info);
 
 	err = rk_crypto_register(crypto_info);
 	if (err) {
@@ -412,9 +241,9 @@
 	return 0;
 
 err_register_alg:
-	tasklet_kill(&crypto_info->queue_task);
-	tasklet_kill(&crypto_info->done_task);
+	crypto_engine_exit(crypto_info->engine);
 err_crypto:
+	dev_err(dev, "Crypto Accelerator not successfully registered\n");
 	return err;
 }
 
@@ -423,8 +252,8 @@
 	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);
 
 	rk_crypto_unregister();
-	tasklet_kill(&crypto_tmp->done_task);
-	tasklet_kill(&crypto_tmp->queue_task);
+	rk_crypto_disable_clk(crypto_tmp);
+	crypto_engine_exit(crypto_tmp->engine);
 	return 0;
 }
 
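
Note on the new completion-based flow: rk_crypto_irq_handle() now records the
outcome in dev->status and signals dev->complete instead of scheduling a
tasklet. The waiting side lives in the cipher/hash request handlers, which are
not part of these hunks; the sketch below shows the expected pairing under
that assumption, with rk_wait_for_op() as a hypothetical helper name, not a
function from this patch:

	/* Hypothetical helper (illustrative only): start one operation and
	 * block until rk_crypto_irq_handle() calls complete(&dev->complete).
	 * Assumes struct rk_crypto_info from this driver and
	 * <linux/completion.h>. */
	static int rk_wait_for_op(struct rk_crypto_info *dev)
	{
		/* Re-arm the completion before kicking the hardware so the
		 * IRQ cannot race ahead of the wait. */
		reinit_completion(&dev->complete);
		/* ... program DMA addresses and start the engine ... */
		wait_for_completion(&dev->complete);
		/* The IRQ handler clears dev->status on a DMA error. */
		return dev->status ? 0 : -EFAULT;
	}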