@@ -1,11 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
+/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */
 
 #include <linux/kernel.h>
+#include <linux/nospec.h>
 #include "cc_driver.h"
 #include "cc_buffer_mgr.h"
 #include "cc_request_mgr.h"
-#include "cc_ivgen.h"
 #include "cc_pm.h"
 
 #define CC_MAX_POLL_ITER 10
@@ -51,10 +51,37 @@
 	bool notif;
 };
 
+static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = {
+	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) },
+	{ BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT),
+	  BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) }
+};
+
 static void comp_handler(unsigned long devarg);
 #ifdef COMP_IN_WQ
 static void comp_work_handler(struct work_struct *work);
 #endif
+
+static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot)
+{
+	alg = array_index_nospec(alg, CC_CPP_NUM_ALGS);
+	slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS);
+
+	return cc_cpp_int_masks[alg][slot];
+}
 
 void cc_req_mgr_fini(struct cc_drvdata *drvdata)
 {
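The new `cc_cpp_int_mask()` helper sanitizes both table indices with `array_index_nospec()` before dereferencing `cc_cpp_int_masks`, so a mispredicted bounds check cannot be steered into a speculative out-of-bounds read (Spectre v1). A minimal sketch of the same pattern, with a hypothetical mask table and an explicit bounds check (the helper above leaves the architectural bounds check to its callers):

```c
#include <linux/bits.h>
#include <linux/nospec.h>
#include <linux/types.h>

#define NR_SLOTS 8

/* hypothetical per-slot IRQ bits, one per hardware slot */
static const u32 slot_masks[NR_SLOTS] = {
	BIT(0), BIT(1), BIT(2), BIT(3), BIT(4), BIT(5), BIT(6), BIT(7)
};

static u32 slot_mask(unsigned int slot)
{
	if (slot >= NR_SLOTS)
		return 0;

	/*
	 * Even if the branch above is mispredicted, the index used by
	 * the load below is clamped to [0, NR_SLOTS - 1] under
	 * speculation, so attacker-controlled out-of-bounds data
	 * cannot leak through the cache side channel.
	 */
	slot = array_index_nospec(slot, NR_SLOTS);

	return slot_masks[slot];
}
```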
@@ -80,7 +107,7 @@
 	/* Kill tasklet */
 	tasklet_kill(&req_mgr_h->comptask);
 #endif
-	kzfree(req_mgr_h);
+	kfree_sensitive(req_mgr_h);
 	drvdata->request_mgr_handle = NULL;
 }
 
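`kzfree()` was renamed to `kfree_sensitive()` in Linux 5.9 to make its purpose explicit: it wipes the allocation before freeing, so secrets held in the request-manager state cannot linger in freed heap memory. A sketch of where the distinction matters, using a hypothetical key-holding structure:

```c
#include <linux/slab.h>
#include <linux/types.h>

/* hypothetical context holding key material */
struct key_ctx {
	u8 key[32];
};

static void key_ctx_free(struct key_ctx *ctx)
{
	/*
	 * kfree_sensitive() zeroes the object with memzero_explicit()
	 * before handing it back to the allocator; the "explicit"
	 * variant cannot be optimized away by the compiler the way a
	 * plain memset() followed by kfree() could be.
	 */
	kfree_sensitive(ctx);
}
```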
@@ -179,12 +206,13 @@
 	}
 }
 
-/*!
- * Completion will take place if and only if user requested completion
- * by cc_send_sync_request().
+/**
+ * request_mgr_complete() - Completion will take place if and only if user
+ * requested completion by cc_send_sync_request().
  *
- * \param dev
- * \param dx_compl_h The completion event to signal
+ * @dev: Device pointer
+ * @dx_compl_h: The completion event to signal
+ * @dummy: unused error code
  */
 static void request_mgr_complete(struct device *dev, void *dx_compl_h,
 				 int dummy)
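The comment rewrites in this patch all follow the same scheme: the old Doxygen-style `/*! ... \param` blocks become kernel-doc, which `scripts/kernel-doc` can validate and the documentation build can extract. For reference, the shape kernel-doc expects (function name with trailing `()`, one-line summary, one `@arg:` line per parameter, optional `Return:` section), shown on a hypothetical function:

```c
/**
 * example_submit() - Short one-line summary of the function.
 * @dev: Device issuing the request.
 * @len: Length of the descriptor sequence.
 *
 * A longer description may follow after a blank comment line.
 *
 * Return: 0 on success, or a negative errno on failure.
 */
static int example_submit(struct device *dev, unsigned int len);
```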
@@ -202,7 +230,7 @@
 	struct device *dev = drvdata_to_dev(drvdata);
 
 	/* SW queue is checked only once as it will not
-	 * be chaned during the poll because the spinlock_bh
+	 * be changed during the poll because the spinlock_bh
 	 * is held by the thread
 	 */
 	if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) ==
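The check above declares the software queue full when advancing `req_queue_head` by one slot would land on `req_queue_tail`. Because MAX_REQUEST_QUEUE_SIZE is a power of two, `& (MAX_REQUEST_QUEUE_SIZE - 1)` is a cheap modulo, at the cost of one permanently empty slot that distinguishes "full" from "empty". A self-contained model of that arithmetic (size and names illustrative):

```c
#include <linux/types.h>

#define QUEUE_SIZE 8u	/* must be a power of two, as in the driver */

struct ring {
	unsigned int head;	/* next slot to fill */
	unsigned int tail;	/* next slot to drain */
};

/* Full when one more enqueue would make head collide with tail. */
static bool ring_full(const struct ring *r)
{
	return ((r->head + 1) & (QUEUE_SIZE - 1)) == r->tail;
}

/* Occupancy; the mask makes the subtraction safe across wrap-around. */
static unsigned int ring_used(const struct ring *r)
{
	return (r->head - r->tail) & (QUEUE_SIZE - 1);
}
```

The same masked subtraction appears below as `used_sw_slots` in `cc_do_send_request()`.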
@@ -237,51 +265,26 @@
 		return -ENOSPC;
 }
 
-/*!
- * Enqueue caller request to crypto hardware.
+/**
+ * cc_do_send_request() - Enqueue caller request to crypto hardware.
  * Need to be called with HW lock held and PM running
  *
- * \param drvdata
- * \param cc_req The request to enqueue
- * \param desc The crypto sequence
- * \param len The crypto sequence length
- * \param add_comp If "true": add an artificial dout DMA to mark completion
+ * @drvdata: Associated device driver context
+ * @cc_req: The request to enqueue
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
+ * @add_comp: If "true": add an artificial dout DMA to mark completion
  *
- * \return int Returns -EINPROGRESS or error code
 */
-static int cc_do_send_request(struct cc_drvdata *drvdata,
-			      struct cc_crypto_req *cc_req,
-			      struct cc_hw_desc *desc, unsigned int len,
-			      bool add_comp, bool ivgen)
+static void cc_do_send_request(struct cc_drvdata *drvdata,
+			       struct cc_crypto_req *cc_req,
+			       struct cc_hw_desc *desc, unsigned int len,
+			       bool add_comp)
 {
 	struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle;
 	unsigned int used_sw_slots;
-	unsigned int iv_seq_len = 0;
 	unsigned int total_seq_len = len; /*initial sequence length*/
-	struct cc_hw_desc iv_seq[CC_IVPOOL_SEQ_LEN];
 	struct device *dev = drvdata_to_dev(drvdata);
-	int rc;
-
-	if (ivgen) {
-		dev_dbg(dev, "Acquire IV from pool into %d DMA addresses %pad, %pad, %pad, IV-size=%u\n",
-			cc_req->ivgen_dma_addr_len,
-			&cc_req->ivgen_dma_addr[0],
-			&cc_req->ivgen_dma_addr[1],
-			&cc_req->ivgen_dma_addr[2],
-			cc_req->ivgen_size);
-
-		/* Acquire IV from pool */
-		rc = cc_get_iv(drvdata, cc_req->ivgen_dma_addr,
-			       cc_req->ivgen_dma_addr_len,
-			       cc_req->ivgen_size, iv_seq, &iv_seq_len);
-
-		if (rc) {
-			dev_err(dev, "Failed to generate IV (rc=%d)\n", rc);
-			return rc;
-		}
-
-		total_seq_len += iv_seq_len;
-	}
 
 	used_sw_slots = ((req_mgr_h->req_queue_head -
 			  req_mgr_h->req_queue_tail) &
@@ -293,20 +296,17 @@
 	req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req;
 	req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) &
 				    (MAX_REQUEST_QUEUE_SIZE - 1);
-	/* TODO: Use circ_buf.h ? */
 
 	dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head);
 
 	/*
 	 * We are about to push command to the HW via the command registers
-	 * that may refernece hsot memory. We need to issue a memory barrier
-	 * to make sure there are no outstnading memory writes
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes
 	 */
 	wmb();
 
 	/* STAT_PHASE_4: Push sequence */
-	if (ivgen)
-		enqueue_seq(drvdata, iv_seq, iv_seq_len);
 
 	enqueue_seq(drvdata, desc, len);
 
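The typo-fixed comment above `wmb()` states the contract: everything the descriptor sequence points at in host memory must be globally visible before the sequence is pushed through the command registers, otherwise the engine could fetch stale data. The canonical shape of that pattern, sketched with illustrative names (not driver code):

```c
#include <linux/io.h>
#include <linux/types.h>

/*
 * Illustrative doorbell push: 'buf' is coherent memory the device
 * will read, 'doorbell' an MMIO register that starts the fetch.
 */
static void push_work(u32 *buf, u32 payload, void __iomem *doorbell)
{
	buf[0] = payload;	/* 1. fill in data the device will fetch */

	wmb();			/* 2. order the store above before the
				 *    MMIO write below */

	iowrite32(1, doorbell);	/* 3. tell the device to go look */
}
```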
@@ -326,19 +326,18 @@
 		/* Update the free slots in HW queue */
 		req_mgr_h->q_free_slots -= total_seq_len;
 	}
-
-	/* Operation still in process */
-	return -EINPROGRESS;
 }
 
 static void cc_enqueue_backlog(struct cc_drvdata *drvdata,
 			       struct cc_bl_item *bli)
 {
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
+	struct device *dev = drvdata_to_dev(drvdata);
 
 	spin_lock_bh(&mgr->bl_lock);
 	list_add_tail(&bli->list, &mgr->backlog);
 	++mgr->bl_len;
+	dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len);
 	spin_unlock_bh(&mgr->bl_lock);
 	tasklet_schedule(&mgr->comptask);
 }
@@ -348,9 +347,7 @@
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
 	struct cc_bl_item *bli;
 	struct cc_crypto_req *creq;
-	struct crypto_async_request *req;
-	bool ivgen;
-	unsigned int total_len;
+	void *req;
 	struct device *dev = drvdata_to_dev(drvdata);
 	int rc;
 
@@ -358,29 +355,29 @@
 
 	while (mgr->bl_len) {
 		bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list);
+		dev_dbg(dev, "---bl len: %d\n", mgr->bl_len);
+
 		spin_unlock(&mgr->bl_lock);
 
+
 		creq = &bli->creq;
-		req = (struct crypto_async_request *)creq->user_arg;
+		req = creq->user_arg;
 
 		/*
 		 * Notify the request we're moving out of the backlog
 		 * but only if we haven't done so already.
 		 */
 		if (!bli->notif) {
-			req->complete(req, -EINPROGRESS);
+			creq->user_cb(dev, req, -EINPROGRESS);
 			bli->notif = true;
 		}
 
-		ivgen = !!creq->ivgen_dma_addr_len;
-		total_len = bli->len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
-
 		spin_lock(&mgr->hw_lock);
 
-		rc = cc_queues_status(drvdata, mgr, total_len);
+		rc = cc_queues_status(drvdata, mgr, bli->len);
 		if (rc) {
 			/*
-			 * There is still not room in the FIFO for
+			 * There is still no room in the FIFO for
 			 * this request. Bail out. We'll return here
 			 * on the next completion irq.
 			 */
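The backlog bookkeeping here implements the crypto API convention: a request accepted onto the backlog was answered with -EBUSY at submit time, and must receive exactly one -EINPROGRESS notification when it leaves the backlog for the hardware queue, before its final completion. `bli->notif` guards against notifying twice, and with `user_arg` now opaque, the notification fires through `creq->user_cb` instead of a cast to `struct crypto_async_request`. What a consumer of that callback would see, sketched (handler and helper names hypothetical):

```c
#include <linux/device.h>
#include <linux/errno.h>

/* callback signature matches user_cb as used in the hunk above */
static void my_req_done(struct device *dev, void *req, int err)
{
	if (err == -EINPROGRESS) {
		/* left the backlog; the real completion is still coming */
		return;
	}

	/* final completion: 0, or an error such as -EPERM for an
	 * aborted CPP operation */
	finish_request(req, err);	/* hypothetical consumer helper */
}
```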
@@ -388,15 +385,9 @@
 			return;
 		}
 
-		rc = cc_do_send_request(drvdata, &bli->creq, bli->desc,
-					bli->len, false, ivgen);
-
+		cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len,
+				   false);
 		spin_unlock(&mgr->hw_lock);
-
-		if (rc != -EINPROGRESS) {
-			cc_pm_put_suspend(dev);
-			creq->user_cb(dev, req, rc);
-		}
 
 		/* Remove ourselves from the backlog list */
 		spin_lock(&mgr->bl_lock);
@@ -414,8 +405,6 @@
 {
 	int rc;
 	struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle;
-	bool ivgen = !!cc_req->ivgen_dma_addr_len;
-	unsigned int total_len = len + (ivgen ? CC_IVPOOL_SEQ_LEN : 0);
 	struct device *dev = drvdata_to_dev(drvdata);
 	bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;
 	gfp_t flags = cc_gfp_flags(req);
@@ -423,12 +412,12 @@
 
 	rc = cc_pm_get(dev);
 	if (rc) {
-		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+		dev_err(dev, "cc_pm_get returned %x\n", rc);
 		return rc;
 	}
 
 	spin_lock_bh(&mgr->hw_lock);
-	rc = cc_queues_status(drvdata, mgr, total_len);
+	rc = cc_queues_status(drvdata, mgr, len);
 
 #ifdef CC_DEBUG_FORCE_BACKLOG
 	if (backlog_ok)
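`cc_pm_get()` and `cc_pm_put_suspend()` bracket every request so the engine is only powered while work is pending; the error string now names the function that actually failed rather than its long-gone `ssi_` predecessor. The runtime-PM discipline such wrappers presumably sit on looks like this (generic helpers, not ccree code):

```c
#include <linux/pm_runtime.h>

static int with_powered_hw(struct device *dev)
{
	int rc = pm_runtime_get_sync(dev);	/* resume HW, hold a ref */

	if (rc < 0) {
		/* the usage count is raised even on failure: drop it */
		pm_runtime_put_noidle(dev);
		return rc;
	}

	/* ... issue requests while the reference is held ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* may power down after
						 * the autosuspend delay */
	return 0;
}
```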
@@ -452,9 +441,10 @@
 		return -EBUSY;
 	}
 
-	if (!rc)
-		rc = cc_do_send_request(drvdata, cc_req, desc, len, false,
-					ivgen);
+	if (!rc) {
+		cc_do_send_request(drvdata, cc_req, desc, len, false);
+		rc = -EINPROGRESS;
+	}
 
 	spin_unlock_bh(&mgr->hw_lock);
 	return rc;
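With `cc_do_send_request()` unable to fail, the return-code policy is concentrated in the caller: -EINPROGRESS means the request was queued and will complete through the callback; -EBUSY means it was parked on the backlog (offered only when the caller set CRYPTO_TFM_REQ_MAY_BACKLOG); any other negative value is a hard submission failure. A condensed model of that dispatch contract, with all names illustrative:

```c
#include <linux/errno.h>
#include <linux/types.h>

struct eng;	/* hypothetical engine and request types */
struct eng_req { unsigned int len; };

static int submit_async(struct eng *eng, struct eng_req *req,
			bool may_backlog)
{
	int rc = queue_room_check(eng, req->len);  /* e.g. -ENOSPC */

	if (rc == -ENOSPC && may_backlog) {
		backlog_park(eng, req);
		return -EBUSY;		/* caller waits for -EINPROGRESS */
	}
	if (rc)
		return rc;		/* hard failure */

	hw_enqueue(eng, req);		/* cannot fail once room exists */
	return -EINPROGRESS;		/* completion arrives via callback */
}
```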
@@ -474,7 +464,7 @@
 
 	rc = cc_pm_get(dev);
 	if (rc) {
-		dev_err(dev, "ssi_power_mgr_runtime_get returned %x\n", rc);
+		dev_err(dev, "cc_pm_get returned %x\n", rc);
 		return rc;
 	}
 
@@ -486,36 +476,28 @@
 			break;
 
 		spin_unlock_bh(&mgr->hw_lock);
-		if (rc != -EAGAIN) {
-			cc_pm_put_suspend(dev);
-			return rc;
-		}
 		wait_for_completion_interruptible(&drvdata->hw_queue_avail);
 		reinit_completion(&drvdata->hw_queue_avail);
 	}
 
-	rc = cc_do_send_request(drvdata, cc_req, desc, len, true, false);
+	cc_do_send_request(drvdata, cc_req, desc, len, true);
 	spin_unlock_bh(&mgr->hw_lock);
-
-	if (rc != -EINPROGRESS) {
-		cc_pm_put_suspend(dev);
-		return rc;
-	}
-
 	wait_for_completion(&cc_req->seq_compl);
 	return 0;
 }
 
-/*!
- * Enqueue caller request to crypto hardware during init process.
- * assume this function is not called in middle of a flow,
+/**
+ * send_request_init() - Enqueue caller request to crypto hardware during init
+ * process.
+ * Assume this function is not called in the middle of a flow,
  * since we set QUEUE_LAST_IND flag in the last descriptor.
 *
- * \param drvdata
- * \param desc The crypto sequence
- * \param len The crypto sequence length
+ * @drvdata: Associated device driver context
+ * @desc: The crypto sequence
+ * @len: The crypto sequence length
 *
- * \return int Returns "0" upon success
+ * Return:
+ * Returns "0" upon success
 */
 int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc,
 		      unsigned int len)
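`cc_send_sync_request()` now simply loops until `cc_queues_status()` reports room (sleeping on `hw_queue_avail` in between), enqueues with `add_comp` set so an artificial DOUT DMA marks the end of the sequence, and blocks on `cc_req->seq_compl`; the deleted error branches are dead code now that the enqueue itself cannot fail. The synchronous-over-asynchronous pattern underneath is the standard completion handshake, sketched with illustrative names:

```c
#include <linux/completion.h>
#include <linux/device.h>

struct sync_ctx {
	struct completion done;	/* analogous to cc_req->seq_compl */
};

/* completion callback run from the IRQ/tasklet path */
static void sync_done_cb(struct device *dev, void *arg, int err)
{
	struct sync_ctx *ctx = arg;

	complete(&ctx->done);
}

static int run_sync_request(struct device *dev, struct sync_ctx *ctx)
{
	init_completion(&ctx->done);

	/* ... enqueue hardware work with sync_done_cb as callback ... */

	/* uninterruptible, like the driver: the HW will answer */
	wait_for_completion(&ctx->done);
	return 0;
}
```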
@@ -534,8 +516,8 @@
 
 	/*
 	 * We are about to push command to the HW via the command registers
-	 * that may refernece hsot memory. We need to issue a memory barrier
-	 * to make sure there are no outstnading memory writes
+	 * that may reference host memory. We need to issue a memory barrier
+	 * to make sure there are no outstanding memory writes
 	 */
 	wmb();
 	enqueue_seq(drvdata, desc, len);
@@ -579,6 +561,8 @@
 		drvdata->request_mgr_handle;
 	unsigned int *tail = &request_mgr_handle->req_queue_tail;
 	unsigned int *head = &request_mgr_handle->req_queue_head;
+	int rc;
+	u32 mask;
 
 	while (request_mgr_handle->axi_completed) {
 		request_mgr_handle->axi_completed--;
@@ -596,8 +580,22 @@
 
 		cc_req = &request_mgr_handle->req_queue[*tail];
 
+		if (cc_req->cpp.is_cpp) {
+
+			dev_dbg(dev, "CPP request completion slot: %d alg:%d\n",
+				cc_req->cpp.slot, cc_req->cpp.alg);
+			mask = cc_cpp_int_mask(cc_req->cpp.alg,
+					       cc_req->cpp.slot);
+			rc = (drvdata->irq & mask ? -EPERM : 0);
+			dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask,
+				drvdata->irq, rc);
+		} else {
+			dev_dbg(dev, "None CPP request completion\n");
+			rc = 0;
+		}
+
 		if (cc_req->user_cb)
-			cc_req->user_cb(dev, cc_req->user_arg, 0);
+			cc_req->user_cb(dev, cc_req->user_arg, rc);
 		*tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1);
 		dev_dbg(dev, "Dequeue request tail=%u\n", *tail);
 		dev_dbg(dev, "Request completed. axi_completed=%d\n",
@@ -618,45 +616,48 @@
 	struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg;
 	struct cc_req_mgr_handle *request_mgr_handle =
 		drvdata->request_mgr_handle;
-
+	struct device *dev = drvdata_to_dev(drvdata);
 	u32 irq;
 
-	irq = (drvdata->irq & CC_COMP_IRQ_MASK);
+	dev_dbg(dev, "Completion handler called!\n");
+	irq = (drvdata->irq & drvdata->comp_mask);
 
-	if (irq & CC_COMP_IRQ_MASK) {
-		/* To avoid the interrupt from firing as we unmask it,
-		 * we clear it now
-		 */
-		cc_iowrite(drvdata, CC_REG(HOST_ICR), CC_COMP_IRQ_MASK);
+	/* To avoid the interrupt from firing as we unmask it,
+	 * we clear it now
+	 */
+	cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
 
-		/* Avoid race with above clear: Test completion counter
-		 * once more
-		 */
-		request_mgr_handle->axi_completed +=
-			cc_axi_comp_count(drvdata);
+	/* Avoid race with above clear: Test completion counter once more */
 
-		while (request_mgr_handle->axi_completed) {
-			do {
-				proc_completions(drvdata);
-				/* At this point (after proc_completions()),
-				 * request_mgr_handle->axi_completed is 0.
-				 */
-				request_mgr_handle->axi_completed =
-					cc_axi_comp_count(drvdata);
-			} while (request_mgr_handle->axi_completed > 0);
+	request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
 
-			cc_iowrite(drvdata, CC_REG(HOST_ICR),
-				   CC_COMP_IRQ_MASK);
+	dev_dbg(dev, "AXI completion after updated: %d\n",
+		request_mgr_handle->axi_completed);
 
+	while (request_mgr_handle->axi_completed) {
+		do {
+			drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR));
+			irq = (drvdata->irq & drvdata->comp_mask);
+			proc_completions(drvdata);
+
+			/* At this point (after proc_completions()),
+			 * request_mgr_handle->axi_completed is 0.
+			 */
 			request_mgr_handle->axi_completed +=
-				cc_axi_comp_count(drvdata);
-		}
+				cc_axi_comp_count(drvdata);
+		} while (request_mgr_handle->axi_completed > 0);
+
+		cc_iowrite(drvdata, CC_REG(HOST_ICR), irq);
+
+		request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata);
 	}
-	/* after verifing that there is nothing to do,
+
+	/* after verifying that there is nothing to do,
 	 * unmask AXI completion interrupt
 	 */
 	cc_iowrite(drvdata, CC_REG(HOST_IMR),
-		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~irq);
+		   cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask);
 
 	cc_proc_backlog(drvdata);
+	dev_dbg(dev, "Comp. handler done.\n");
 }
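The restructured handler drops the redundant `if (irq & CC_COMP_IRQ_MASK)` guard and makes the ack/drain/unmask ordering explicit: clear the latched completion bits first, re-read the AXI counter after the clear so events racing the ack are still observed, keep draining until the counter stays at zero, and only then unmask. It also re-samples HOST_IRR inside the loop so `proc_completions()` can attribute per-slot CPP abort bits to the right request. A skeleton of that discipline, with every type and helper hypothetical:

```c
#include <linux/types.h>

struct hw;	/* hypothetical device state with completed/comp_mask */

/* skeleton of the ack -> recheck -> unmask discipline; all names
 * are illustrative, not ccree API */
static void drain_irq(struct hw *hw, u32 comp_mask, unsigned int *completed)
{
	/* 1. ack now, so a later event can latch a fresh interrupt */
	irq_ack(hw, comp_mask);

	/* 2. read the counter only after the ack: completions that
	 *    raced the ack are still counted, never lost */
	*completed += count_completions(hw);

	while (*completed) {
		do {
			process_completions(hw);	/* drains to 0 */
			*completed += count_completions(hw);
		} while (*completed);

		irq_ack(hw, comp_mask);			/* ack stragglers */
		*completed += count_completions(hw);
	}

	/* 3. unmask only once provably idle */
	irq_unmask(hw, comp_mask);
}
```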
---|