| .. | .. |
|---|
| 17 | 17 | |
|---|
| 18 | 18 | #define CRYPTO_CTX_SIZE 256 |
|---|
| 19 | 19 | |
|---|
| 20 | | -/* command queue alignments */ |
|---|
| 21 | | -#define PKT_IN_ALIGN 16 |
|---|
| 20 | +/* packet input ring alignments */ |
|---|
| 21 | +#define PKTIN_Q_ALIGN_BYTES 16 |
|---|
| 22 | +/* AQM Queue input alignments */ |
|---|
| 23 | +#define AQM_Q_ALIGN_BYTES 32 |
|---|
| 22 | 24 | |
|---|
| 23 | | -static int cmdq_common_init(struct nitrox_cmdq *cmdq) |
|---|
| 25 | +static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) |
|---|
| 24 | 26 | { |
|---|
| 25 | 27 | struct nitrox_device *ndev = cmdq->ndev; |
|---|
| 26 | | - u32 qsize; |
|---|
| 27 | 28 | |
|---|
| 28 | | - qsize = (ndev->qlen) * cmdq->instr_size; |
|---|
| 29 | | - cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev), |
|---|
| 30 | | - (qsize + PKT_IN_ALIGN), |
|---|
| 31 | | - &cmdq->dma_unaligned, |
|---|
| 32 | | - GFP_KERNEL); |
|---|
| 33 | | - if (!cmdq->head_unaligned) |
|---|
| 29 | + cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; |
|---|
| 30 | + cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, |
|---|
| 31 | + &cmdq->unalign_dma, |
|---|
| 32 | + GFP_KERNEL); |
|---|
| 33 | + if (!cmdq->unalign_base) |
|---|
| 34 | 34 | return -ENOMEM; |
|---|
| 35 | 35 | |
|---|
| 36 | | - cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN); |
|---|
| 37 | | - cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN); |
|---|
| 38 | | - cmdq->qsize = (qsize + PKT_IN_ALIGN); |
|---|
| 36 | + cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes); |
|---|
| 37 | + cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma); |
|---|
| 39 | 38 | cmdq->write_idx = 0; |
|---|
| 40 | 39 | |
|---|
| 41 | | - spin_lock_init(&cmdq->response_lock); |
|---|
| 42 | | - spin_lock_init(&cmdq->cmdq_lock); |
|---|
| 43 | | - spin_lock_init(&cmdq->backlog_lock); |
|---|
| 40 | + spin_lock_init(&cmdq->cmd_qlock); |
|---|
| 41 | + spin_lock_init(&cmdq->resp_qlock); |
|---|
| 42 | + spin_lock_init(&cmdq->backlog_qlock); |
|---|
| 44 | 43 | |
|---|
| 45 | 44 | INIT_LIST_HEAD(&cmdq->response_head); |
|---|
| 46 | 45 | INIT_LIST_HEAD(&cmdq->backlog_head); |
|---|
| .. | .. |
|---|
| 51 | 50 | return 0; |
|---|
| 52 | 51 | } |
|---|
| 53 | 52 | |
|---|
| 54 | | -static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq) |
|---|
| 53 | +static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq) |
|---|
| 55 | 54 | { |
|---|
| 56 | | - struct nitrox_device *ndev = cmdq->ndev; |
|---|
| 55 | + cmdq->write_idx = 0; |
|---|
| 56 | + atomic_set(&cmdq->pending_count, 0); |
|---|
| 57 | + atomic_set(&cmdq->backlog_count, 0); |
|---|
| 58 | +} |
|---|
| 57 | 59 | |
|---|
| 60 | +static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) |
|---|
| 61 | +{ |
|---|
| 62 | + struct nitrox_device *ndev; |
|---|
| 63 | + |
|---|
| 64 | + if (!cmdq) |
|---|
| 65 | + return; |
|---|
| 66 | + |
|---|
| 67 | + if (!cmdq->unalign_base) |
|---|
| 68 | + return; |
|---|
| 69 | + |
|---|
| 70 | + ndev = cmdq->ndev; |
|---|
| 58 | 71 | cancel_work_sync(&cmdq->backlog_qflush); |
|---|
| 59 | 72 | |
|---|
| 60 | 73 | dma_free_coherent(DEV(ndev), cmdq->qsize, |
|---|
| 61 | | - cmdq->head_unaligned, cmdq->dma_unaligned); |
|---|
| 62 | | - |
|---|
| 63 | | - atomic_set(&cmdq->pending_count, 0); |
|---|
| 64 | | - atomic_set(&cmdq->backlog_count, 0); |
|---|
| 74 | + cmdq->unalign_base, cmdq->unalign_dma); |
|---|
| 75 | + nitrox_cmdq_reset(cmdq); |
|---|
| 65 | 76 | |
|---|
| 66 | 77 | cmdq->dbell_csr_addr = NULL; |
|---|
| 67 | | - cmdq->head = NULL; |
|---|
| 78 | + cmdq->compl_cnt_csr_addr = NULL; |
|---|
| 79 | + cmdq->unalign_base = NULL; |
|---|
| 80 | + cmdq->base = NULL; |
|---|
| 81 | + cmdq->unalign_dma = 0; |
|---|
| 68 | 82 | cmdq->dma = 0; |
|---|
| 69 | 83 | cmdq->qsize = 0; |
|---|
| 70 | 84 | cmdq->instr_size = 0; |
|---|
| 71 | 85 | } |
|---|
| 72 | 86 | |
|---|
| 73 | | -static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev) |
|---|
| 87 | +static void nitrox_free_aqm_queues(struct nitrox_device *ndev) |
|---|
| 74 | 88 | { |
|---|
| 75 | 89 | int i; |
|---|
| 76 | 90 | |
|---|
| 77 | 91 | for (i = 0; i < ndev->nr_queues; i++) { |
|---|
| 78 | | - struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i]; |
|---|
| 79 | | - |
|---|
| 80 | | - cmdq_common_cleanup(cmdq); |
|---|
| 92 | + nitrox_cmdq_cleanup(ndev->aqmq[i]); |
|---|
| 93 | + kfree_sensitive(ndev->aqmq[i]); |
|---|
| 94 | + ndev->aqmq[i] = NULL; |
|---|
| 81 | 95 | } |
|---|
| 82 | | - kfree(ndev->pkt_cmdqs); |
|---|
| 83 | | - ndev->pkt_cmdqs = NULL; |
|---|
| 84 | 96 | } |
|---|
| 85 | 97 | |
|---|
| 86 | | -static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev) |
|---|
| 98 | +static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev) |
|---|
| 87 | 99 | { |
|---|
| 88 | | - int i, err, size; |
|---|
| 100 | + int i, err; |
|---|
| 89 | 101 | |
|---|
| 90 | | - size = ndev->nr_queues * sizeof(struct nitrox_cmdq); |
|---|
| 91 | | - ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL); |
|---|
| 92 | | - if (!ndev->pkt_cmdqs) |
|---|
| 102 | + for (i = 0; i < ndev->nr_queues; i++) { |
|---|
| 103 | + struct nitrox_cmdq *cmdq; |
|---|
| 104 | + u64 offset; |
|---|
| 105 | + |
|---|
| 106 | + cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node); |
|---|
| 107 | + if (!cmdq) { |
|---|
| 108 | + err = -ENOMEM; |
|---|
| 109 | + goto aqmq_fail; |
|---|
| 110 | + } |
|---|
| 111 | + |
|---|
| 112 | + cmdq->ndev = ndev; |
|---|
| 113 | + cmdq->qno = i; |
|---|
| 114 | + cmdq->instr_size = sizeof(struct aqmq_command_s); |
|---|
| 115 | + |
|---|
| 116 | + /* AQM Queue Doorbell Counter Register Address */ |
|---|
| 117 | + offset = AQMQ_DRBLX(i); |
|---|
| 118 | + cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset); |
|---|
| 119 | + /* AQM Queue Commands Completed Count Register Address */ |
|---|
| 120 | + offset = AQMQ_CMD_CNTX(i); |
|---|
| 121 | + cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset); |
|---|
| 122 | + |
|---|
| 123 | + err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES); |
|---|
| 124 | + if (err) { |
|---|
| 125 | + kfree_sensitive(cmdq); |
|---|
| 126 | + goto aqmq_fail; |
|---|
| 127 | + } |
|---|
| 128 | + ndev->aqmq[i] = cmdq; |
|---|
| 129 | + } |
|---|
| 130 | + |
|---|
| 131 | + return 0; |
|---|
| 132 | + |
|---|
| 133 | +aqmq_fail: |
|---|
| 134 | + nitrox_free_aqm_queues(ndev); |
|---|
| 135 | + return err; |
|---|
| 136 | +} |
|---|
| 137 | + |
|---|
| 138 | +static void nitrox_free_pktin_queues(struct nitrox_device *ndev) |
|---|
| 139 | +{ |
|---|
| 140 | + int i; |
|---|
| 141 | + |
|---|
| 142 | + for (i = 0; i < ndev->nr_queues; i++) { |
|---|
| 143 | + struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i]; |
|---|
| 144 | + |
|---|
| 145 | + nitrox_cmdq_cleanup(cmdq); |
|---|
| 146 | + } |
|---|
| 147 | + kfree(ndev->pkt_inq); |
|---|
| 148 | + ndev->pkt_inq = NULL; |
|---|
| 149 | +} |
|---|
| 150 | + |
|---|
| 151 | +static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev) |
|---|
| 152 | +{ |
|---|
| 153 | + int i, err; |
|---|
| 154 | + |
|---|
| 155 | + ndev->pkt_inq = kcalloc_node(ndev->nr_queues, |
|---|
| 156 | + sizeof(struct nitrox_cmdq), |
|---|
| 157 | + GFP_KERNEL, ndev->node); |
|---|
| 158 | + if (!ndev->pkt_inq) |
|---|
| 93 | 159 | return -ENOMEM; |
|---|
| 94 | 160 | |
|---|
| 95 | 161 | for (i = 0; i < ndev->nr_queues; i++) { |
|---|
| 96 | 162 | struct nitrox_cmdq *cmdq; |
|---|
| 97 | 163 | u64 offset; |
|---|
| 98 | 164 | |
|---|
| 99 | | - cmdq = &ndev->pkt_cmdqs[i]; |
|---|
| 165 | + cmdq = &ndev->pkt_inq[i]; |
|---|
| 100 | 166 | cmdq->ndev = ndev; |
|---|
| 101 | 167 | cmdq->qno = i; |
|---|
| 102 | 168 | cmdq->instr_size = sizeof(struct nps_pkt_instr); |
|---|
| 103 | 169 | |
|---|
| 170 | + /* packet input ring doorbell address */ |
|---|
| 104 | 171 | offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i); |
|---|
| 105 | | - /* SE ring doorbell address for this queue */ |
|---|
| 106 | 172 | cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset); |
|---|
| 173 | + /* packet solicit port completion count address */ |
|---|
| 174 | + offset = NPS_PKT_SLC_CNTSX(i); |
|---|
| 175 | + cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset); |
|---|
| 107 | 176 | |
|---|
| 108 | | - err = cmdq_common_init(cmdq); |
|---|
| 177 | + err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES); |
|---|
| 109 | 178 | if (err) |
|---|
| 110 | | - goto pkt_cmdq_fail; |
|---|
| 179 | + goto pktq_fail; |
|---|
| 111 | 180 | } |
|---|
| 112 | 181 | return 0; |
|---|
| 113 | 182 | |
|---|
| 114 | | -pkt_cmdq_fail: |
|---|
| 115 | | - nitrox_cleanup_pkt_cmdqs(ndev); |
|---|
| 183 | +pktq_fail: |
|---|
| 184 | + nitrox_free_pktin_queues(ndev); |
|---|
| 116 | 185 | return err; |
|---|
| 117 | 186 | } |
|---|
| 118 | 187 | |
|---|
| .. | .. |
|---|
| 122 | 191 | |
|---|
| 123 | 192 | /* Crypto context pool, 16 byte aligned */ |
|---|
| 124 | 193 | size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr); |
|---|
| 125 | | - ndev->ctx_pool = dma_pool_create("crypto-context", |
|---|
| 194 | + ndev->ctx_pool = dma_pool_create("nitrox-context", |
|---|
| 126 | 195 | DEV(ndev), size, 16, 0); |
|---|
| 127 | 196 | if (!ndev->ctx_pool) |
|---|
| 128 | 197 | return -ENOMEM; |
|---|
| .. | .. |
|---|
| 154 | 223 | if (!chdr) |
|---|
| 155 | 224 | return NULL; |
|---|
| 156 | 225 | |
|---|
| 157 | | - vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma); |
|---|
| 226 | + vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma); |
|---|
| 158 | 227 | if (!vaddr) { |
|---|
| 159 | 228 | kfree(chdr); |
|---|
| 160 | 229 | return NULL; |
|---|
| .. | .. |
|---|
| 206 | 275 | if (err) |
|---|
| 207 | 276 | return err; |
|---|
| 208 | 277 | |
|---|
| 209 | | - err = nitrox_init_pkt_cmdqs(ndev); |
|---|
| 278 | + err = nitrox_alloc_pktin_queues(ndev); |
|---|
| 210 | 279 | if (err) |
|---|
| 211 | 280 | destroy_crypto_dma_pool(ndev); |
|---|
| 281 | + |
|---|
| 282 | + err = nitrox_alloc_aqm_queues(ndev); |
|---|
| 283 | + if (err) { |
|---|
| 284 | + nitrox_free_pktin_queues(ndev); |
|---|
| 285 | + destroy_crypto_dma_pool(ndev); |
|---|
| 286 | + } |
|---|
| 212 | 287 | |
|---|
| 213 | 288 | return err; |
|---|
| 214 | 289 | } |
|---|
| .. | .. |
|---|
| 219 | 294 | */ |
|---|
| 220 | 295 | void nitrox_common_sw_cleanup(struct nitrox_device *ndev) |
|---|
| 221 | 296 | { |
|---|
| 222 | | - nitrox_cleanup_pkt_cmdqs(ndev); |
|---|
| 297 | + nitrox_free_aqm_queues(ndev); |
|---|
| 298 | + nitrox_free_pktin_queues(ndev); |
|---|
| 223 | 299 | destroy_crypto_dma_pool(ndev); |
|---|
| 224 | 300 | } |
|---|