```diff
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * caam - Freescale FSL CAAM support for hw_random
  *
  * Copyright 2011 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
  *
  * Based on caamalg.c crypto API driver.
  *
- * relationship between job descriptors to shared descriptors:
- *
- * ---------------                     --------------
- * | JobDesc #0  |-------------------->| ShareDesc  |
- * | *(buffer 0) |       |------------>| (generate) |
- * ---------------       |             | (move)     |
- *                       |             | (store)    |
- * ---------------       |             --------------
- * | JobDesc #1  |-------|
- * | *(buffer 1) |
- * ---------------
- *
- * A job desc looks like this:
- *
- * ---------------------
- * | Header            |
- * | ShareDesc Pointer |
- * | SEQ_OUT_PTR       |
- * | (output buffer)   |
- * ---------------------
- *
- * The SharedDesc never changes, and each job descriptor points to one of two
- * buffers for each device, from which the data will be copied into the
- * requested destination
  */

 #include <linux/hw_random.h>
 #include <linux/completion.h>
 #include <linux/atomic.h>
+#include <linux/kfifo.h>

 #include "compat.h"

@@ ... @@
 #include "jr.h"
 #include "error.h"

+#define CAAM_RNG_MAX_FIFO_STORE_SIZE 16
+
 /*
- * Maximum buffer size: maximum number of random, cache-aligned bytes that
- * will be generated and moved to seq out ptr (extlen not allowed)
+ * Length of used descriptors, see caam_init_desc()
  */
-#define RN_BUF_SIZE (0xffff / L1_CACHE_BYTES * \
-                     L1_CACHE_BYTES)
-
-/* length of descriptors */
-#define DESC_JOB_O_LEN (CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
-#define DESC_RNG_LEN (3 * CAAM_CMD_SZ)
-
-/* Buffer, its dma address and lock */
-struct buf_data {
-        u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
-        dma_addr_t addr;
-        struct completion filled;
-        u32 hw_desc[DESC_JOB_O_LEN];
-#define BUF_NOT_EMPTY 0
-#define BUF_EMPTY 1
-#define BUF_PENDING 2 /* Empty, but with job pending --don't submit another */
-        atomic_t empty;
-};
+#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ + \
+                           CAAM_CMD_SZ + \
+                           CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

 /* rng per-device context */
 struct caam_rng_ctx {
+        struct hwrng rng;
         struct device *jrdev;
-        dma_addr_t sh_desc_dma;
-        u32 sh_desc[DESC_RNG_LEN];
-        unsigned int cur_buf_idx;
-        int current_buf;
-        struct buf_data bufs[2];
+        struct device *ctrldev;
+        void *desc_async;
+        void *desc_sync;
+        struct work_struct worker;
+        struct kfifo fifo;
 };

-static struct caam_rng_ctx *rng_ctx;
+struct caam_rng_job_ctx {
+        struct completion *done;
+        int *err;
+};

-static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
+static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
 {
-        if (bd->addr)
-                dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
-                                 DMA_FROM_DEVICE);
+        return (struct caam_rng_ctx *)r->priv;
 }

```
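The new per-device context embeds the `struct hwrng` itself and records a back-pointer in `rng->priv`, which `to_caam_rng_ctx()` casts back to the context. A minimal sketch of that embedding pattern for a hypothetical driver (the `foo_*` names are illustrative, not part of this patch):

```c
#include <linux/hw_random.h>
#include <linux/kernel.h>

/* Hypothetical driver context that embeds its hwrng instance. */
struct foo_rng_ctx {
	struct hwrng rng;
	struct device *dev;
};

static int foo_rng_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
	/* Recover the container via the priv cookie set at registration
	 * time (the route caamrng takes) ... */
	struct foo_rng_ctx *ctx = (struct foo_rng_ctx *)rng->priv;
	/* ... or equivalently via container_of(), since the hwrng is
	 * embedded in the context. */
	struct foo_rng_ctx *same = container_of(rng, struct foo_rng_ctx, rng);

	return ctx == same ? 0 : -EINVAL;	/* placeholder body */
}
```

The `priv` route relies on the init path storing the context pointer at registration time; `container_of()` would work purely from the embedding.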
|---|
```diff
-static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
+static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
+                          void *context)
 {
-        struct device *jrdev = ctx->jrdev;
-
-        if (ctx->sh_desc_dma)
-                dma_unmap_single(jrdev, ctx->sh_desc_dma,
-                                 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
-        rng_unmap_buf(jrdev, &ctx->bufs[0]);
-        rng_unmap_buf(jrdev, &ctx->bufs[1]);
-}
-
-static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
-{
-        struct buf_data *bd;
-
-        bd = container_of(desc, struct buf_data, hw_desc[0]);
+        struct caam_rng_job_ctx *jctx = context;

         if (err)
-                caam_jr_strstatus(jrdev, err);
+                *jctx->err = caam_jr_strstatus(jrdev, err);

-        atomic_set(&bd->empty, BUF_NOT_EMPTY);
-        complete(&bd->filled);
-
-        /* Buffer refilled, invalidate cache */
-        dma_sync_single_for_cpu(jrdev, bd->addr, RN_BUF_SIZE, DMA_FROM_DEVICE);
-
-#ifdef DEBUG
-        print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
-                       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
-#endif
+        complete(jctx->done);
 }

-static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
+static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
 {
-        struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
-        struct device *jrdev = ctx->jrdev;
-        u32 *desc = bd->hw_desc;
-        int err;
+        init_job_desc(desc, 0);	/* + 1 cmd_sz */
+        /* Generate random bytes: + 1 cmd_sz */
+        append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
+                         OP_ALG_PR_ON);
+        /* Store bytes: + 1 cmd_sz + caam_ptr_sz */
+        append_fifo_store(desc, dst_dma,
+                          CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

-        dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
-        init_completion(&bd->filled);
-        err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
-        if (err)
-                complete(&bd->filled); /* don't wait on failed job*/
-        else
-                atomic_inc(&bd->empty); /* note if pending */
+        print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
+                             16, 4, desc, desc_bytes(desc), 1);

-        return err;
+        return desc;
 }

```
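`caam_init_desc()` now builds the whole job descriptor inline for every request: a job header, one ALGORITHM OPERATION command selecting the RNG with prediction resistance (`OP_ALG_PR_ON`), and a FIFO STORE of `CAAM_RNG_MAX_FIFO_STORE_SIZE` bytes to the destination DMA address; that is exactly the three command words plus one pointer budgeted by `CAAM_RNG_DESC_LEN`. A hypothetical sanity check (not part of the patch) that the built descriptor fits the preallocated buffer might look like this, assuming the driver-local `desc_constr.h` helpers:

```c
#include <linux/bug.h>
#include "desc_constr.h"	/* CAAM driver-local header: desc_bytes() */

/*
 * Illustrative only: verify that the descriptor produced by
 * caam_init_desc() fits the CAAM_RNG_DESC_LEN buffer that caam_init()
 * allocates for it.
 */
static void caam_rng_check_desc_len(u32 *desc)
{
	WARN_ONCE(desc_bytes(desc) > CAAM_RNG_DESC_LEN,
		  "caam rng descriptor (%d bytes) exceeds buffer (%zu bytes)\n",
		  desc_bytes(desc), (size_t)CAAM_RNG_DESC_LEN);
}
```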
|---|
```diff
-static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
+static int caam_rng_read_one(struct device *jrdev,
+                             void *dst, int len,
+                             void *desc,
+                             struct completion *done)
 {
-        struct caam_rng_ctx *ctx = rng_ctx;
-        struct buf_data *bd = &ctx->bufs[ctx->current_buf];
-        int next_buf_idx, copied_idx;
-        int err;
+        dma_addr_t dst_dma;
+        int err, ret = 0;
+        struct caam_rng_job_ctx jctx = {
+                .done = done,
+                .err = &ret,
+        };

-        if (atomic_read(&bd->empty)) {
-                /* try to submit job if there wasn't one */
-                if (atomic_read(&bd->empty) == BUF_EMPTY) {
-                        err = submit_job(ctx, 1);
-                        /* if can't submit job, can't even wait */
-                        if (err)
-                                return 0;
-                }
-                /* no immediate data, so exit if not waiting */
-                if (!wait)
-                        return 0;
+        len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

-                /* waiting for pending job */
-                if (atomic_read(&bd->empty))
-                        wait_for_completion(&bd->filled);
-        }
-
-        next_buf_idx = ctx->cur_buf_idx + max;
-        dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
-                __func__, ctx->current_buf, ctx->cur_buf_idx);
-
-        /* if enough data in current buffer */
-        if (next_buf_idx < RN_BUF_SIZE) {
-                memcpy(data, bd->buf + ctx->cur_buf_idx, max);
-                ctx->cur_buf_idx = next_buf_idx;
-                return max;
-        }
-
-        /* else, copy what's left... */
-        copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
-        memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
-        ctx->cur_buf_idx = 0;
-        atomic_set(&bd->empty, BUF_EMPTY);
-
-        /* ...refill... */
-        submit_job(ctx, 1);
-
-        /* and use next buffer */
-        ctx->current_buf = !ctx->current_buf;
-        dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);
-
-        /* since there already is some data read, don't wait */
-        return copied_idx + caam_read(rng, data + copied_idx,
-                                      max - copied_idx, false);
-}
-
-static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
-{
-        struct device *jrdev = ctx->jrdev;
-        u32 *desc = ctx->sh_desc;
-
-        init_sh_desc(desc, HDR_SHARE_SERIAL);
-
-        /* Generate random bytes */
-        append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);
-
-        /* Store bytes */
-        append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);
-
-        ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
-                                          DMA_TO_DEVICE);
-        if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
-                dev_err(jrdev, "unable to map shared descriptor\n");
-                return -ENOMEM;
-        }
-#ifdef DEBUG
-        print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
-                       desc, desc_bytes(desc), 1);
-#endif
-        return 0;
-}
-
-static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
-{
-        struct device *jrdev = ctx->jrdev;
-        struct buf_data *bd = &ctx->bufs[buf_id];
-        u32 *desc = bd->hw_desc;
-        int sh_len = desc_len(ctx->sh_desc);
-
-        init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
-                             HDR_REVERSE);
-
-        bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
-        if (dma_mapping_error(jrdev, bd->addr)) {
-                dev_err(jrdev, "unable to map dst\n");
+        dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
+        if (dma_mapping_error(jrdev, dst_dma)) {
+                dev_err(jrdev, "unable to map destination memory\n");
                 return -ENOMEM;
         }

-        append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
-#ifdef DEBUG
-        print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
-                       desc, desc_bytes(desc), 1);
-#endif
-        return 0;
+        init_completion(done);
+        err = caam_jr_enqueue(jrdev,
+                              caam_init_desc(desc, dst_dma),
+                              caam_rng_done, &jctx);
+        if (err == -EINPROGRESS) {
+                wait_for_completion(done);
+                err = 0;
+        }
+
+        dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);
+
+        return err ?: (ret ?: len);
+}
+
```
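`caam_rng_read_one()` submits the descriptor to the job ring and synchronizes with the callback through a `struct completion`: `caam_jr_enqueue()` returns `-EINPROGRESS` when the job was accepted, the caller sleeps in `wait_for_completion()`, and `caam_rng_done()` wakes it while reporting the job status through the shared `caam_rng_job_ctx`. A stripped-down sketch of that submit-then-wait shape, with hypothetical names (`foo_submit()` stands in for `caam_jr_enqueue()`):

```c
#include <linux/completion.h>
#include <linux/errno.h>

struct foo_result {
	struct completion done;
	int status;
};

/* Completion callback, typically run from IRQ or softirq context. */
static void foo_op_done(void *context, int status)
{
	struct foo_result *res = context;

	res->status = status;
	complete(&res->done);
}

/* Submit an asynchronous operation and block until the callback fires. */
static int foo_do_op_sync(int (*foo_submit)(void (*cb)(void *, int), void *arg))
{
	struct foo_result res = { .status = 0 };
	int err;

	init_completion(&res.done);
	err = foo_submit(foo_op_done, &res);
	if (err == -EINPROGRESS) {	/* accepted: wait for foo_op_done() */
		wait_for_completion(&res.done);
		err = res.status;
	}
	return err;
}
```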
|---|
```diff
+static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
+{
+        struct scatterlist sg[1];
+        struct completion done;
+        int len, nents;
+
+        sg_init_table(sg, ARRAY_SIZE(sg));
+        nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
+                                     CAAM_RNG_MAX_FIFO_STORE_SIZE);
+        if (!nents)
+                return;
+
+        len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
+                                sg[0].length,
+                                ctx->desc_async,
+                                &done);
+        if (len < 0)
+                return;
+
+        kfifo_dma_in_finish(&ctx->fifo, len);
+}
+
+static void caam_rng_worker(struct work_struct *work)
+{
+        struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
+                                                worker);
+        caam_rng_fill_async(ctx);
+}
+
```
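The refill path writes random bytes straight into the kfifo's free space: `kfifo_dma_in_prepare()` exposes that space as a scatterlist, the CAAM job stores into it, and `kfifo_dma_in_finish()` commits however many bytes actually arrived, so no bounce buffer or extra copy is needed. A self-contained sketch of the same in-place-fill / copy-out pattern, with a `memset()` standing in for the hardware write (illustrative only, not driver code):

```c
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Fill up to 16 bytes of 'fifo' free space in place, then drain into 'dst'. */
static int fifo_fill_then_drain(struct kfifo *fifo, void *dst, size_t want)
{
	struct scatterlist sg[1];
	unsigned int nents;

	sg_init_table(sg, ARRAY_SIZE(sg));

	/* Map the fifo's free space; returns 0 if the fifo is already full. */
	nents = kfifo_dma_in_prepare(fifo, sg, ARRAY_SIZE(sg), 16);
	if (nents) {
		/* A real producer (DMA engine, CAAM job, ...) would write
		 * sg[0].length bytes at sg_virt(&sg[0]); pretend it did. */
		memset(sg_virt(&sg[0]), 0xa5, sg[0].length);

		/* Commit the produced bytes so consumers can see them. */
		kfifo_dma_in_finish(fifo, sg[0].length);
	}

	/* Consumer side: copy out up to 'want' buffered bytes. */
	return kfifo_out(fifo, dst, want);
}
```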
|---|
```diff
+static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
+{
+        struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
+        int out;
+
+        if (wait) {
+                struct completion done;
+
+                return caam_rng_read_one(ctx->jrdev, dst, max,
+                                         ctx->desc_sync, &done);
+        }
+
+        out = kfifo_out(&ctx->fifo, dst, max);
+        if (kfifo_is_empty(&ctx->fifo))
+                schedule_work(&ctx->worker);
+
+        return out;
 }

```
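`caam_read()` implements the hwrng `->read` contract: with `wait` it performs one synchronous 16-byte job directly into the caller's buffer, and without `wait` it returns whatever the kfifo already holds (possibly zero bytes) and kicks the worker to refill in the background. Callers therefore have to tolerate short reads; a hypothetical in-kernel consumer looping over `->read` could look like the sketch below (illustrative only, real users go through the hw_random core rather than calling the hook directly):

```c
#include <linux/hw_random.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical helper: gather exactly 'need' bytes from an hwrng hook,
 * tolerating the short reads a buffered ->read implementation may return. */
static int gather_exact(struct hwrng *rng, u8 *buf, size_t need)
{
	size_t got = 0;
	int n;

	while (got < need) {
		n = rng->read(rng, buf + got, need - got, true);
		if (n <= 0)
			return n ? n : -EIO;	/* propagate hardware error */
		got += n;
	}
	return 0;
}
```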
|---|
```diff
 static void caam_cleanup(struct hwrng *rng)
 {
-        int i;
-        struct buf_data *bd;
+        struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

-        for (i = 0; i < 2; i++) {
-                bd = &rng_ctx->bufs[i];
-                if (atomic_read(&bd->empty) == BUF_PENDING)
-                        wait_for_completion(&bd->filled);
-        }
-
-        rng_unmap_ctx(rng_ctx);
+        flush_work(&ctx->worker);
+        caam_jr_free(ctx->jrdev);
+        kfifo_free(&ctx->fifo);
 }

-static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_init(struct hwrng *rng)
 {
-        struct buf_data *bd = &ctx->bufs[buf_id];
+        struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
         int err;

-        err = rng_create_job_desc(ctx, buf_id);
-        if (err)
-                return err;
+        ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
+                                      GFP_DMA | GFP_KERNEL);
+        if (!ctx->desc_sync)
+                return -ENOMEM;

-        atomic_set(&bd->empty, BUF_EMPTY);
-        submit_job(ctx, buf_id == ctx->current_buf);
-        wait_for_completion(&bd->filled);
+        ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
+                                       GFP_DMA | GFP_KERNEL);
+        if (!ctx->desc_async)
+                return -ENOMEM;
+
+        if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
+                        GFP_DMA | GFP_KERNEL))
+                return -ENOMEM;
+
+        INIT_WORK(&ctx->worker, caam_rng_worker);
+
+        ctx->jrdev = caam_jr_alloc();
+        err = PTR_ERR_OR_ZERO(ctx->jrdev);
+        if (err) {
+                kfifo_free(&ctx->fifo);
+                pr_err("Job Ring Device allocation for transform failed\n");
+                return err;
+        }
+
+        /*
+         * Fill async buffer to have early randomness data for
+         * hw_random
+         */
+        caam_rng_fill_async(ctx);

         return 0;
 }

-static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+int caam_rng_init(struct device *ctrldev);
+
+void caam_rng_exit(struct device *ctrldev)
 {
-        int err;
-
-        ctx->jrdev = jrdev;
-
-        err = rng_create_sh_desc(ctx);
-        if (err)
-                return err;
-
-        ctx->current_buf = 0;
-        ctx->cur_buf_idx = 0;
-
-        err = caam_init_buf(ctx, 0);
-        if (err)
-                return err;
-
-        return caam_init_buf(ctx, 1);
+        devres_release_group(ctrldev, caam_rng_init);
 }

-static struct hwrng caam_rng = {
-        .name = "rng-caam",
-        .cleanup = caam_cleanup,
-        .read = caam_read,
-};
-
-static void __exit caam_rng_exit(void)
+int caam_rng_init(struct device *ctrldev)
 {
-        caam_jr_free(rng_ctx->jrdev);
-        hwrng_unregister(&caam_rng);
-        kfree(rng_ctx);
-}
-
-static int __init caam_rng_init(void)
-{
-        struct device *dev;
-        struct device_node *dev_node;
-        struct platform_device *pdev;
-        struct device *ctrldev;
-        struct caam_drv_private *priv;
-        int err;
-
-        dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
-        if (!dev_node) {
-                dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
-                if (!dev_node)
-                        return -ENODEV;
-        }
-
-        pdev = of_find_device_by_node(dev_node);
-        if (!pdev) {
-                of_node_put(dev_node);
-                return -ENODEV;
-        }
-
-        ctrldev = &pdev->dev;
-        priv = dev_get_drvdata(ctrldev);
-        of_node_put(dev_node);
-
-        /*
-         * If priv is NULL, it's probably because the caam driver wasn't
-         * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
-         */
-        if (!priv)
-                return -ENODEV;
+        struct caam_rng_ctx *ctx;
+        u32 rng_inst;
+        struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
+        int ret;

         /* Check for an instantiated RNG before registration */
-        if (!(rd_reg32(&priv->ctrl->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK))
-                return -ENODEV;
+        if (priv->era < 10)
+                rng_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
+                            CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
+        else
+                rng_inst = rd_reg32(&priv->ctrl->vreg.rng) & CHA_VER_NUM_MASK;

-        dev = caam_jr_alloc();
-        if (IS_ERR(dev)) {
-                pr_err("Job Ring Device allocation for transform failed\n");
-                return PTR_ERR(dev);
+        if (!rng_inst)
+                return 0;
+
+        if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
+                return -ENOMEM;
+
+        ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
+        if (!ctx)
+                return -ENOMEM;
+
+        ctx->ctrldev = ctrldev;
+
+        ctx->rng.name = "rng-caam";
+        ctx->rng.init = caam_init;
+        ctx->rng.cleanup = caam_cleanup;
+        ctx->rng.read = caam_read;
+        ctx->rng.priv = (unsigned long)ctx;
+        ctx->rng.quality = 1024;
+
+        dev_info(ctrldev, "registering rng-caam\n");
+
+        ret = devm_hwrng_register(ctrldev, &ctx->rng);
+        if (ret) {
+                caam_rng_exit(ctrldev);
+                return ret;
         }
-        rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
-        if (!rng_ctx) {
-                err = -ENOMEM;
-                goto free_caam_alloc;
-        }
-        err = caam_init_rng(rng_ctx, dev);
-        if (err)
-                goto free_rng_ctx;

-        dev_info(dev, "registering rng-caam\n");
-
-        err = hwrng_register(&caam_rng);
-        if (!err)
-                return err;
-
-free_rng_ctx:
-        kfree(rng_ctx);
-free_caam_alloc:
-        caam_jr_free(dev);
-        return err;
+        devres_close_group(ctrldev, caam_rng_init);
+        return 0;
 }
-
-module_init(caam_rng_init);
-module_exit(caam_rng_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
-MODULE_AUTHOR("Freescale Semiconductor - NMG");
```
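With the standalone `module_init()`/`module_exit()` boilerplate gone, registration is driven through the controller device: `caam_rng_init(ctrldev)` opens a devres group keyed on the function itself, makes every allocation devm-managed against `ctrldev`, and closes the group on success, so `caam_rng_exit()` can tear everything down with a single `devres_release_group()` call. A minimal, self-contained sketch of that devres-group idiom for a hypothetical sub-driver (the `foo_*` names are illustrative):

```c
#include <linux/device.h>

int foo_sub_init(struct device *parent);

/* Release every resource allocated inside foo_sub_init()'s group. */
void foo_sub_exit(struct device *parent)
{
	devres_release_group(parent, foo_sub_init);
}

int foo_sub_init(struct device *parent)
{
	void *buf;

	/* Open a group keyed on this function so it can be released early,
	 * independently of the parent device's other devres entries. */
	if (!devres_open_group(parent, foo_sub_init, GFP_KERNEL))
		return -ENOMEM;

	buf = devm_kzalloc(parent, 64, GFP_KERNEL);
	if (!buf) {
		foo_sub_exit(parent);	/* drop the half-built group */
		return -ENOMEM;
	}

	devres_close_group(parent, foo_sub_init);
	return 0;
}
```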
|---|