// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

#define CAAM_RNG_MAX_FIFO_STORE_SIZE	16

/*
 * Length of the job descriptors built by caam_init_desc():
 * header + operation + FIFO store (command + pointer)
 */
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ +				\
			   CAAM_CMD_SZ +				\
			   CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

/* rng per-device context */
struct caam_rng_ctx {
	struct hwrng rng;
	struct device *jrdev;
	struct device *ctrldev;
	void *desc_async;	/* descriptor used by the async FIFO refill */
	void *desc_sync;	/* descriptor used by synchronous (wait) reads */
	struct work_struct worker;
	struct kfifo fifo;
};

/* per-job state shared with the job ring completion callback */
struct caam_rng_job_ctx {
	struct completion *done;
	int *err;
};

static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
	return (struct caam_rng_ctx *)r->priv;
}

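/* Job ring callback: decode the CAAM status word and wake up the waiter */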
static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct caam_rng_job_ctx *jctx = context;

	if (err)
		*jctx->err = caam_jr_strstatus(jrdev, err);

	complete(jctx->done);
}

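/*
 * Build the three-command job descriptor accounted for in CAAM_RNG_DESC_LEN:
 * header, RNG generate operation, FIFO store of
 * CAAM_RNG_MAX_FIFO_STORE_SIZE bytes to dst_dma.
 */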
static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Generate random bytes: + 1 cmd_sz */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
			 OP_ALG_PR_ON);
	/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
	append_fifo_store(desc, dst_dma,
			  CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}

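/*
 * Enqueue one RNG job and wait for it to complete, filling dst with
 * CAAM_RNG_MAX_FIFO_STORE_SIZE bytes.  Returns the number of bytes
 * produced, or a negative errno.
 */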
static int caam_rng_read_one(struct device *jrdev,
			     void *dst, int len,
			     void *desc,
			     struct completion *done)
{
	dma_addr_t dst_dma;
	int err, ret = 0;
	struct caam_rng_job_ctx jctx = {
		.done = done,
		.err  = &ret,
	};

	/* The descriptor always stores a fixed-size block, whatever len says */
	len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

	dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map destination memory\n");
		return -ENOMEM;
	}

	init_completion(done);
	err = caam_jr_enqueue(jrdev,
			      caam_init_desc(desc, dst_dma),
			      caam_rng_done, &jctx);
	if (err == -EINPROGRESS) {
		wait_for_completion(done);
		err = 0;
	}

	dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);

	return err ?: (ret ?: len);
}

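/* Refill the kfifo with one block of random bytes using the async descriptor */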
static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
	struct scatterlist sg[1];
	struct completion done;
	int len, nents;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
				     CAAM_RNG_MAX_FIFO_STORE_SIZE);
	if (!nents)
		return;

	len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
				sg[0].length,
				ctx->desc_async,
				&done);
	if (len < 0)
		return;

	kfifo_dma_in_finish(&ctx->fifo, len);
}

static void caam_rng_worker(struct work_struct *work)
{
	struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
						worker);
	caam_rng_fill_async(ctx);
}

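/*
 * hwrng .read callback: if the caller may sleep, run a job synchronously;
 * otherwise hand out buffered bytes and kick the worker to refill the FIFO.
 */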
static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int out;

	if (wait) {
		struct completion done;

		return caam_rng_read_one(ctx->jrdev, dst, max,
					 ctx->desc_sync, &done);
	}

	out = kfifo_out(&ctx->fifo, dst, max);
	if (kfifo_is_empty(&ctx->fifo))
		schedule_work(&ctx->worker);

	return out;
}

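/* hwrng .cleanup callback: quiesce the worker, release the job ring and FIFO */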
static void caam_cleanup(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

	flush_work(&ctx->worker);
	caam_jr_free(ctx->jrdev);
	kfifo_free(&ctx->fifo);
}

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
/* Read len bytes in CAAM_RNG_MAX_FIFO_STORE_SIZE chunks and dump them */
static inline void test_len(struct hwrng *rng, size_t len, bool wait)
{
	u8 *buf;
	int read_len;
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	struct device *dev = ctx->ctrldev;

	buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL);
	if (!buf)
		return;

	while (len > 0) {
		read_len = rng->read(rng, buf, len, wait);

		if (read_len < 0 || (read_len == 0 && wait)) {
			dev_err(dev, "RNG read failed: received %d bytes\n",
				read_len);
			kfree(buf);
			return;
		}

		print_hex_dump_debug("random bytes@: ",
			DUMP_PREFIX_ADDRESS, 16, 4,
			buf, read_len, 1);

		len -= read_len;
	}

	kfree(buf);
}

static inline void test_mode_once(struct hwrng *rng, bool wait)
{
	test_len(rng, 32, wait);
	test_len(rng, 64, wait);
	test_len(rng, 128, wait);
}

static void self_test(struct hwrng *rng)
{
	pr_info("Executing RNG SELF-TEST with wait\n");
	test_mode_once(rng, true);
}
#endif

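/*
 * hwrng .init callback: allocate the job descriptors and the FIFO, grab a
 * job ring, then prime the FIFO so early readers get data immediately.
 */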
static int caam_init(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int err;

	ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				      GFP_KERNEL);
	if (!ctx->desc_sync)
		return -ENOMEM;

	ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				       GFP_KERNEL);
	if (!ctx->desc_async)
		return -ENOMEM;

	if (kfifo_alloc(&ctx->fifo, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE,
					  dma_get_cache_alignment()),
			GFP_KERNEL))
		return -ENOMEM;

	INIT_WORK(&ctx->worker, caam_rng_worker);

	ctx->jrdev = caam_jr_alloc();
	err = PTR_ERR_OR_ZERO(ctx->jrdev);
	if (err) {
		kfifo_free(&ctx->fifo);
		pr_err("Job Ring Device allocation failed\n");
		return err;
	}

	/*
	 * Fill the async buffer so hw_random has early randomness
	 * data available
	 */
	caam_rng_fill_async(ctx);

	return 0;
}

/* Forward declaration: caam_rng_init() doubles as the devres group id */
int caam_rng_init(struct device *ctrldev);

void caam_rng_exit(struct device *ctrldev)
{
	devres_release_group(ctrldev, caam_rng_init);
}

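/*
 * Entry point from the controller driver: register with hw_random if an
 * RNG block is instantiated in this CAAM.
 */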
int caam_rng_init(struct device *ctrldev)
{
	struct caam_rng_ctx *ctx;
	u32 rng_inst;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int ret;

	/* Check for an instantiated RNG before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst)
		return 0;

	if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
		return -ENOMEM;

	ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ctrldev = ctrldev;

	ctx->rng.name    = "rng-caam";
	ctx->rng.init    = caam_init;
	ctx->rng.cleanup = caam_cleanup;
	ctx->rng.read    = caam_read;
	ctx->rng.priv    = (unsigned long)ctx;

	dev_info(ctrldev, "registering rng-caam\n");

	ret = devm_hwrng_register(ctrldev, &ctx->rng);
	if (ret) {
		caam_rng_exit(ctrldev);
		return ret;
	}

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST
	self_test(&ctx->rng);
#endif

	devres_close_group(ctrldev, caam_rng_init);
	return 0;
}