xref: /linux/crypto/async_tx/async_pq.c (revision 0403e382)
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>

/**
 * scribble - space to hold a throwaway P or Q buffer for synchronous gen_syndrome
 */
static struct page *scribble;

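/* callers may mark a known-zero source by passing raid6_empty_zero_page
 * (cast to a struct page pointer) in blocks[].  This helper spots that
 * sentinel so the async path can drop the source from the DMA source
 * list and the sync path can feed the zero page data straight to
 * gen_syndrome()
 */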
static bool is_raid6_zero_block(struct page *p)
{
	return p == (void *) raid6_empty_zero_page;
}

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
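
/* layout sketch for a hypothetical 4-data-disk stripe (disks == 6):
 *
 *	blocks[0..3]	D0..D3 source pages
 *	blocks[4]	P destination, i.e. P(blocks, 6)
 *	blocks[5]	Q destination, i.e. Q(blocks, 6)
 *
 * over the GF(2^8) field described in the async_gen_syndrome() note
 * below (primitive polynomial 0x11d, generator {02}) the syndromes are:
 *
 *	P = D0 ^ D1 ^ ... ^ Dn-1
 *	Q = {02}^0*D0 ^ {02}^1*D1 ^ ... ^ {02}^(n-1)*Dn-1
 *
 * where ^ denotes xor (GF addition) and * denotes GF multiplication
 */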

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
		      const unsigned char *scfs, unsigned int offset, int disks,
		      size_t len, dma_addr_t *dma_src,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum dma_ctrl_flags dma_flags = 0;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned char coefs[src_cnt];
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;
	int idx;
	int i;

	/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
	if (P(blocks, disks))
		dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_P;
	if (Q(blocks, disks))
		dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_Q;

	/* convert source addresses, being careful to collapse 'empty'
	 * sources and update the coefficients accordingly
	 */
	for (i = 0, idx = 0; i < src_cnt; i++) {
		if (is_raid6_zero_block(blocks[i]))
			continue;
		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
					    DMA_TO_DEVICE);
		coefs[idx] = scfs[i];
		idx++;
	}
	src_cnt = idx;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward
		 * progress in case they cannot provide a descriptor
		 */
		for (;;) {
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &dma_src[src_off],
						     pq_src_cnt,
						     &coefs[src_off], len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}
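
/* a worked example of the submission loop above, with illustrative
 * numbers (the continuation budget is device dependent): assume
 * dma_maxpq(dma, 0) == 8 and dma_maxpq(dma, DMA_PREP_CONTINUE) == 5.
 * Sixteen sources are then issued as three chained descriptors of 8,
 * 5 and 3 sources; only the last descriptor carries the caller's
 * callback and ASYNC_TX_ACK, while the earlier ones are fenced, left
 * un-acked and keep the destinations mapped for the continuation
 */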

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (is_raid6_zero_block(blocks[i])) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = blocks[i];
		} else
			srcs[i] = page_address(blocks[i]) + offset;
	}
	raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.
 *
 * 'blocks' note: if submit->scribble is NULL then the contents of
 * 'blocks' may be overwritten
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	dma_addr_t *dma_src = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) {
		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
					     disks, len, dma_src, submit);
	}

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = scribble;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = scribble;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
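
/* usage sketch (not part of this file): generate P and Q for a 4+2
 * stripe.  'data', 'p_page', 'q_page', 'done', 'ctx' and 'addr_conv'
 * are hypothetical caller-owned names:
 *
 *	struct page *blocks[6];
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *	int i;
 *
 *	for (i = 0; i < 4; i++)
 *		blocks[i] = data[i];
 *	blocks[4] = p_page;	P lives at blocks[disks-2]
 *	blocks[5] = q_page;	Q lives at blocks[disks-1]
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, done, ctx,
 *			  addr_conv);
 *	tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
 */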

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires
 * a temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
						      NULL, 0, blocks, disks,
						      len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	dma_addr_t *dma_src = NULL;

	BUG_ON(disks < 4);

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device && disks <= dma_maxpq(device, 0)) {
		struct device *dev = device->dev;
		dma_addr_t *pq = &dma_src[disks-2];
		int i;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		if (!P(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		if (!Q(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (i = 0; i < disks; i++)
			if (likely(blocks[i])) {
				BUG_ON(is_raid6_zero_block(blocks[i]));
				dma_src[i] = dma_map_page(dev, blocks[i],
							  offset, len,
							  DMA_TO_DEVICE);
			}

		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
							    disks - 2,
							    raid6_gfexp,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}
		async_tx_submit(chan, tx, submit);

		return tx;
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
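
/* usage sketch (not part of this file): validate an existing P/Q pair
 * for the same 6-block layout as above.  'spare' and 'addr_conv' are
 * hypothetical caller-owned names; the synchronous fallback requires
 * both a spare page and scribble space:
 *
 *	enum sum_check_flags pqres;
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL,
 *			  addr_conv);
 *	tx = async_syndrome_val(blocks, 0, 6, PAGE_SIZE, &pqres,
 *				spare, &submit);
 *	async_tx_quiesce(&tx);
 *
 * then check pqres: SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are
 * set for any syndrome that failed to match its sources
 */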

static int __init async_pq_init(void)
{
	scribble = alloc_page(GFP_KERNEL);

	if (scribble)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	put_page(scribble);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");