/* Copyright 2013-2017 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <string.h>
#include <inttypes.h>

#include <libflash/libflash.h>
#include <libflash/errors.h>

#include "blocklevel.h"
#include "ecc.h"

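/*
 * When the array of ECC protected ranges fills up, grow it by this many
 * entries at a time to avoid a realloc() on every insertion.
 */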
#define PROT_REALLOC_NUM 25

/*
 * This function returns tristate values:
 *  1 - The region is entirely ECC protected
 *  0 - The region is not ECC protected
 * -1 - The region is only partially ECC protected
 */
static int ecc_protected(struct blocklevel_device *bl, uint64_t pos, uint64_t len, uint64_t *start)
{
	int i;

	/* Length of 0 is nonsensical so add 1 */
	if (len == 0)
		len = 1;

	for (i = 0; i < bl->ecc_prot.n_prot; i++) {
		/* Fits entirely within the range */
		if (bl->ecc_prot.prot[i].start <= pos &&
				bl->ecc_prot.prot[i].start + bl->ecc_prot.prot[i].len >= pos + len) {
			if (start)
				*start = bl->ecc_prot.prot[i].start;
			return 1;
		}

		/*
		 * Even if ranges are merged we can't currently guarantee two
		 * contiguous regions are sanely ECC protected so a partial fit
		 * is no good.
		 */
		if ((bl->ecc_prot.prot[i].start >= pos && bl->ecc_prot.prot[i].start < pos + len) ||
		   (bl->ecc_prot.prot[i].start <= pos &&
			bl->ecc_prot.prot[i].start + bl->ecc_prot.prot[i].len > pos)) {
			if (start)
				*start = bl->ecc_prot.prot[i].start;
			return -1;
		}
	}
	return 0;
}

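/*
 * Convert a position inside an ECC protected region into the raw flash
 * position, accounting for the one ECC byte that follows every
 * BYTES_PER_ECC bytes of data.
 */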
static uint64_t with_ecc_pos(uint64_t ecc_start, uint64_t pos)
{
	return pos + ((pos - ecc_start) / (BYTES_PER_ECC));
}

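/*
 * Some backends need the underlying device (re)opened around each access.
 * keep_alive lets a caller hold the device open across a batch of
 * operations, in which case reacquire() and release() become no-ops.
 */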
static int reacquire(struct blocklevel_device *bl)
{
	if (!bl->keep_alive && bl->reacquire)
		return bl->reacquire(bl);
	return 0;
}

static int release(struct blocklevel_device *bl)
{
	int rc = 0;
	if (!bl->keep_alive && bl->release) {
		/* This is frequently the error return path, preserve errno */
		int err = errno;
		rc = bl->release(bl);
		errno = err;
	}
	return rc;
}

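/*
 * Read directly from the backend with no ECC handling; the caller sees the
 * raw flash contents, ECC bytes included.
 */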
int blocklevel_raw_read(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len)
{
	int rc;

	FL_DBG("%s: 0x%" PRIx64 "\t%p\t0x%" PRIx64 "\n", __func__, pos, buf, len);
	if (!bl || !bl->read || !buf) {
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	rc = reacquire(bl);
	if (rc)
		return rc;

	rc = bl->read(bl, pos, buf, len);

	release(bl);

	return rc;
}

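/*
 * ECC aware read. If the requested range is ECC protected, the raw
 * (data + ECC) bytes are read and the ECC is verified and stripped before
 * the data is copied into the caller's buffer.
 */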
int blocklevel_read(struct blocklevel_device *bl, uint64_t pos, void *buf, uint64_t len)
{
	int rc, ecc_protection;
	struct ecc64 *buffer;
	uint64_t ecc_pos, ecc_start, ecc_diff, ecc_len;

	FL_DBG("%s: 0x%" PRIx64 "\t%p\t0x%" PRIx64 "\n", __func__, pos, buf, len);
	if (!bl || !buf) {
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	ecc_protection = ecc_protected(bl, pos, len, &ecc_start);

	FL_DBG("%s: 0x%" PRIx64 " for 0x%" PRIx64 " ecc=%s\n",
		__func__, pos, len, ecc_protection ?
		(ecc_protection == -1 ? "partial" : "yes") : "no");

	if (!ecc_protection)
		return blocklevel_raw_read(bl, pos, buf, len);

	/*
	 * The region we're reading from is only partially ECC protected.
	 * Perhaps one day in the future blocklevel can cope with this.
	 */
	if (ecc_protection == -1) {
		FL_ERR("%s: Can't cope with partial ecc\n", __func__);
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	pos = with_ecc_pos(ecc_start, pos);

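	/*
	 * Align the raw read down to an ECC word boundary: ecc_diff is how
	 * far into the first ECC word the caller's data starts and ecc_len
	 * is the raw length to read, ECC bytes included.
	 */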
	ecc_pos = ecc_buffer_align(ecc_start, pos);
	ecc_diff = pos - ecc_pos;
	ecc_len = ecc_buffer_size(len + ecc_diff);

	FL_DBG("%s: adjusted_pos: 0x%" PRIx64 ", ecc_pos: 0x%" PRIx64
			", ecc_diff: 0x%" PRIx64 ", ecc_len: 0x%" PRIx64 "\n",
			__func__, pos, ecc_pos, ecc_diff, ecc_len);
	buffer = malloc(ecc_len);
	if (!buffer) {
		errno = ENOMEM;
		rc = FLASH_ERR_MALLOC_FAILED;
		goto out;
	}

	rc = blocklevel_raw_read(bl, ecc_pos, buffer, ecc_len);
	if (rc)
		goto out;

	/*
	 * Could optimise and simply call memcpy_from_ecc() when ecc_diff == 0,
	 * but the _unaligned variant checks for that and basically does it
	 * for us.
	 */
	if (memcpy_from_ecc_unaligned(buf, buffer, len, ecc_diff)) {
		errno = EBADF;
		rc = FLASH_ERR_ECC_INVALID;
	}

out:
	free(buffer);
	return rc;
}

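/*
 * Write directly to the backend with no ECC handling; the buffer is
 * written as-is, so it must already contain any required ECC bytes.
 */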
int blocklevel_raw_write(struct blocklevel_device *bl, uint64_t pos,
		const void *buf, uint64_t len)
{
	int rc;

	FL_DBG("%s: 0x%" PRIx64 "\t%p\t0x%" PRIx64 "\n", __func__, pos, buf, len);
	if (!bl || !bl->write || !buf) {
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	rc = reacquire(bl);
	if (rc)
		return rc;

	rc = bl->write(bl, pos, buf, len);

	release(bl);

	return rc;
}

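/*
 * ECC aware write. If the destination is ECC protected, ECC bytes are
 * generated and interleaved with the caller's data before it is written;
 * unaligned writes do a read-modify-write of the partial ECC words at
 * either end.
 */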
int blocklevel_write(struct blocklevel_device *bl, uint64_t pos, const void *buf,
		uint64_t len)
{
	int rc, ecc_protection;
	struct ecc64 *buffer;
	uint64_t ecc_len;
	uint64_t ecc_start, ecc_pos, ecc_diff;

	FL_DBG("%s: 0x%" PRIx64 "\t%p\t0x%" PRIx64 "\n", __func__, pos, buf, len);
	if (!bl || !buf) {
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	ecc_protection = ecc_protected(bl, pos, len, &ecc_start);

	FL_DBG("%s: 0x%" PRIx64 " for 0x%" PRIx64 " ecc=%s\n",
		__func__, pos, len, ecc_protection ?
		(ecc_protection == -1 ? "partial" : "yes") : "no");

	if (!ecc_protection)
		return blocklevel_raw_write(bl, pos, buf, len);

	/*
	 * The region we're writing to is only partially ECC protected.
	 * Perhaps one day in the future blocklevel can cope with this.
	 */
	if (ecc_protection == -1) {
		FL_ERR("%s: Can't cope with partial ecc\n", __func__);
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	pos = with_ecc_pos(ecc_start, pos);

	ecc_pos = ecc_buffer_align(ecc_start, pos);
	ecc_diff = pos - ecc_pos;
	ecc_len = ecc_buffer_size(len + ecc_diff);

	FL_DBG("%s: adjusted_pos: 0x%" PRIx64 ", ecc_pos: 0x%" PRIx64
			", ecc_diff: 0x%" PRIx64 ", ecc_len: 0x%" PRIx64 "\n",
			__func__, pos, ecc_pos, ecc_diff, ecc_len);

	buffer = malloc(ecc_len);
	if (!buffer) {
		errno = ENOMEM;
		rc = FLASH_ERR_MALLOC_FAILED;
		goto out;
	}

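	/*
	 * If the write is ECC word aligned the data can be converted in one
	 * go; otherwise the partial first and last ECC words need a
	 * read-modify-write so their ECC bytes cover the full word.
	 */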
	if (ecc_diff) {
		uint64_t start_chunk = ecc_diff;
		uint64_t end_chunk = BYTES_PER_ECC - ecc_diff;
		uint64_t end_len = ecc_len - end_chunk;

		/*
		 * Read the start bytes that memcpy_to_ecc_unaligned() will need
		 * to calculate the first ecc byte
		 */
		rc = blocklevel_raw_read(bl, ecc_pos, buffer, start_chunk);
		if (rc) {
			errno = EBADF;
			rc = FLASH_ERR_ECC_INVALID;
			goto out;
		}

		/*
		 * Read the end bytes that memcpy_to_ecc_unaligned() will need
		 * to calculate the last ecc byte
		 */
		rc = blocklevel_raw_read(bl, ecc_pos + end_len, ((char *)buffer) + end_len,
				end_chunk);
		if (rc) {
			errno = EBADF;
			rc = FLASH_ERR_ECC_INVALID;
			goto out;
		}

		if (memcpy_to_ecc_unaligned(buffer, buf, len, ecc_diff)) {
			errno = EBADF;
			rc = FLASH_ERR_ECC_INVALID;
			goto out;
		}
	} else {
		if (memcpy_to_ecc(buffer, buf, len)) {
			errno = EBADF;
			rc = FLASH_ERR_ECC_INVALID;
			goto out;
		}
	}
	rc = blocklevel_raw_write(bl, ecc_pos, buffer, ecc_len);

out:
	free(buffer);
	return rc;
}

int blocklevel_erase(struct blocklevel_device *bl, uint64_t pos, uint64_t len)
{
	int rc;
	if (!bl || !bl->erase) {
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	FL_DBG("%s: 0x%" PRIx64 "\t0x%" PRIx64 "\n", __func__, pos, len);

	/* Programmer may be making a horrible mistake without knowing it */
	if (pos & bl->erase_mask) {
		FL_ERR("blocklevel_erase: pos (0x%"PRIx64") is not erase block (0x%08x) aligned\n",
				pos, bl->erase_mask + 1);
		return FLASH_ERR_ERASE_BOUNDARY;
	}

	if (len & bl->erase_mask) {
		FL_ERR("blocklevel_erase: len (0x%"PRIx64") is not erase block (0x%08x) aligned\n",
				len, bl->erase_mask + 1);
		return FLASH_ERR_ERASE_BOUNDARY;
	}

	rc = reacquire(bl);
	if (rc)
		return rc;

	rc = bl->erase(bl, pos, len);

	release(bl);

	return rc;
}

int blocklevel_get_info(struct blocklevel_device *bl, const char **name, uint64_t *total_size,
		uint32_t *erase_granule)
{
	int rc;

	if (!bl || !bl->get_info) {
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	rc = reacquire(bl);
	if (rc)
		return rc;

	rc = bl->get_info(bl, name, total_size, erase_granule);

	/* Check the validity of what we are being told */
	if (erase_granule && *erase_granule != bl->erase_mask + 1)
		FL_ERR("blocklevel_get_info: WARNING: erase_granule (0x%08x) and erase_mask"
				" (0x%08x) don't match\n", *erase_granule, bl->erase_mask + 1);

	release(bl);

	return rc;
}

/*
 * Compare flash and memory to determine if:
 * a) Erase must happen before write
 * b) Flash and memory are identical
 * c) Flash can simply be written to
 *
 * returns -1 for a
 * returns  0 for b
 * returns  1 for c
 */
static int blocklevel_flashcmp(const void *flash_buf, const void *mem_buf, uint64_t len)
{
	uint64_t i;
	int same = true;
	const uint8_t *f_buf, *m_buf;

	f_buf = flash_buf;
	m_buf = mem_buf;

	for (i = 0; i < len; i++) {
		if (m_buf[i] & ~f_buf[i])
			return -1;
		if (same && (m_buf[i] != f_buf[i]))
			same = false;
	}

	return same ? 0 : 1;
}

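/*
 * Erase an arbitrary range that need not be erase block aligned. The
 * portions of the first and last erase blocks that fall outside the
 * requested range are read out and written back after the erase.
 */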
int blocklevel_smart_erase(struct blocklevel_device *bl, uint64_t pos, uint64_t len)
{
	uint64_t block_size;
	void *erase_buf;
	int rc;

	if (!bl) {
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	FL_DBG("%s: 0x%" PRIx64 "\t0x%" PRIx64 "\n", __func__, pos, len);

	/* Nothing smart needs to be done, pos and len are aligned */
	if ((pos & bl->erase_mask) == 0 && (len & bl->erase_mask) == 0) {
		FL_DBG("%s: Skipping smarts, everything is aligned: 0x%" PRIx64
				" 0x%" PRIx64 " to 0x%08x\n", __func__, pos, len,
				bl->erase_mask);
		return blocklevel_erase(bl, pos, len);
	}
	block_size = bl->erase_mask + 1;
	erase_buf = malloc(block_size);
	if (!erase_buf) {
		errno = ENOMEM;
		return FLASH_ERR_MALLOC_FAILED;
	}

	rc = reacquire(bl);
	if (rc) {
		free(erase_buf);
		return rc;
	}

	if (pos & bl->erase_mask) {
		/*
		 * base_pos and base_len are the values in the first erase
		 * block that we need to preserve: the region up to pos.
		 */
		uint64_t base_pos = pos & ~(bl->erase_mask);
		uint64_t base_len = pos - base_pos;

		FL_DBG("%s: preserving 0x%" PRIx64 "..0x%" PRIx64 "\n",
				__func__, base_pos, base_pos + base_len);

		/*
		 * Read the entire block in case this is the ONLY block we're
		 * modifying, we may need the end chunk of it later
		 */
		rc = bl->read(bl, base_pos, erase_buf, block_size);
		if (rc)
			goto out;

		rc = bl->erase(bl, base_pos, block_size);
		if (rc)
			goto out;

		rc = bl->write(bl, base_pos, erase_buf, base_len);
		if (rc)
			goto out;

		/*
		 * The requested erase fits entirely into this erase block and
		 * so we need to write back the chunk at the end of the block
		 */
		if (base_pos + base_len + len < base_pos + block_size) {
			rc = bl->write(bl, pos + len, erase_buf + base_len + len,
					block_size - base_len - len);
			FL_DBG("%s: Early exit, everything was in one erase block\n",
					__func__);
			goto out;
		}

		pos += block_size - base_len;
		len -= block_size - base_len;
	}

	/* Now we should be aligned, best to double check */
	if (pos & bl->erase_mask) {
		FL_DBG("%s: pos 0x%" PRIx64 " isn't erase_mask 0x%08x aligned\n",
				__func__, pos, bl->erase_mask);
		rc = FLASH_ERR_PARM_ERROR;
		goto out;
	}

	if (len & ~(bl->erase_mask)) {
		rc = bl->erase(bl, pos, len & ~(bl->erase_mask));
		if (rc)
			goto out;

		pos += len & ~(bl->erase_mask);
		len -= len & ~(bl->erase_mask);
	}

	/* Length should be less than a block now */
	if (len > block_size) {
		FL_DBG("%s: len 0x%" PRIx64 " still exceeds block_size 0x%" PRIx64 "\n",
				__func__, len, block_size);
		rc = FLASH_ERR_PARM_ERROR;
		goto out;
	}

	if (len & bl->erase_mask) {
		/*
		 * top_pos is the first byte that must be preserved and
		 * top_len is the length from top_pos to the end of the erase
		 * block: the region that must be preserved
		 */
		uint64_t top_pos = pos + len;
		uint64_t top_len = block_size - len;

		FL_DBG("%s: preserving 0x%" PRIx64 "..0x%" PRIx64 "\n",
				__func__, top_pos, top_pos + top_len);

		rc = bl->read(bl, top_pos, erase_buf, top_len);
		if (rc)
			goto out;

		rc = bl->erase(bl, pos, block_size);
		if (rc)
			goto out;

		rc = bl->write(bl, top_pos, erase_buf, top_len);
		if (rc)
			goto out;
	}

out:
	free(erase_buf);
	release(bl);
	return rc;
}

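/*
 * Write an arbitrary range, erasing only the erase blocks that actually
 * need it. Each affected block is read back and compared with the new
 * data: identical blocks are skipped, blocks that only need bits cleared
 * are written directly, and the rest are erased first.
 */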
int blocklevel_smart_write(struct blocklevel_device *bl, uint64_t pos, const void *buf, uint64_t len)
{
	uint32_t erase_size;
	const void *write_buf = buf;
	void *write_buf_start = NULL;
	uint64_t ecc_start;
	void *erase_buf;
	int rc = 0;

	if (!write_buf || !bl) {
		errno = EINVAL;
		return FLASH_ERR_PARM_ERROR;
	}

	FL_DBG("%s: 0x%" PRIx64 "\t0x%" PRIx64 "\n", __func__, pos, len);

	if (!(bl->flags & WRITE_NEED_ERASE)) {
		FL_DBG("%s: backend doesn't need erase\n", __func__);
		return blocklevel_write(bl, pos, buf, len);
	}

	rc = blocklevel_get_info(bl, NULL, NULL, &erase_size);
	if (rc)
		return rc;

	if (ecc_protected(bl, pos, len, &ecc_start)) {
		FL_DBG("%s: region has ECC\n", __func__);

		len = ecc_buffer_size(len);

		write_buf_start = malloc(len);
		if (!write_buf_start) {
			errno = ENOMEM;
			return FLASH_ERR_MALLOC_FAILED;
		}

		if (memcpy_to_ecc(write_buf_start, buf, ecc_buffer_size_minus_ecc(len))) {
			free(write_buf_start);
			errno = EBADF;
			return FLASH_ERR_ECC_INVALID;
		}
		write_buf = write_buf_start;
	}

	erase_buf = malloc(erase_size);
	if (!erase_buf) {
		errno = ENOMEM;
		rc = FLASH_ERR_MALLOC_FAILED;
		goto out_free;
	}

	rc = reacquire(bl);
	if (rc)
		goto out_free;

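	/*
	 * Walk the write one erase block at a time, comparing what is
	 * already on flash with what we want to write so that untouched
	 * blocks are skipped and erases only happen when the flash contents
	 * cannot simply be overwritten.
	 */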
	while (len > 0) {
		uint32_t erase_block = pos & ~(erase_size - 1);
		uint32_t block_offset = pos & (erase_size - 1);
		uint32_t size = erase_size > len ? len : erase_size;
		int cmp;

		/* Write crosses an erase boundary, shrink the write to the boundary */
		if (erase_size < block_offset + size) {
			size = erase_size - block_offset;
		}

		rc = bl->read(bl, erase_block, erase_buf, erase_size);
		if (rc)
			goto out;

		cmp = blocklevel_flashcmp(erase_buf + block_offset, write_buf, size);
		FL_DBG("%s: region 0x%08x..0x%08x ", __func__,
				erase_block, erase_block + erase_size);
		if (cmp != 0) {
			FL_DBG("needs ");
			if (cmp == -1) {
				FL_DBG("erase and ");
				bl->erase(bl, erase_block, erase_size);
			}
			FL_DBG("write\n");
			memcpy(erase_buf + block_offset, write_buf, size);
			rc = bl->write(bl, erase_block, erase_buf, erase_size);
			if (rc)
				goto out;
		}
		len -= size;
		pos += size;
		write_buf += size;
	}

out:
	release(bl);
out_free:
	free(write_buf_start);
	free(erase_buf);
	return rc;
}

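/*
 * Insert a new ECC protected range into the list, which is kept ordered
 * by start address. Overlap with existing ranges is either trimmed away
 * or used to extend an existing entry, so duplicates are never added.
 * Returns false on overflow or allocation failure.
 */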
static bool insert_bl_prot_range(struct blocklevel_range *ranges, struct bl_prot_range range)
{
	int i;
	uint32_t pos, len;
	struct bl_prot_range *prot = ranges->prot;

	pos = range.start;
	len = range.len;

	if (len == 0)
		return true;

	/* Check for overflow */
	if (pos + len < len)
		return false;

	for (i = 0; i < ranges->n_prot && len > 0; i++) {
		if (prot[i].start <= pos && prot[i].start + prot[i].len >= pos + len) {
			len = 0;
			break; /* Might as well, the next two conditions can't be true */
		}

		/* Can easily extend this down just by adjusting start */
		if (pos <= prot[i].start && pos + len >= prot[i].start) {
			FL_DBG("%s: extending start down\n", __func__);
			prot[i].len += prot[i].start - pos;
			prot[i].start = pos;
			pos += prot[i].len;
			if (prot[i].len >= len)
				len = 0;
			else
				len -= prot[i].len;
		}

		/*
		 * Jump over this range, but the new range might be so big
		 * that there's a chunk left over after it
		 */
		if (pos >= prot[i].start && pos < prot[i].start + prot[i].len) {
			FL_DBG("%s: fits within current range ", __func__);
			if (prot[i].start + prot[i].len - pos < len) {
				FL_DBG("but there is some extra at the end\n");
				len -= prot[i].start + prot[i].len - pos;
				pos = prot[i].start + prot[i].len;
			} else {
				FL_DBG("\n");
				len = 0;
			}
		}
		/*
		 * This condition will be true if the new range ends before
		 * the current range starts, therefore it should be inserted
		 * here!
		 */
		if (pos < prot[i].start && pos + len <= prot[i].start)
			break;
	}

	if (len) {
		int insert_pos = i;
		struct bl_prot_range *new_ranges = ranges->prot;

		FL_DBG("%s: adding 0x%08x..0x%08x\n", __func__, pos, pos + len);

		if (ranges->n_prot == ranges->total_prot) {
			new_ranges = realloc(ranges->prot,
					sizeof(range) * ((ranges->n_prot) + PROT_REALLOC_NUM));
			if (!new_ranges)
				return false;
			ranges->total_prot += PROT_REALLOC_NUM;
		}
		if (insert_pos != ranges->n_prot)
			for (i = ranges->n_prot; i > insert_pos; i--)
				memcpy(&new_ranges[i], &new_ranges[i - 1], sizeof(range));
		range.start = pos;
		range.len = len;
		memcpy(&new_ranges[insert_pos], &range, sizeof(range));
		ranges->prot = new_ranges;
		ranges->n_prot++;
		prot = new_ranges;
	}

	return true;
}

int blocklevel_ecc_protect(struct blocklevel_device *bl, uint32_t start, uint32_t len)
{
	/*
	 * Could implement this at hardware level by having an accessor to the
	 * backend in struct blocklevel_device and as a result do nothing at
	 * this level (although probably not for ecc!)
	 */
	struct bl_prot_range range = { .start = start, .len = len };

	if (len < BYTES_PER_ECC)
		return -1;
	return !insert_bl_prot_range(&bl->ecc_prot, range);
}