1 /* $NetBSD: ebh.c,v 1.6 2015/02/07 04:21:11 christos Exp $ */
2
3 /*-
4 * Copyright (c) 2010 Department of Software Engineering,
5 * University of Szeged, Hungary
6 * Copyright (C) 2009 Ferenc Havasi <havasi@inf.u-szeged.hu>
7 * Copyright (C) 2009 Zoltan Sogor <weth@inf.u-szeged.hu>
8 * Copyright (C) 2009 David Tengeri <dtengeri@inf.u-szeged.hu>
9 * Copyright (C) 2009 Tamas Toth <ttoth@inf.u-szeged.hu>
10 * Copyright (C) 2010 Adam Hoka <ahoka@NetBSD.org>
11 * All rights reserved.
12 *
13 * This code is derived from software contributed to The NetBSD Foundation
14 * by the Department of Software Engineering, University of Szeged, Hungary
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
27 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
28 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
29 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
30 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
32 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
33 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37
38 #include "ebh.h"
39
40 /*****************************************************************************/
41 /* Flash specific operations */
42 /*****************************************************************************/
43 int nor_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr);
44 int nand_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr);
45 int nor_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset);
46 int nand_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset);
47 int nor_read_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
48 int nand_read_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
49 int nor_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr);
50 int nand_write_eb_hdr(struct chfs_ebh *ebh, int pebnr,struct chfs_eb_hdr *ebhdr);
51 int nor_check_eb_hdr(struct chfs_ebh *ebh, void *buf);
52 int nand_check_eb_hdr(struct chfs_ebh *ebh, void *buf);
53 int nor_mark_eb_hdr_dirty_flash(struct chfs_ebh *ebh, int pebnr, int lid);
54 int nor_invalidate_eb_hdr(struct chfs_ebh *ebh, int pebnr);
55 int mark_eb_hdr_free(struct chfs_ebh *ebh, int pebnr, int ec);
56
57 int ltree_entry_cmp(struct chfs_ltree_entry *le1, struct chfs_ltree_entry *le2);
58 int peb_in_use_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2);
59 int peb_free_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2);
60 int add_peb_to_erase_queue(struct chfs_ebh *ebh, int pebnr, int ec,struct peb_queue *queue);
61 struct chfs_peb * find_peb_in_use(struct chfs_ebh *ebh, int pebnr);
62 int add_peb_to_free(struct chfs_ebh *ebh, int pebnr, int ec);
63 int add_peb_to_in_use(struct chfs_ebh *ebh, int pebnr, int ec);
64 void erase_callback(struct flash_erase_instruction *ei);
65 int free_peb(struct chfs_ebh *ebh);
66 int release_peb(struct chfs_ebh *ebh, int pebnr);
67 void erase_thread(void *data);
68 static void erase_thread_start(struct chfs_ebh *ebh);
69 static void erase_thread_stop(struct chfs_ebh *ebh);
70 int scan_leb_used_cmp(struct chfs_scan_leb *sleb1, struct chfs_scan_leb *sleb2);
71 int nor_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,struct chfs_eb_hdr *ebhdr, int pebnr, int leb_status);
72 int nor_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
73 int pebnr, struct chfs_eb_hdr *ebhdr);
74 int nand_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,struct chfs_eb_hdr *ebhdr, int pebnr);
75 int nand_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
76 int pebnr, struct chfs_eb_hdr *ebhdr);
77 struct chfs_scan_info *chfs_scan(struct chfs_ebh *ebh);
78 void scan_info_destroy(struct chfs_scan_info *si);
79 int scan_media(struct chfs_ebh *ebh);
80 int get_peb(struct chfs_ebh *ebh);
81 /**
82 * nor_create_eb_hdr - creates an eraseblock header for NOR flash
83 * @ebhdr: ebhdr to set
84 * @lnr: LEB number
85 */
86 int
87 nor_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr)
88 {
89 ebhdr->u.nor_hdr.lid = htole32(lnr);
90 return 0;
91 }
92
93 /**
94 * nand_create_eb_hdr - creates an eraseblock header for NAND flash
95 * @ebhdr: ebhdr to set
96 * @lnr: LEB number
97 */
98 int
99 nand_create_eb_hdr(struct chfs_eb_hdr *ebhdr, int lnr)
100 {
101 ebhdr->u.nand_hdr.lid = htole32(lnr);
102 return 0;
103 }
104
105 /**
106 * nor_calc_data_offs - calculates data offset on NOR flash
107 * @ebh: chfs eraseblock handler
108 * @pebnr: eraseblock number
109 * @offset: offset within the eraseblock
110 */
111 int
112 nor_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset)
113 {
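/* data begins right after the erase-counter header and the NOR eraseblock header */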
114 return pebnr * ebh->flash_if->erasesize + offset +
115 CHFS_EB_EC_HDR_SIZE + CHFS_EB_HDR_NOR_SIZE;
116 }
117
118 /**
119 * nand_calc_data_offs - calculates data offset on NAND flash
120 * @ebh: chfs eraseblock handler
121 * @pebnr: eraseblock number
122 * @offset: offset within the eraseblock
123 */
124 int
125 nand_calc_data_offs(struct chfs_ebh *ebh, int pebnr, int offset)
126 {
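/* the erase-counter header and the NAND eraseblock header occupy the first two pages, so data starts on the third */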
127 return pebnr * ebh->flash_if->erasesize + offset +
128 2 * ebh->flash_if->page_size;
129 }
130
131 /**
132 * nor_read_eb_hdr - read eraseblock header from NOR flash
133 *
134 * @ebh: chfs eraseblock handler
135 * @pebnr: eraseblock number
136 * @ebhdr: where to store the data
137 *
138 * Reads the eraseblock header from media.
139 * Returns zero in case of success, error code in case of fail.
140 */
141 int
142 nor_read_eb_hdr(struct chfs_ebh *ebh,
143 int pebnr, struct chfs_eb_hdr *ebhdr)
144 {
145 int ret;
146 size_t retlen;
147 off_t ofs = pebnr * ebh->flash_if->erasesize;
148
149 KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
150
151 ret = flash_read(ebh->flash_dev,
152 ofs, CHFS_EB_EC_HDR_SIZE,
153 &retlen, (unsigned char *) &ebhdr->ec_hdr);
154
155 if (ret || retlen != CHFS_EB_EC_HDR_SIZE)
156 return ret;
157
158 ofs += CHFS_EB_EC_HDR_SIZE;
159 ret = flash_read(ebh->flash_dev,
160 ofs, CHFS_EB_HDR_NOR_SIZE,
161 &retlen, (unsigned char *) &ebhdr->u.nor_hdr);
162
163 if (ret || retlen != CHFS_EB_HDR_NOR_SIZE)
164 return ret;
165
166 return 0;
167 }
168
169 /**
170 * nand_read_eb_hdr - read eraseblock header from NAND flash
171 *
172 * @ebh: chfs eraseblock handler
173 * @pebnr: eraseblock number
174 * @ebhdr: where to store the data
175 *
176 * Reads the eraseblock header from media. It is stored on the first two pages.
177 * Returns zero in case of success, error code in case of fail.
178 */
179 int
180 nand_read_eb_hdr(struct chfs_ebh *ebh, int pebnr,
181 struct chfs_eb_hdr *ebhdr)
182 {
183 int ret;
184 size_t retlen;
185 off_t ofs;
186
187 KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
188
189 /* Read erase counter header from the first page. */
190 ofs = pebnr * ebh->flash_if->erasesize;
191 ret = flash_read(ebh->flash_dev,
192 ofs, CHFS_EB_EC_HDR_SIZE, &retlen,
193 (unsigned char *) &ebhdr->ec_hdr);
194 if (ret || retlen != CHFS_EB_EC_HDR_SIZE)
195 return ret;
196
197 /* Read NAND eraseblock header from the second page */
198 ofs += ebh->flash_if->page_size;
199 ret = flash_read(ebh->flash_dev,
200 ofs, CHFS_EB_HDR_NAND_SIZE, &retlen,
201 (unsigned char *) &ebhdr->u.nand_hdr);
202 if (ret || retlen != CHFS_EB_HDR_NAND_SIZE)
203 return ret;
204
205 return 0;
206 }
207
208 /**
209 * nor_write_eb_hdr - write eraseblock header to NOR flash
210 *
211 * @ebh: chfs eraseblock handler
212 * @pebnr: eraseblock number to write to
213 * @ebhdr: eraseblock header to write
214 *
215 * Writes the eraseblock header to media.
216 * Returns zero in case of success, error code in case of fail.
217 */
218 int
219 nor_write_eb_hdr(struct chfs_ebh *ebh, int pebnr, struct chfs_eb_hdr *ebhdr)
220 {
221 int ret, crc;
222 size_t retlen;
223
224 off_t ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE;
225
226 ebhdr->u.nor_hdr.lid = ebhdr->u.nor_hdr.lid
227 | htole32(CHFS_LID_NOT_DIRTY_BIT);
228
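/* the CRC covers the header minus its leading 4-byte crc field */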
229 crc = crc32(0, (uint8_t *)&ebhdr->u.nor_hdr + 4,
230 CHFS_EB_HDR_NOR_SIZE - 4);
231 ebhdr->u.nor_hdr.crc = htole32(crc);
232
233 KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
234
235 ret = flash_write(ebh->flash_dev,
236 ofs, CHFS_EB_HDR_NOR_SIZE, &retlen,
237 (unsigned char *) &ebhdr->u.nor_hdr);
238
239 if (ret || retlen != CHFS_EB_HDR_NOR_SIZE)
240 return ret;
241
242 return 0;
243 }
244
245 /**
246 * nand_write_eb_hdr - write eraseblock header to NAND flash
247 *
248 * @ebh: chfs eraseblock handler
249 * @pebnr: eraseblock number to write to
250 * @ebhdr: eraseblock header to write
251 *
252 * Writes the eraseblock header to media.
253 * Returns zero in case of success, error code in case of fail.
254 */
255 int
256 nand_write_eb_hdr(struct chfs_ebh *ebh, int pebnr,
257 struct chfs_eb_hdr *ebhdr)
258 {
259 int ret, crc;
260 size_t retlen;
261 flash_off_t ofs;
262
263 KASSERT(pebnr >= 0 && pebnr < ebh->peb_nr);
264
265 ofs = pebnr * ebh->flash_if->erasesize +
266 ebh->flash_if->page_size;
267
268 ebhdr->u.nand_hdr.serial = htole64(++(*ebh->max_serial));
269
270 crc = crc32(0, (uint8_t *)&ebhdr->u.nand_hdr + 4,
271 CHFS_EB_HDR_NAND_SIZE - 4);
272 ebhdr->u.nand_hdr.crc = htole32(crc);
273
274 ret = flash_write(ebh->flash_dev, ofs,
275 CHFS_EB_HDR_NAND_SIZE, &retlen,
276 (unsigned char *) &ebhdr->u.nand_hdr);
277
278 if (ret || retlen != CHFS_EB_HDR_NAND_SIZE)
279 return ret;
280
281 return 0;
282 }
283
284 /**
285 * nor_check_eb_hdr - check eraseblock header read from NOR flash
286 *
287 * @ebh: chfs eraseblock handler
288 * @buf: eraseblock header to check
289 *
290 * Returns eraseblock header status.
291 */
292 int
293 nor_check_eb_hdr(struct chfs_ebh *ebh, void *buf)
294 {
295 uint32_t magic, crc, hdr_crc;
296 struct chfs_eb_hdr *ebhdr = buf;
297 le32 lid_save;
298
299 // check whether there is a header
300 if (check_pattern((void *) &ebhdr->ec_hdr,
301 0xFF, 0, CHFS_EB_EC_HDR_SIZE)) {
302 dbg_ebh("no header found\n");
303 return EBHDR_LEB_NO_HDR;
304 }
305
306 // check magic
307 magic = le32toh(ebhdr->ec_hdr.magic);
308 if (magic != CHFS_MAGIC_BITMASK) {
309 dbg_ebh("bad magic bitmask(exp: %x found %x)\n",
310 CHFS_MAGIC_BITMASK, magic);
311 return EBHDR_LEB_BADMAGIC;
312 }
313
314 // check CRC_EC
315 hdr_crc = le32toh(ebhdr->ec_hdr.crc_ec);
316 crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
317 if (hdr_crc != crc) {
318 dbg_ebh("bad crc_ec found\n");
319 return EBHDR_LEB_BADCRC;
320 }
321
322 /* check if the PEB is free: magic, crc_ec and erase_cnt are good and
323 * everything else is 0xFF
324 */
325 if (check_pattern((void *) &ebhdr->u.nor_hdr, 0xFF, 0,
326 CHFS_EB_HDR_NOR_SIZE)) {
327 dbg_ebh("free peb found\n");
328 return EBHDR_LEB_FREE;
329 }
330
331 // check invalidated (CRC == LID == 0)
332 if (ebhdr->u.nor_hdr.crc == 0 && ebhdr->u.nor_hdr.lid == 0) {
333 dbg_ebh("invalidated ebhdr found\n");
334 return EBHDR_LEB_INVALIDATED;
335 }
336
337 // check CRC
338 hdr_crc = le32toh(ebhdr->u.nor_hdr.crc);
339 lid_save = ebhdr->u.nor_hdr.lid;
340
341 // mark lid as not dirty for crc calc
342 ebhdr->u.nor_hdr.lid = ebhdr->u.nor_hdr.lid | htole32(
343 CHFS_LID_NOT_DIRTY_BIT);
344 crc = crc32(0, (uint8_t *) &ebhdr->u.nor_hdr + 4,
345 CHFS_EB_HDR_NOR_SIZE - 4);
346 // restore the original lid value in ebh
347 ebhdr->u.nor_hdr.lid = lid_save;
348
349 if (crc != hdr_crc) {
350 dbg_ebh("bad crc found\n");
351 return EBHDR_LEB_BADCRC;
352 }
353
354 // check dirty
355 if (!(le32toh(lid_save) & CHFS_LID_NOT_DIRTY_BIT)) {
356 dbg_ebh("dirty ebhdr found\n");
357 return EBHDR_LEB_DIRTY;
358 }
359
360 return EBHDR_LEB_OK;
361 }
362
363 /**
364 * nand_check_eb_hdr - check eraseblock header read from NAND flash
365 *
366 * @ebh: chfs eraseblock handler
367 * @buf: eraseblock header to check
368 *
369 * Returns eraseblock header status.
370 */
371 int
372 nand_check_eb_hdr(struct chfs_ebh *ebh, void *buf)
373 {
374 uint32_t magic, crc, hdr_crc;
375 struct chfs_eb_hdr *ebhdr = buf;
376
377 // check whether there is a header
378 if (check_pattern((void *) &ebhdr->ec_hdr,
379 0xFF, 0, CHFS_EB_EC_HDR_SIZE)) {
380 dbg_ebh("no header found\n");
381 return EBHDR_LEB_NO_HDR;
382 }
383
384 // check magic
385 magic = le32toh(ebhdr->ec_hdr.magic);
386 if (magic != CHFS_MAGIC_BITMASK) {
387 dbg_ebh("bad magic bitmask(exp: %x found %x)\n",
388 CHFS_MAGIC_BITMASK, magic);
389 return EBHDR_LEB_BADMAGIC;
390 }
391
392 // check CRC_EC
393 hdr_crc = le32toh(ebhdr->ec_hdr.crc_ec);
394 crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
395 if (hdr_crc != crc) {
396 dbg_ebh("bad crc_ec found\n");
397 return EBHDR_LEB_BADCRC;
398 }
399
400 /* check if the PEB is free: magic, crc_ec and erase_cnt are good and
401 * everything else is 0xFF
402 */
403 if (check_pattern((void *) &ebhdr->u.nand_hdr, 0xFF, 0,
404 CHFS_EB_HDR_NAND_SIZE)) {
405 dbg_ebh("free peb found\n");
406 return EBHDR_LEB_FREE;
407 }
408
409 // check CRC
410 hdr_crc = le32toh(ebhdr->u.nand_hdr.crc);
411
412 crc = crc32(0, (uint8_t *) &ebhdr->u.nand_hdr + 4,
413 CHFS_EB_HDR_NAND_SIZE - 4);
414
415 if (crc != hdr_crc) {
416 dbg_ebh("bad crc found\n");
417 return EBHDR_LEB_BADCRC;
418 }
419
420 return EBHDR_LEB_OK;
421 }
422
423 /**
424 * nor_mark_eb_hdr_dirty_flash - mark eraseblock header dirty on NOR flash
425 *
426 * @ebh: chfs eraseblock handler
427 * @pebnr: eraseblock number
428 * @lid: leb id (its bit number 31 will be set to 0)
429 *
430 * It clears CHFS_LID_NOT_DIRTY_BIT on flash.
431 *
432 * Returns zero in case of success, error code in case of fail.
433 */
434 int
435 nor_mark_eb_hdr_dirty_flash(struct chfs_ebh *ebh, int pebnr, int lid)
436 {
437 int ret;
438 size_t retlen;
439 off_t ofs;
440
441 /* mark leb id dirty */
442 lid = htole32(lid & CHFS_LID_DIRTY_BIT_MASK);
443
444 /* calculate position */
445 ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE
446 + CHFS_GET_MEMBER_POS(struct chfs_nor_eb_hdr , lid);
447
448 ret = flash_write(ebh->flash_dev, ofs, sizeof(lid), &retlen,
449 (unsigned char *) &lid);
450 if (ret || retlen != sizeof(lid)) {
451 chfs_err("can't mark peb dirty");
452 return ret;
453 }
454
455 return 0;
456 }
457
458 /**
459 * nor_invalidate_eb_hdr - invalidate eraseblock header on NOR flash
460 *
461 * @ebh: chfs eraseblock handler
462 * @pebnr: eraseblock number
463 *
464 * Sets the crc and lid fields to zero.
465 * Returns zero in case of success, error code in case of fail.
466 */
467 int
468 nor_invalidate_eb_hdr(struct chfs_ebh *ebh, int pebnr)
469 {
470 int ret;
471 size_t retlen;
472 off_t ofs;
473 char zero_buf[CHFS_INVALIDATE_SIZE];
474
475 /* fill with zero */
476 memset(zero_buf, 0x0, CHFS_INVALIDATE_SIZE);
477
478 /* calculate position (!!! lid is directly behind crc !!!) */
479 ofs = pebnr * ebh->flash_if->erasesize + CHFS_EB_EC_HDR_SIZE
480 + CHFS_GET_MEMBER_POS(struct chfs_nor_eb_hdr, crc);
481
482 ret = flash_write(ebh->flash_dev,
483 ofs, CHFS_INVALIDATE_SIZE, &retlen,
484 (unsigned char *) &zero_buf);
485 if (ret || retlen != CHFS_INVALIDATE_SIZE) {
486 chfs_err("can't invalidate peb");
487 return ret;
488 }
489
490 return 0;
491 }
492
493 /**
494 * mark_eb_hdr_free - mark an eraseblock free on NOR or NAND flash
495 *
496 * @ebh: chfs eraseblock handler
497 * @pebnr: eraseblock number
498 * @ec: erase counter of PEB
499 *
500 * Write out the magic and erase counter to the physical eraseblock.
501 * Returns zero in case of success, error code in case of fail.
502 */
503 int
504 mark_eb_hdr_free(struct chfs_ebh *ebh, int pebnr, int ec)
505 {
506 int ret, crc;
507 size_t retlen;
508 off_t ofs;
509 struct chfs_eb_hdr *ebhdr;
510 ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
511
512 ebhdr->ec_hdr.magic = htole32(CHFS_MAGIC_BITMASK);
513 ebhdr->ec_hdr.erase_cnt = htole32(ec);
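/* crc_ec protects only the 4 bytes of the erase counter */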
514 crc = crc32(0, (uint8_t *) &ebhdr->ec_hdr + 8, 4);
515 ebhdr->ec_hdr.crc_ec = htole32(crc);
516
517 ofs = pebnr * ebh->flash_if->erasesize;
518
519 KASSERT(sizeof(ebhdr->ec_hdr) == CHFS_EB_EC_HDR_SIZE);
520
521 ret = flash_write(ebh->flash_dev,
522 ofs, CHFS_EB_EC_HDR_SIZE, &retlen,
523 (unsigned char *) &ebhdr->ec_hdr);
524
525 if (ret || retlen != CHFS_EB_EC_HDR_SIZE) {
526 chfs_err("can't mark peb as free: %d\n", pebnr);
527 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
528 return ret;
529 }
530
531 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
532 return 0;
533 }
534
535 /*****************************************************************************/
536 /* End of Flash specific operations */
537 /*****************************************************************************/
538
539 /*****************************************************************************/
540 /* Lock Tree */
541 /*****************************************************************************/
542
543 int
544 ltree_entry_cmp(struct chfs_ltree_entry *le1,
545 struct chfs_ltree_entry *le2)
546 {
547 return (le1->lnr - le2->lnr);
548 }
549
550 /* Generate functions for Lock tree's red-black tree */
551 RB_PROTOTYPE( ltree_rbtree, chfs_ltree_entry, rb, ltree_entry_cmp);
552 RB_GENERATE( ltree_rbtree, chfs_ltree_entry, rb, ltree_entry_cmp);
553
554
555 /**
556 * ltree_lookup - looks up a logical eraseblock in the lock tree
557 * @ebh: chfs eraseblock handler
558 * @lnr: identifier of the logical eraseblock
559 *
560 * This function returns a pointer to the wanted &struct chfs_ltree_entry
561 * if the logical eraseblock is in the lock tree (i.e. it is locked), and NULL
562 * otherwise.
563 * @ebh->ltree_lock has to be locked!
564 */
565 static struct chfs_ltree_entry *
566 ltree_lookup(struct chfs_ebh *ebh, int lnr)
567 {
568 struct chfs_ltree_entry le, *result;
569 le.lnr = lnr;
570 result = RB_FIND(ltree_rbtree, &ebh->ltree, &le);
571 return result;
572 }
573
574 /**
575 * ltree_add_entry - add an entry to the lock tree
576 * @ebh: chfs eraseblock handler
577 * @lnr: identifier of the logical eraseblock
578 *
579 * This function adds a new logical eraseblock entry identified with @lnr to the
580 * lock tree. If the entry is already in the tree, it increases the user
581 * counter.
582 * Returns NULL if it cannot allocate memory for a lock tree entry, or a pointer
583 * to the inserted entry otherwise.
584 */
585 static struct chfs_ltree_entry *
586 ltree_add_entry(struct chfs_ebh *ebh, int lnr)
587 {
588 struct chfs_ltree_entry *le, *result;
589
590 le = kmem_alloc(sizeof(struct chfs_ltree_entry), KM_SLEEP);
591
592 le->lnr = lnr;
593 le->users = 1;
594 rw_init(&le->mutex);
595
596 //dbg_ebh("enter ltree lock\n");
597 mutex_enter(&ebh->ltree_lock);
598 //dbg_ebh("insert\n");
599 result = RB_INSERT(ltree_rbtree, &ebh->ltree, le);
600 //dbg_ebh("inserted\n");
601 if (result) {
602 //The entry is already in the tree
603 result->users++;
604 kmem_free(le, sizeof(struct chfs_ltree_entry));
605 }
606 else {
607 result = le;
608 }
609 mutex_exit(&ebh->ltree_lock);
610
611 return result;
612 }
613
614 /**
615 * leb_read_lock - lock a logical eraseblock for read
616 * @ebh: chfs eraseblock handler
617 * @lnr: identifier of the logical eraseblock
618 *
619 * Returns zero in case of success, error code in case of fail.
620 */
621 static int
622 leb_read_lock(struct chfs_ebh *ebh, int lnr)
623 {
624 struct chfs_ltree_entry *le;
625
626 le = ltree_add_entry(ebh, lnr);
627 if (!le)
628 return ENOMEM;
629
630 rw_enter(&le->mutex, RW_READER);
631 return 0;
632 }
633
634 /**
635 * leb_read_unlock - unlock a logical eraseblock from read
636 * @ebh: chfs eraseblock handler
637 * @lnr: identifier of the logical eraseblock
638 *
639 * This function unlocks a logical eraseblock from read and deletes it from the
640 * lock tree if there are no more users of it.
641 */
642 static void
643 leb_read_unlock(struct chfs_ebh *ebh, int lnr)
644 {
645 struct chfs_ltree_entry *le;
646
647 mutex_enter(&ebh->ltree_lock);
648 //dbg_ebh("LOCK: ebh->ltree_lock spin locked in leb_read_unlock()\n");
649 le = ltree_lookup(ebh, lnr);
650 if (!le)
651 goto out;
652
653 le->users -= 1;
654 KASSERT(le->users >= 0);
655 rw_exit(&le->mutex);
656 if (le->users == 0) {
657 le = RB_REMOVE(ltree_rbtree, &ebh->ltree, le);
658 if (le) {
659 KASSERT(!rw_lock_held(&le->mutex));
660 rw_destroy(&le->mutex);
661
662 kmem_free(le, sizeof(struct chfs_ltree_entry));
663 }
664 }
665
666 out:
667 mutex_exit(&ebh->ltree_lock);
668 //dbg_ebh("UNLOCK: ebh->ltree_lock spin unlocked in leb_read_unlock()\n");
669 }
670
671 /**
672 * leb_write_lock - lock a logical eraseblock for write
673 * @ebh: chfs eraseblock handler
674 * @lnr: identifier of the logical eraseblock
675 *
676 * Returns zero in case of success, error code in case of fail.
677 */
678 static int
679 leb_write_lock(struct chfs_ebh *ebh, int lnr)
680 {
681 struct chfs_ltree_entry *le;
682
683 le = ltree_add_entry(ebh, lnr);
684 if (!le)
685 return ENOMEM;
686
687 rw_enter(&le->mutex, RW_WRITER);
688 return 0;
689 }
690
691 /**
692 * leb_write_unlock - unlock a logical eraseblock from write
693 * @ebh: chfs eraseblock handler
694 * @lnr: identifier of the logical eraseblock
695 *
696 * This function unlocks a logical eraseblock from write and deletes it from the
697 * lock tree if there are no more users of it.
698 */
699 static void
700 leb_write_unlock(struct chfs_ebh *ebh, int lnr)
701 {
702 struct chfs_ltree_entry *le;
703
704 mutex_enter(&ebh->ltree_lock);
705 //dbg_ebh("LOCK: ebh->ltree_lock spin locked in leb_write_unlock()\n");
706 le = ltree_lookup(ebh, lnr);
707 if (!le)
708 goto out;
709
710 le->users -= 1;
711 KASSERT(le->users >= 0);
712 rw_exit(&le->mutex);
713 if (le->users == 0) {
714 RB_REMOVE(ltree_rbtree, &ebh->ltree, le);
715
716 KASSERT(!rw_lock_held(&le->mutex));
717 rw_destroy(&le->mutex);
718
719 kmem_free(le, sizeof(struct chfs_ltree_entry));
720 }
721
722 out:
723 mutex_exit(&ebh->ltree_lock);
724 //dbg_ebh("UNLOCK: ebh->ltree_lock spin unlocked in leb_write_unlock()\n");
725 }
726
727 /*****************************************************************************/
728 /* End of Lock Tree */
729 /*****************************************************************************/
730
731 /*****************************************************************************/
732 /* Erase related operations */
733 /*****************************************************************************/
734
735 /**
736 * If the first argument is smaller than the second, the function
737 * returns a value smaller than zero. If they are equal, the function
738 * returns zero. Otherwise, it should return a value greater than zero.
739 */
740 int
741 peb_in_use_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2)
742 {
743 return (peb1->pebnr - peb2->pebnr);
744 }
745
746 int
747 peb_free_cmp(struct chfs_peb *peb1, struct chfs_peb *peb2)
748 {
749 int comp;
750
751 comp = peb1->erase_cnt - peb2->erase_cnt;
752 if (0 == comp)
753 comp = peb1->pebnr - peb2->pebnr;
754
755 return comp;
756 }
757
758 /* Generate functions for in use PEB's red-black tree */
759 RB_PROTOTYPE(peb_in_use_rbtree, chfs_peb, u.rb, peb_in_use_cmp);
760 RB_GENERATE(peb_in_use_rbtree, chfs_peb, u.rb, peb_in_use_cmp);
761 RB_PROTOTYPE(peb_free_rbtree, chfs_peb, u.rb, peb_free_cmp);
762 RB_GENERATE(peb_free_rbtree, chfs_peb, u.rb, peb_free_cmp);
763
764 /**
765 * add_peb_to_erase_queue: adds a PEB to to_erase/fully_erased queue
766 * @ebh - chfs eraseblock handler
767 * @pebnr - physical eraseblock's number
768 * @ec - erase counter of PEB
769 * @queue: the queue to add to
770 *
771 * This function adds a PEB to the erase queue specified by @queue.
772 * The @ebh->erase_lock must be locked before using this.
773 * Returns zero in case of success, error code in case of fail.
774 */
775 int
776 add_peb_to_erase_queue(struct chfs_ebh *ebh, int pebnr, int ec,
777 struct peb_queue *queue)
778 {
779 struct chfs_peb *peb;
780
781 peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
782
783 peb->erase_cnt = ec;
784 peb->pebnr = pebnr;
785
786 TAILQ_INSERT_TAIL(queue, peb, u.queue);
787
788 return 0;
789
790 }
791 //TODO
792 /**
793 * find_peb_in_use - looks up a PEB in the RB-tree of used blocks
794 * @ebh - chfs eraseblock handler
795 *
796 * This function returns a pointer to the PEB found in the tree,
797 * NULL otherwise.
798 * The @ebh->erase_lock must be locked before using this.
799 */
800 struct chfs_peb *
801 find_peb_in_use(struct chfs_ebh *ebh, int pebnr)
802 {
803 struct chfs_peb peb, *result;
804 peb.pebnr = pebnr;
805 result = RB_FIND(peb_in_use_rbtree, &ebh->in_use, &peb);
806 return result;
807 }
808
809 /**
810 * add_peb_to_free - adds a PEB to the RB-tree of free PEBs
811 * @ebh - chfs eraseblock handler
812 * @pebnr - physical eraseblock's number
813 * @ec - erase counter of PEB
814 *
815 *
816 * This function adds a physical eraseblock to the RB-tree of free PEBs
817 * stored in the @ebh. The key is the erase counter and pebnr.
818 * The @ebh->erase_lock must be locked before using this.
819 * Returns zero in case of success, error code in case of fail.
820 */
821 int
822 add_peb_to_free(struct chfs_ebh *ebh, int pebnr, int ec)
823 {
824 struct chfs_peb *peb, *result;
825
826 peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
827
828 peb->erase_cnt = ec;
829 peb->pebnr = pebnr;
830 result = RB_INSERT(peb_free_rbtree, &ebh->free, peb);
831 if (result) {
832 kmem_free(peb, sizeof(struct chfs_peb));
833 return 1;
834 }
835
836 return 0;
837 }
838
839 /**
840 * add_peb_to_in_use - adds a PEB to the RB-tree of used PEBs
841 * @ebh - chfs eraseblock handler
842 * @pebnr - physical eraseblock's number
843 * @ec - erase counter of PEB
844 *
845 *
846 * This function adds a physical eraseblock to the RB-tree of used PEBs
847 * stored in the @ebh. The key is pebnr.
848 * The @ebh->erase_lock must be locked before using this.
849 * Returns zero in case of success, error code in case of fail.
850 */
851 int
852 add_peb_to_in_use(struct chfs_ebh *ebh, int pebnr, int ec)
853 {
854 struct chfs_peb *peb, *result;
855
856 peb = kmem_alloc(sizeof(struct chfs_peb), KM_SLEEP);
857
858 peb->erase_cnt = ec;
859 peb->pebnr = pebnr;
860 result = RB_INSERT(peb_in_use_rbtree, &ebh->in_use, peb);
861 if (result) {
862 kmem_free(peb, sizeof(struct chfs_peb));
863 return 1;
864 }
865
866 return 0;
867 }
868
869 /**
870 * erase_callback - callback function for flash erase
871 * @ei: erase information
872 */
873 void
874 erase_callback(struct flash_erase_instruction *ei)
875 {
876 int err;
877 struct chfs_erase_info_priv *priv = (void *) ei->ei_priv;
878 //dbg_ebh("ERASE_CALLBACK() CALLED\n");
879 struct chfs_ebh *ebh = priv->ebh;
880 struct chfs_peb *peb = priv->peb;
881
882 peb->erase_cnt += 1;
883
884 if (ei->ei_state == FLASH_ERASE_DONE) {
885
886 /* Write out erase counter */
887 err = ebh->ops->mark_eb_hdr_free(ebh,
888 peb->pebnr, peb->erase_cnt);
889 if (err) {
890 /* cannot mark PEB as free, so erase it again */
891 chfs_err(
892 "cannot mark eraseblock as free, PEB: %d\n",
893 peb->pebnr);
894 mutex_enter(&ebh->erase_lock);
895 /*dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_callback() "
896 "after mark ebhdr free\n");*/
897 add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt,
898 &ebh->to_erase);
899 mutex_exit(&ebh->erase_lock);
900 /*dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_callback() "
901 "after mark ebhdr free\n");*/
902 kmem_free(peb, sizeof(struct chfs_peb));
903 return;
904 }
905
906 mutex_enter(&ebh->erase_lock);
907 /*dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_callback()\n");*/
908 err = add_peb_to_free(ebh, peb->pebnr, peb->erase_cnt);
909 mutex_exit(&ebh->erase_lock);
910 /*dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_callback()\n");*/
911 kmem_free(peb, sizeof(struct chfs_peb));
912 } else {
913 /*
914 * Erase is finished, but there was a problem,
915 * so erase PEB again
916 */
917 chfs_err("erase failed, state is: 0x%x\n", ei->ei_state);
918 add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt, &ebh->to_erase);
919 kmem_free(peb, sizeof(struct chfs_peb));
920 }
921 }
922
923 /**
924 * free_peb: free a PEB
925 * @ebh: chfs eraseblock handler
926 *
927 * This function erases the first physical eraseblock from one of the erase
928 * lists and adds it to the RB-tree of free PEBs.
929 * Returns zero in case of success, error code in case of fail.
930 */
931 int
932 free_peb(struct chfs_ebh *ebh)
933 {
934 int err, retries = 0;
935 off_t ofs;
936 struct chfs_peb *peb = NULL;
937 struct flash_erase_instruction *ei;
938
939 KASSERT(mutex_owned(&ebh->erase_lock));
940
941 if (!TAILQ_EMPTY(&ebh->fully_erased)) {
942 //dbg_ebh("[FREE PEB] got a fully erased block\n");
943 peb = TAILQ_FIRST(&ebh->fully_erased);
944 TAILQ_REMOVE(&ebh->fully_erased, peb, u.queue);
945 err = ebh->ops->mark_eb_hdr_free(ebh,
946 peb->pebnr, peb->erase_cnt);
947 if (err) {
948 goto out_free;
949 }
950 err = add_peb_to_free(ebh, peb->pebnr, peb->erase_cnt);
951 goto out_free;
952 }
953 /* Erase PEB */
954 //dbg_ebh("[FREE PEB] eraseing a block\n");
955 peb = TAILQ_FIRST(&ebh->to_erase);
956 TAILQ_REMOVE(&ebh->to_erase, peb, u.queue);
957 mutex_exit(&ebh->erase_lock);
958 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in free_peb()\n");
959 ofs = peb->pebnr * ebh->flash_if->erasesize;
960
961 /* XXX where do we free this? */
962 ei = kmem_alloc(sizeof(struct flash_erase_instruction)
963 + sizeof(struct chfs_erase_info_priv), KM_SLEEP);
964 retry:
965 memset(ei, 0, sizeof(*ei));
966
967 // ei->ei_if = ebh->flash_if;
968 ei->ei_addr = ofs;
969 ei->ei_len = ebh->flash_if->erasesize;
970 ei->ei_callback = erase_callback;
971 ei->ei_priv = (unsigned long) (&ei[1]);
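/* the private erase info lives in the extra space allocated right after the instruction */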
972
973 ((struct chfs_erase_info_priv *) ei->ei_priv)->ebh = ebh;
974 ((struct chfs_erase_info_priv *) ei->ei_priv)->peb = peb;
975
976 err = flash_erase(ebh->flash_dev, ei);
977 dbg_ebh("erased peb: %d\n", peb->pebnr);
978
979 /* einval would mean we did something wrong */
980 KASSERT(err != EINVAL);
981
982 if (err) {
983 dbg_ebh("errno: %d, ei->ei_state: %d\n", err, ei->ei_state);
984 if (CHFS_MAX_GET_PEB_RETRIES < ++retries &&
985 ei->ei_state == FLASH_ERASE_FAILED) {
986 /* The block went bad mark it */
987 dbg_ebh("ebh markbad! 0x%jx\n", (uintmax_t )ofs);
988 err = flash_block_markbad(ebh->flash_dev, ofs);
989 if (!err) {
990 ebh->peb_nr--;
991 }
992
993 goto out;
994 }
995 chfs_err("can not erase PEB: %d, try again\n", peb->pebnr);
996 goto retry;
997 }
998
999 out:
1000 /* lock the erase_lock, because it was locked
1001 * when the function was called */
1002 mutex_enter(&ebh->erase_lock);
1003 return err;
1004
1005 out_free:
1006 kmem_free(peb, sizeof(struct chfs_peb));
1007 return err;
1008 }
1009
1010 /**
1011 * release_peb - schedule an erase for the PEB
1012 * @ebh: chfs eraseblock handler
1013 * @pebnr: physical eraseblock number
1014 *
1015 * This function gets the PEB identified by @pebnr from the in_use RB-tree of
1016 * @ebh, removes it and schedules an erase for it.
1017 *
1018 * Returns zero on success, error code in case of fail.
1019 */
1020 int
1021 release_peb(struct chfs_ebh *ebh, int pebnr)
1022 {
1023 int err = 0;
1024 struct chfs_peb *peb;
1025
1026 mutex_enter(&ebh->erase_lock);
1027
1028 //dbg_ebh("LOCK: ebh->erase_lock spin locked in release_peb()\n");
1029 peb = find_peb_in_use(ebh, pebnr);
1030 if (!peb) {
1031 chfs_err("LEB is mapped, but is not in the 'in_use' "
1032 "tree of ebh\n");
1033 goto out_unlock;
1034 }
1035 err = add_peb_to_erase_queue(ebh, peb->pebnr, peb->erase_cnt,
1036 &ebh->to_erase);
1037
1038 if (err)
1039 goto out_unlock;
1040
1041 RB_REMOVE(peb_in_use_rbtree, &ebh->in_use, peb);
1042 out_unlock:
1043 mutex_exit(&ebh->erase_lock);
1044 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in release_peb()"
1045 // " at out_unlock\n");
1046 return err;
1047 }
1048
1049 /**
1050 * erase_thread - background thread for erasing PEBs
1051 * @data: pointer to the eraseblock handler
1052 */
1053 /*void
1054 erase_thread(void *data)
1055 {
1056 struct chfs_ebh *ebh = data;
1057
1058 dbg_ebh("erase thread started\n");
1059 while (ebh->bg_erase.eth_running) {
1060 int err;
1061
1062 mutex_enter(&ebh->erase_lock);
1063 dbg_ebh("LOCK: ebh->erase_lock spin locked in erase_thread()\n");
1064 if (TAILQ_EMPTY(&ebh->to_erase) && TAILQ_EMPTY(&ebh->fully_erased)) {
1065 dbg_ebh("thread has nothing to do\n");
1066 mutex_exit(&ebh->erase_lock);
1067 mutex_enter(&ebh->bg_erase.eth_thread_mtx);
1068 cv_timedwait_sig(&ebh->bg_erase.eth_wakeup,
1069 &ebh->bg_erase.eth_thread_mtx, mstohz(100));
1070 mutex_exit(&ebh->bg_erase.eth_thread_mtx);
1071
1072 dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_thread()\n");
1073 continue;
1074 }
1075 mutex_exit(&ebh->erase_lock);
1076 dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in erase_thread()\n");
1077
1078 err = free_peb(ebh);
1079 if (err)
1080 chfs_err("freeing PEB failed in the background thread: %d\n", err);
1081
1082 }
1083 dbg_ebh("erase thread stopped\n");
1084 kthread_exit(0);
1085 }*/
1086
1087 /**
1088 * erase_thread - background thread for erasing PEBs
1089 * @data: pointer to the eraseblock handler
1090 */
1091 void
1092 erase_thread(void *data) {
1093 dbg_ebh("[EBH THREAD] erase thread started\n");
1094
1095 struct chfs_ebh *ebh = data;
1096 int err;
1097
1098 mutex_enter(&ebh->erase_lock);
1099 while (ebh->bg_erase.eth_running) {
1100 if (TAILQ_EMPTY(&ebh->to_erase) &&
1101 TAILQ_EMPTY(&ebh->fully_erased)) {
1102 cv_timedwait_sig(&ebh->bg_erase.eth_wakeup,
1103 &ebh->erase_lock, mstohz(100));
1104 } else {
1105 /* XXX exiting this mutex is a bit odd here as
1106 * free_peb instantly reenters it...
1107 */
1108 err = free_peb(ebh);
1109 mutex_exit(&ebh->erase_lock);
1110 if (err) {
1111 chfs_err("freeing PEB failed in the"
1112 " background thread: %d\n", err);
1113 }
1114 mutex_enter(&ebh->erase_lock);
1115 }
1116 }
1117 mutex_exit(&ebh->erase_lock);
1118
1119 dbg_ebh("[EBH THREAD] erase thread stopped\n");
1120 kthread_exit(0);
1121 }
1122
1123 /**
1124 * erase_thread_start - init and start erase thread
1125 * @ebh: eraseblock handler
1126 */
1127 static void
1128 erase_thread_start(struct chfs_ebh *ebh)
1129 {
1130 cv_init(&ebh->bg_erase.eth_wakeup, "ebheracv");
1131
1132 ebh->bg_erase.eth_running = true;
1133 kthread_create(PRI_NONE, KTHREAD_MPSAFE | KTHREAD_MUSTJOIN, NULL,
1134 erase_thread, ebh, &ebh->bg_erase.eth_thread, "ebherase");
1135 }
1136
1137 /**
1138 * erase_thread_stop - stop background erase thread
1139 * @ebh: eraseblock handler
1140 */
1141 static void
1142 erase_thread_stop(struct chfs_ebh *ebh)
1143 {
1144 ebh->bg_erase.eth_running = false;
1145 cv_signal(&ebh->bg_erase.eth_wakeup);
1146 dbg_ebh("[EBH THREAD STOP] signaled\n");
1147
1148 kthread_join(ebh->bg_erase.eth_thread);
1149 #ifdef BROKEN_KTH_JOIN
1150 kpause("chfsebhjointh", false, mstohz(1000), NULL);
1151 #endif
1152
1153 cv_destroy(&ebh->bg_erase.eth_wakeup);
1154 }
1155
1156 /*****************************************************************************/
1157 /* End of Erase related operations */
1158 /*****************************************************************************/
1159
1160 /*****************************************************************************/
1161 /* Scan related operations */
1162 /*****************************************************************************/
1163 int
1164 scan_leb_used_cmp(struct chfs_scan_leb *sleb1, struct chfs_scan_leb *sleb2)
1165 {
1166 return (sleb1->lnr - sleb2->lnr);
1167 }
1168
1169 RB_PROTOTYPE(scan_leb_used_rbtree, chfs_scan_leb, u.rb, scan_leb_used_cmp);
1170 RB_GENERATE(scan_leb_used_rbtree, chfs_scan_leb, u.rb, scan_leb_used_cmp);
1171
1172 /**
1173 * scan_add_to_queue - adds a physical eraseblock to one of the
1174 * eraseblock queues
1175 * @si: chfs scanning information
1176 * @pebnr: physical eraseblock number
1177 * @erase_cnt: erase counter of the physical eraseblock
1178 * @queue: the queue to add to
1179 *
1180 * This function adds a physical eraseblock to one of the lists in the scanning
1181 * information.
1182 * Returns zero in case of success, negative error code in case of fail.
1183 */
1184 static int
1185 scan_add_to_queue(struct chfs_scan_info *si, int pebnr, int erase_cnt,
1186 struct scan_leb_queue *queue)
1187 {
1188 struct chfs_scan_leb *sleb;
1189
1190 sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1191
1192 sleb->pebnr = pebnr;
1193 sleb->erase_cnt = erase_cnt;
1194 TAILQ_INSERT_TAIL(queue, sleb, u.queue);
1195 return 0;
1196 }
1197
1198 /*
1199 * nor_scan_add_to_used - add a physical eraseblock to the
1200 * used tree of scan info
1201 * @ebh: chfs eraseblock handler
1202 * @si: chfs scanning information
1203 * @ebhdr: eraseblock header
1204 * @pebnr: physical eraseblock number
1205 * @leb_status: the status of the PEB's eraseblock header
1206 *
1207 * This function adds a PEB to the used tree of the scanning information.
1208 * It handles the situation when more than one physical eraseblock references
1209 * the same logical eraseblock.
1210 * Returns zero in case of success, error code in case of fail.
1211 */
1212 int
1213 nor_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1214 struct chfs_eb_hdr *ebhdr, int pebnr, int leb_status)
1215 {
1216 int err, lnr, ec;
1217 struct chfs_scan_leb *sleb, *old;
1218
1219 lnr = CHFS_GET_LID(ebhdr->u.nor_hdr.lid);
1220 ec = le32toh(ebhdr->ec_hdr.erase_cnt);
1221
1222 sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1223
1224 sleb->erase_cnt = ec;
1225 sleb->lnr = lnr;
1226 sleb->pebnr = pebnr;
1227 sleb->info = leb_status;
1228
1229 old = RB_INSERT(scan_leb_used_rbtree, &si->used, sleb);
1230 if (old) {
1231 kmem_free(sleb, sizeof(struct chfs_scan_leb));
1232 /* There is already an eraseblock in the used tree */
1233 /* If the new one is bad */
1234 if (EBHDR_LEB_DIRTY == leb_status &&
1235 EBHDR_LEB_OK == old->info) {
1236 return scan_add_to_queue(si, pebnr, ec, &si->erase);
1237 } else {
1238 err = scan_add_to_queue(si, old->pebnr,
1239 old->erase_cnt, &si->erase);
1240 if (err) {
1241 return err;
1242 }
1243
1244 old->erase_cnt = ec;
1245 old->lnr = lnr;
1246 old->pebnr = pebnr;
1247 old->info = leb_status;
1248 return 0;
1249 }
1250 }
1251 return 0;
1252 }
1253
1254 /**
1255 * nor_process_eb - read the headers from NOR flash, check them and add to
1256 * the scanning information
1257 * @ebh: chfs eraseblock handler
1258 * @si: chfs scanning information
1259 * @pebnr: physical eraseblock number
1260 *
1261 * Returns zero in case of success, error code in case of fail.
1262 */
1263 int
1264 nor_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1265 int pebnr, struct chfs_eb_hdr *ebhdr)
1266 {
1267 int err, erase_cnt, leb_status;
1268
1269 err = ebh->ops->read_eb_hdr(ebh, pebnr, ebhdr);
1270 if (err)
1271 return err;
1272
1273 erase_cnt = le32toh(ebhdr->ec_hdr.erase_cnt);
1274 dbg_ebh("erase_cnt: %d\n", erase_cnt);
1275 leb_status = ebh->ops->check_eb_hdr(ebh, ebhdr);
1276 if (EBHDR_LEB_BADMAGIC == leb_status ||
1277 EBHDR_LEB_BADCRC == leb_status) {
1278 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->corrupted);
1279 return err;
1280 }
1281 else if (EBHDR_LEB_FREE == leb_status) {
1282 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->free);
1283 goto count_mean;
1284 }
1285 else if (EBHDR_LEB_NO_HDR == leb_status) {
1286 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erased);
1287 return err;
1288 }
1289 else if (EBHDR_LEB_INVALIDATED == leb_status) {
1290 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erase);
1291 return err;
1292 }
1293
1294 err = nor_scan_add_to_used(ebh, si, ebhdr, pebnr, leb_status);
1295 if (err)
1296 return err;
1297
1298
1299 count_mean:
1300 si->sum_of_ec += erase_cnt;
1301 si->num_of_eb++;
1302
1303 return err;
1304 }
1305
1306 /*
1307 * nand_scan_add_to_used - add a physical eraseblock to the
1308 * used tree of scan info
1309 * @ebh: chfs eraseblock handler
1310 * @si: chfs scanning information
1311 * @ebhdr: eraseblock header
1312 * @pebnr: physical eraseblock number
1314 *
1315 * This function adds a PEB to the used tree of the scanning information.
1316 * It handles the situation when more than one physical eraseblock references
1317 * the same logical eraseblock.
1318 * Returns zero in case of success, error code in case of fail.
1319 */
1320 int
1321 nand_scan_add_to_used(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1322 struct chfs_eb_hdr *ebhdr, int pebnr)
1323 {
1324 int err, lnr, ec;
1325 struct chfs_scan_leb *sleb, *old;
1326 uint64_t serial = le64toh(ebhdr->u.nand_hdr.serial);
1327
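/* reading lid through nor_hdr relies on it sharing its union offset with nand_hdr.lid */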
1328 lnr = CHFS_GET_LID(ebhdr->u.nor_hdr.lid);
1329 ec = le32toh(ebhdr->ec_hdr.erase_cnt);
1330
1331 sleb = kmem_alloc(sizeof(struct chfs_scan_leb), KM_SLEEP);
1332
1333 sleb->erase_cnt = ec;
1334 sleb->lnr = lnr;
1335 sleb->pebnr = pebnr;
1336 sleb->info = serial;
1337
1338 old = RB_INSERT(scan_leb_used_rbtree, &si->used, sleb);
1339 if (old) {
1340 kmem_free(sleb, sizeof(struct chfs_scan_leb));
1341 /* There is already an eraseblock in the used tree */
1342 /* If the new one is bad */
1343 if (serial < old->info)
1344 return scan_add_to_queue(si, pebnr, ec, &si->erase);
1345 else {
1346 err = scan_add_to_queue(si,
1347 old->pebnr, old->erase_cnt, &si->erase);
1348 if (err)
1349 return err;
1350
1351 old->erase_cnt = ec;
1352 old->lnr = lnr;
1353 old->pebnr = pebnr;
1354 old->info = serial;
1355 return 0;
1356 }
1357 }
1358 return 0;
1359 }
1360
1361 /**
1362 * nand_process_eb - read the headers from NAND flash, check them and add to the
1363 * scanning information
1364 * @ebh: chfs eraseblock handler
1365 * @si: chfs scanning information
1366 * @pebnr: physical eraseblock number
1367 *
1368 * Returns zero in case of success, error code in case of fail.
1369 */
1370 int
1371 nand_process_eb(struct chfs_ebh *ebh, struct chfs_scan_info *si,
1372 int pebnr, struct chfs_eb_hdr *ebhdr)
1373 {
1374 int err, erase_cnt, leb_status;
1375 uint64_t max_serial;
1376 /* isbad() is defined on some ancient platforms, heh */
1377 bool is_bad;
1378
1379 /* Check block is bad */
1380 err = flash_block_isbad(ebh->flash_dev,
1381 pebnr * ebh->flash_if->erasesize, &is_bad);
1382 if (err) {
1383 chfs_err("checking block is bad failed\n");
1384 return err;
1385 }
1386 if (is_bad) {
1387 si->bad_peb_cnt++;
1388 return 0;
1389 }
1390
1391 err = ebh->ops->read_eb_hdr(ebh, pebnr, ebhdr);
1392 if (err)
1393 return err;
1394
1395 erase_cnt = le32toh(ebhdr->ec_hdr.erase_cnt);
1396 leb_status = ebh->ops->check_eb_hdr(ebh, ebhdr);
1397 if (EBHDR_LEB_BADMAGIC == leb_status ||
1398 EBHDR_LEB_BADCRC == leb_status) {
1399 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->corrupted);
1400 return err;
1401 }
1402 else if (EBHDR_LEB_FREE == leb_status) {
1403 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->free);
1404 goto count_mean;
1405 }
1406 else if (EBHDR_LEB_NO_HDR == leb_status) {
1407 err = scan_add_to_queue(si, pebnr, erase_cnt, &si->erased);
1408 return err;
1409 }
1410
1411 err = nand_scan_add_to_used(ebh, si, ebhdr, pebnr);
1412 if (err)
1413 return err;
1414
1415 max_serial = le64toh(ebhdr->u.nand_hdr.serial);
1416 if (max_serial > *ebh->max_serial) {
1417 *ebh->max_serial = max_serial;
1418 }
1419
1420 count_mean:
1421 si->sum_of_ec += erase_cnt;
1422 si->num_of_eb++;
1423
1424 return err;
1425 }
1426
1427 /**
1428 * chfs_scan - scans the media and returns information about it
1429 * @ebh: chfs eraseblock handler
1430 *
1431 * This function scans through the media and returns information about it,
1432 * or NULL if the scan fails.
1433 */
1434 struct chfs_scan_info *
1435 chfs_scan(struct chfs_ebh *ebh)
1436 {
1437 struct chfs_scan_info *si;
1438 struct chfs_eb_hdr *ebhdr;
1439 int pebnr, err;
1440
1441 si = kmem_alloc(sizeof(*si), KM_SLEEP);
1442
1443 TAILQ_INIT(&si->corrupted);
1444 TAILQ_INIT(&si->free);
1445 TAILQ_INIT(&si->erase);
1446 TAILQ_INIT(&si->erased);
1447 RB_INIT(&si->used);
1448 si->bad_peb_cnt = 0;
1449 si->num_of_eb = 0;
1450 si->sum_of_ec = 0;
1451
1452 ebhdr = kmem_alloc(sizeof(*ebhdr), KM_SLEEP);
1453
1454 for (pebnr = 0; pebnr < ebh->peb_nr; pebnr++) {
1455 dbg_ebh("processing PEB %d\n", pebnr);
1456 err = ebh->ops->process_eb(ebh, si, pebnr, ebhdr);
1457 if (err < 0)
1458 goto out_ebhdr;
1459 }
1460 kmem_free(ebhdr, sizeof(*ebhdr));
1461 dbg_ebh("[CHFS_SCAN] scanning information collected\n");
1462 return si;
1463
1464 out_ebhdr:
1465 kmem_free(ebhdr, sizeof(*ebhdr));
1466 kmem_free(si, sizeof(*si));
1467 return NULL;
1468 }
1469
1470 /**
1471 * scan_info_destroy - frees all lists and trees in the scanning information
1472 * @si: the scanning information
1473 */
1474 void
1475 scan_info_destroy(struct chfs_scan_info *si)
1476 {
1477 EBH_QUEUE_DESTROY(&si->corrupted,
1478 struct chfs_scan_leb, u.queue);
1479
1480 EBH_QUEUE_DESTROY(&si->erase,
1481 struct chfs_scan_leb, u.queue);
1482
1483 EBH_QUEUE_DESTROY(&si->erased,
1484 struct chfs_scan_leb, u.queue);
1485
1486 EBH_QUEUE_DESTROY(&si->free,
1487 struct chfs_scan_leb, u.queue);
1488
1489 EBH_TREE_DESTROY(scan_leb_used_rbtree,
1490 &si->used, struct chfs_scan_leb);
1491
1492 kmem_free(si, sizeof(*si));
1493 dbg_ebh("[SCAN_INFO_DESTROY] scanning information destroyed\n");
1494 }
1495
1496 /**
1497 * scan_media - scan media
1498 *
1499 * @ebh - chfs eraseblock handler
1500 *
1501 * Returns zero in case of success, error code in case of fail.
1502 */
1503
1504 int
1505 scan_media(struct chfs_ebh *ebh)
1506 {
1507 int err, i, avg_ec;
1508 struct chfs_scan_info *si;
1509 struct chfs_scan_leb *sleb;
1510
1511 si = chfs_scan(ebh);
1512 /*
1513 * Process the scan info, manage the eraseblock lists
1514 */
1515 mutex_init(&ebh->ltree_lock, MUTEX_DEFAULT, IPL_NONE);
1516 mutex_init(&ebh->erase_lock, MUTEX_DEFAULT, IPL_NONE);
1517 RB_INIT(&ebh->ltree);
1518 RB_INIT(&ebh->free);
1519 RB_INIT(&ebh->in_use);
1520 TAILQ_INIT(&ebh->to_erase);
1521 TAILQ_INIT(&ebh->fully_erased);
1522 mutex_init(&ebh->alc_mutex, MUTEX_DEFAULT, IPL_NONE);
1523
1524 ebh->peb_nr -= si->bad_peb_cnt;
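/* blocks found bad during the scan are not usable PEBs */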
1525
1526 /*
1527 * Create background thread for erasing
1528 */
1529 erase_thread_start(ebh);
1530
1531 ebh->lmap = kmem_alloc(ebh->peb_nr * sizeof(int), KM_SLEEP);
1532
1533 for (i = 0; i < ebh->peb_nr; i++) {
1534 ebh->lmap[i] = EBH_LEB_UNMAPPED;
1535 }
1536
1537 if (si->num_of_eb == 0) {
1538 /* The flash contains no data. */
1539 avg_ec = 0;
1540 }
1541 else {
1542 avg_ec = (int) (si->sum_of_ec / si->num_of_eb);
1543 }
1544 dbg_ebh("num_of_eb: %d\n", si->num_of_eb);
1545
1546 mutex_enter(&ebh->erase_lock);
1547
1548 RB_FOREACH(sleb, scan_leb_used_rbtree, &si->used) {
1549 ebh->lmap[sleb->lnr] = sleb->pebnr;
1550 err = add_peb_to_in_use(ebh, sleb->pebnr, sleb->erase_cnt);
1551 if (err)
1552 goto out_free;
1553 }
1554
1555 TAILQ_FOREACH(sleb, &si->erased, u.queue) {
1556 err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1557 &ebh->fully_erased);
1558 if (err)
1559 goto out_free;
1560 }
1561
1562 TAILQ_FOREACH(sleb, &si->erase, u.queue) {
1563 err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1564 &ebh->to_erase);
1565 if (err)
1566 goto out_free;
1567 }
1568
1569 TAILQ_FOREACH(sleb, &si->free, u.queue) {
1570 err = add_peb_to_free(ebh, sleb->pebnr, sleb->erase_cnt);
1571 if (err)
1572 goto out_free;
1573 }
1574
1575 TAILQ_FOREACH(sleb, &si->corrupted, u.queue) {
1576 err = add_peb_to_erase_queue(ebh, sleb->pebnr, avg_ec,
1577 &ebh->to_erase);
1578 if (err)
1579 goto out_free;
1580 }
1581 mutex_exit(&ebh->erase_lock);
1582 scan_info_destroy(si);
1583 return 0;
1584
1585 out_free:
1586 mutex_exit(&ebh->erase_lock);
1587 kmem_free(ebh->lmap, ebh->peb_nr * sizeof(int));
1588 scan_info_destroy(si);
1589 dbg_ebh("[SCAN_MEDIA] returning with error: %d\n", err);
1590 return err;
1591 }
1592
1593 /*****************************************************************************/
1594 /* End of Scan related operations */
1595 /*****************************************************************************/
1596
1597 /**
1598 * ebh_open - opens the flash device and initializes the eraseblock handler
1599 * @ebh: eraseblock handler
1600 * @dev: flash device number to use
1601 *
1602 * Returns zero in case of success, error code in case of fail.
1603 */
1604 int
1605 ebh_open(struct chfs_ebh *ebh, dev_t dev)
1606 {
1607 int err;
1608
1609 ebh->flash_dev = flash_get_device(dev);
1610 if (!ebh->flash_dev) {
1611 aprint_error("ebh_open: cant get flash device\n");
1612 return ENODEV;
1613 }
1614
1615 ebh->flash_if = flash_get_interface(dev);
1616 if (!ebh->flash_if) {
1617 aprint_error("ebh_open: cant get flash interface\n");
1618 return ENODEV;
1619 }
1620
1621 ebh->flash_size = flash_get_size(dev);
1622 ebh->peb_nr = ebh->flash_size / ebh->flash_if->erasesize;
1623 // ebh->peb_nr = ebh->flash_if->size / ebh->flash_if->erasesize;
1624 /* Set up flash operations based on flash type */
1625 ebh->ops = kmem_alloc(sizeof(struct chfs_ebh_ops), KM_SLEEP);
1626
1627 switch (ebh->flash_if->type) {
1628 case FLASH_TYPE_NOR:
1629 ebh->eb_size = ebh->flash_if->erasesize -
1630 CHFS_EB_EC_HDR_SIZE - CHFS_EB_HDR_NOR_SIZE;
1631
1632 ebh->ops->read_eb_hdr = nor_read_eb_hdr;
1633 ebh->ops->write_eb_hdr = nor_write_eb_hdr;
1634 ebh->ops->check_eb_hdr = nor_check_eb_hdr;
1635 ebh->ops->mark_eb_hdr_dirty_flash =
1636 nor_mark_eb_hdr_dirty_flash;
1637 ebh->ops->invalidate_eb_hdr = nor_invalidate_eb_hdr;
1638 ebh->ops->mark_eb_hdr_free = mark_eb_hdr_free;
1639
1640 ebh->ops->process_eb = nor_process_eb;
1641
1642 ebh->ops->create_eb_hdr = nor_create_eb_hdr;
1643 ebh->ops->calc_data_offs = nor_calc_data_offs;
1644
1645 ebh->max_serial = NULL;
1646 break;
1647 case FLASH_TYPE_NAND:
1648 ebh->eb_size = ebh->flash_if->erasesize -
1649 2 * ebh->flash_if->page_size;
1650
1651 ebh->ops->read_eb_hdr = nand_read_eb_hdr;
1652 ebh->ops->write_eb_hdr = nand_write_eb_hdr;
1653 ebh->ops->check_eb_hdr = nand_check_eb_hdr;
1654 ebh->ops->mark_eb_hdr_free = mark_eb_hdr_free;
1655 ebh->ops->mark_eb_hdr_dirty_flash = NULL;
1656 ebh->ops->invalidate_eb_hdr = NULL;
1657
1658 ebh->ops->process_eb = nand_process_eb;
1659
1660 ebh->ops->create_eb_hdr = nand_create_eb_hdr;
1661 ebh->ops->calc_data_offs = nand_calc_data_offs;
1662
1663 ebh->max_serial = kmem_alloc(sizeof(uint64_t), KM_SLEEP);
1664
1665 *ebh->max_serial = 0;
1666 break;
1667 default:
1668 return 1;
1669 }
1670 printf("opening ebh: eb_size: %zu\n", ebh->eb_size);
1671 err = scan_media(ebh);
1672 if (err) {
1673 dbg_ebh("Scan failed.");
1674 kmem_free(ebh->ops, sizeof(struct chfs_ebh_ops));
1675 kmem_free(ebh, sizeof(struct chfs_ebh));
1676 return err;
1677 }
1678 return 0;
1679 }
1680
1681 /**
1682 * ebh_close - close ebh
1683 * @ebh: eraseblock handler
1684 * Returns zero in case of success, error code in case of fail.
1685 */
1686 int
1687 ebh_close(struct chfs_ebh *ebh)
1688 {
1689 erase_thread_stop(ebh);
1690
1691 EBH_TREE_DESTROY(peb_free_rbtree, &ebh->free, struct chfs_peb);
1692 EBH_TREE_DESTROY(peb_in_use_rbtree, &ebh->in_use, struct chfs_peb);
1693
1694 EBH_QUEUE_DESTROY(&ebh->fully_erased, struct chfs_peb, u.queue);
1695 EBH_QUEUE_DESTROY(&ebh->to_erase, struct chfs_peb, u.queue);
1696
1697 /* XXX HACK, see ebh.h */
1698 EBH_TREE_DESTROY_MUTEX(ltree_rbtree, &ebh->ltree,
1699 struct chfs_ltree_entry);
1700
1701 KASSERT(!mutex_owned(&ebh->ltree_lock));
1702 KASSERT(!mutex_owned(&ebh->alc_mutex));
1703 KASSERT(!mutex_owned(&ebh->erase_lock));
1704
1705 mutex_destroy(&ebh->ltree_lock);
1706 mutex_destroy(&ebh->alc_mutex);
1707 mutex_destroy(&ebh->erase_lock);
1708
1709 kmem_free(ebh->ops, sizeof(struct chfs_ebh_ops));
1710 kmem_free(ebh, sizeof(struct chfs_ebh));
1711
1712 return 0;
1713 }
1714
1715 /**
1716 * ebh_read_leb - read data from leb
1717 * @ebh: eraseblock handler
1718 * @lnr: logical eraseblock number
1719 * @buf: buffer to read to
1720 * @offset: offset from where to read
1721 * @len: number of bytes to read
1722 *
1723 * Returns zero in case of success, error code in case of fail.
1724 */
1725 int
1726 ebh_read_leb(struct chfs_ebh *ebh, int lnr, char *buf, uint32_t offset,
1727 size_t len, size_t *retlen)
1728 {
1729 int err, pebnr;
1730 off_t data_offset;
1731
1732 KASSERT(offset + len <= ebh->eb_size);
1733
1734 err = leb_read_lock(ebh, lnr);
1735 if (err)
1736 return err;
1737
1738 pebnr = ebh->lmap[lnr];
1739 /* If the LEB is not mapped, the buffer is filled with 0xFF */
1740 if (EBH_LEB_UNMAPPED == pebnr) {
1741 leb_read_unlock(ebh, lnr);
1742 memset(buf, 0xFF, len);
1743 return 0;
1744 }
1745
1746 /* Read data */
1747 data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1748 err = flash_read(ebh->flash_dev, data_offset, len, retlen,
1749 (unsigned char *) buf);
1750 if (err)
1751 goto out_free;
1752
1753 KASSERT(len == *retlen);
1754
1755 out_free:
1756 leb_read_unlock(ebh, lnr);
1757 return err;
1758 }
1759
1760 /**
1761 * get_peb: get a free physical eraseblock
1762 * @ebh - chfs eraseblock handler
1763 *
1764 * This function gets a free eraseblock from the ebh->free RB-tree.
1765 * The first entry will be returned and deleted from the tree.
1766 * The entries are sorted by erase counter, so the PEB with the smallest
1767 * erase counter will be the one returned.
1768 * If something goes bad a negative value will be returned.
1769 */
1770 int
1771 get_peb(struct chfs_ebh *ebh)
1772 {
1773 int err, pebnr;
1774 struct chfs_peb *peb;
1775
1776 retry:
1777 mutex_enter(&ebh->erase_lock);
1778 //dbg_ebh("LOCK: ebh->erase_lock spin locked in get_peb()\n");
1779 if (RB_EMPTY(&ebh->free)) {
1780 /*There is no more free PEBs in the tree*/
1781 if (TAILQ_EMPTY(&ebh->to_erase) &&
1782 TAILQ_EMPTY(&ebh->fully_erased)) {
1783 mutex_exit(&ebh->erase_lock);
1784 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1785 return ENOSPC;
1786 }
1787 err = free_peb(ebh);
1788
1789 mutex_exit(&ebh->erase_lock);
1790 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1791
1792 if (err)
1793 return err;
1794 goto retry;
1795 }
1796 peb = RB_MIN(peb_free_rbtree, &ebh->free);
1797 pebnr = peb->pebnr;
1798 RB_REMOVE(peb_free_rbtree, &ebh->free, peb);
1799 err = add_peb_to_in_use(ebh, peb->pebnr, peb->erase_cnt);
1800 if (err)
1801 pebnr = err;
1802
1803 kmem_free(peb, sizeof(struct chfs_peb));
1804
1805 mutex_exit(&ebh->erase_lock);
1806 //dbg_ebh("UNLOCK: ebh->erase_lock spin unlocked in get_peb()\n");
1807
1808 return pebnr;
1809 }
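
/*
 * Call-convention sketch (names are illustrative): callers treat a negative
 * return value as failure and otherwise receive the number of a PEB that is
 * already accounted as in use:
 *
 *	pebnr = get_peb(ebh);
 *	if (pebnr < 0)
 *		return pebnr;
 */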
1810
1811 /**
1812 * ebh_write_leb - write data to leb
1813 * @ebh: eraseblock handler
1814 * @lnr: logical eraseblock number
1815 * @buf: data to write
1816 * @offset: offset where to write
1817  * @len: number of bytes to write
1818 *
1819 * Returns zero in case of success, error code in case of fail.
1820 */
1821 int
1822 ebh_write_leb(struct chfs_ebh *ebh, int lnr, char *buf, uint32_t offset,
1823 size_t len, size_t *retlen)
1824 {
1825 int err, pebnr, retries = 0;
1826 off_t data_offset;
1827 struct chfs_eb_hdr *ebhdr;
1828
1829 	dbg("offset: %u | len: %zu | (offset+len): %zu"
1830 	    " | ebsize: %zu\n", offset, len, (offset+len), ebh->eb_size);
1831
1832 KASSERT(offset + len <= ebh->eb_size);
1833
1834 err = leb_write_lock(ebh, lnr);
1835 if (err)
1836 return err;
1837
1838 pebnr = ebh->lmap[lnr];
1839 /* If the LEB is mapped write out data */
1840 if (pebnr != EBH_LEB_UNMAPPED) {
1841 data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1842 err = flash_write(ebh->flash_dev, data_offset, len, retlen,
1843 (unsigned char *) buf);
1844
1845 if (err) {
1846 chfs_err("error %d while writing %zu bytes to PEB "
1847 "%d:%ju, written %zu bytes\n",
1848 err, len, pebnr, (uintmax_t )offset, *retlen);
1849 } else {
1850 KASSERT(len == *retlen);
1851 }
1852
1853 leb_write_unlock(ebh, lnr);
1854 return err;
1855 }
1856
1857 /*
1858 * If the LEB is unmapped, get a free PEB and write the
1859 * eraseblock header first
1860 */
1861 ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
1862
1863 /* Setting up eraseblock header properties */
1864 ebh->ops->create_eb_hdr(ebhdr, lnr);
1865
1866 retry:
1867 /* Getting a physical eraseblock from the wear leveling system */
1868 pebnr = get_peb(ebh);
1869 if (pebnr < 0) {
1870 leb_write_unlock(ebh, lnr);
1871 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1872 return pebnr;
1873 }
1874
1875 /* Write the eraseblock header to the media */
1876 err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
1877 if (err) {
1878 chfs_warn(
1879 "error writing eraseblock header: LEB %d , PEB %d\n",
1880 lnr, pebnr);
1881 goto write_error;
1882 }
1883
1884 /* Write out data */
1885 if (len) {
1886 data_offset = ebh->ops->calc_data_offs(ebh, pebnr, offset);
1887 err = flash_write(ebh->flash_dev,
1888 data_offset, len, retlen, (unsigned char *) buf);
1889 if (err) {
1890 chfs_err("error %d while writing %zu bytes to PEB "
1891 		    "%d:%ju, written %zu bytes\n",
1892 err, len, pebnr, (uintmax_t )offset, *retlen);
1893 goto write_error;
1894 }
1895 }
1896
1897 ebh->lmap[lnr] = pebnr;
1898 leb_write_unlock(ebh, lnr);
1899 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1900
1901 return 0;
1902
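/*
 * Header or data write failed: release the bad PEB (so it can be erased
 * later) and retry with a fresh one, up to CHFS_MAX_GET_PEB_RETRIES attempts.
 */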
1903 write_error:
	err = release_peb(ebh, pebnr);
1904 // max retries (NOW: 2)
1905 if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
1906 leb_write_unlock(ebh, lnr);
1907 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1908 return err;
1909 }
1910 goto retry;
1911 }
1912
1913 /**
1914 * ebh_erase_leb - erase a leb
1915 * @ebh: eraseblock handler
1916 * @lnr: leb number
1917 *
1918 * Returns zero in case of success, error code in case of fail.
1919 */
1920 int
1921 ebh_erase_leb(struct chfs_ebh *ebh, int lnr)
1922 {
1923 int err, pebnr;
1924
1925 leb_write_lock(ebh, lnr);
1926
1927 pebnr = ebh->lmap[lnr];
1928 if (pebnr < 0) {
1929 leb_write_unlock(ebh, lnr);
1930 return EBH_LEB_UNMAPPED;
1931 }
1932 err = release_peb(ebh, pebnr);
1933 if (err)
1934 goto out_unlock;
1935
1936 ebh->lmap[lnr] = EBH_LEB_UNMAPPED;
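	/* Wake the background erase thread so the released PEB gets erased. */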
1937 cv_signal(&ebh->bg_erase.eth_wakeup);
1938 out_unlock:
1939 leb_write_unlock(ebh, lnr);
1940 return err;
1941 }
1942
1943 /**
1944 * ebh_map_leb - maps a PEB to LEB
1945 * @ebh: eraseblock handler
1946 * @lnr: leb number
1947 *
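 * A free PEB is taken from the wear-leveling system and only the eraseblock
 * header is written to it; the data itself is written later through
 * ebh_write_leb().
 *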
1948 * Returns zero on success, error code in case of fail
1949 */
1950 int
1951 ebh_map_leb(struct chfs_ebh *ebh, int lnr)
1952 {
1953 int err, pebnr, retries = 0;
1954 struct chfs_eb_hdr *ebhdr;
1955
1956 ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
1957
1958 err = leb_write_lock(ebh, lnr);
1959 if (err) {
1960 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1961 return err;
1962 }
1963
1964 retry:
1965 pebnr = get_peb(ebh);
1966 if (pebnr < 0) {
1967 err = pebnr;
1968 goto out_unlock;
1969 }
1970
1971 ebh->ops->create_eb_hdr(ebhdr, lnr);
1972
1973 err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
1974 if (err) {
1975 chfs_warn(
1976 "error writing eraseblock header: LEB %d , PEB %d\n",
1977 lnr, pebnr);
1978 goto write_error;
1979 }
1980
1981 ebh->lmap[lnr] = pebnr;
1982
1983 out_unlock:
1984 leb_write_unlock(ebh, lnr);
1985 return err;
1986
1987 write_error:
1988 err = release_peb(ebh, pebnr);
1989 // max retries (NOW: 2)
1990 if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
1991 leb_write_unlock(ebh, lnr);
1992 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
1993 return err;
1994 }
1995 goto retry;
1996 }
1997
1998 /**
1999  * ebh_unmap_leb - unmap a leb
2000 * @ebh: eraseblock handler
2001 * @lnr: leb number
2002 *
2003  * Returns zero on success, error code in case of fail.
2004 */
2005 int
2006 ebh_unmap_leb(struct chfs_ebh *ebh, int lnr)
2007 {
2008 int err;
2009
2010 if (ebh_is_mapped(ebh, lnr) < 0)
2011 		/* The eraseblock is already unmapped */
2012 return 0;
2013
2014 err = ebh_erase_leb(ebh, lnr);
2015
2016 return err;
2017 }
2018
2019 /**
2020 * ebh_is_mapped - check if a PEB is mapped to @lnr
2021 * @ebh: eraseblock handler
2022 * @lnr: leb number
2023 *
2024  * Returns the mapped PEB number if the LEB is mapped, a negative value otherwise.
2025 */
2026 int
2027 ebh_is_mapped(struct chfs_ebh *ebh, int lnr)
2028 {
2029 int err, result;
2030 err = leb_read_lock(ebh, lnr);
2031 if (err)
2032 return err;
2033
2034 result = ebh->lmap[lnr];
2035 leb_read_unlock(ebh, lnr);
2036
2037 return result;
2038 }
2039
2040 /**
2041 * ebh_change_leb - write the LEB to another PEB
2042 * @ebh: eraseblock handler
2043 * @lnr: leb number
2044 * @buf: data to write
2045 * @len: length of data
2046 * Returns zero in case of success, error code in case of fail.
2047 */
2048 int
2049 ebh_change_leb(struct chfs_ebh *ebh, int lnr, char *buf, size_t len,
2050 size_t *retlen)
2051 {
2052 int err, pebnr, pebnr_old, retries = 0;
2053 off_t data_offset;
2054
2055 struct chfs_peb *peb = NULL;
2056 struct chfs_eb_hdr *ebhdr;
2057
2058 if (ebh_is_mapped(ebh, lnr) < 0)
2059 return EBH_LEB_UNMAPPED;
2060
2061 if (len == 0) {
2062 err = ebh_unmap_leb(ebh, lnr);
2063 if (err)
2064 return err;
2065 return ebh_map_leb(ebh, lnr);
2066 }
2067
2068 ebhdr = kmem_alloc(sizeof(struct chfs_eb_hdr), KM_SLEEP);
2069
2070 pebnr_old = ebh->lmap[lnr];
2071
2072 mutex_enter(&ebh->alc_mutex);
2073 err = leb_write_lock(ebh, lnr);
2074 if (err)
2075 goto out_mutex;
2076
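	/*
	 * The mark-dirty and invalidate hooks below are optional (NULL for
	 * backends that do not need them); when present, the old eraseblock
	 * header is first marked dirty on the flash.
	 */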
2077 if (ebh->ops->mark_eb_hdr_dirty_flash) {
2078 err = ebh->ops->mark_eb_hdr_dirty_flash(ebh, pebnr_old, lnr);
2079 if (err)
2080 goto out_unlock;
2081 }
2082
2083 /* Setting up eraseblock header properties */
2084 ebh->ops->create_eb_hdr(ebhdr, lnr);
2085
2086 retry:
2087 /* Getting a physical eraseblock from the wear leveling system */
2088 pebnr = get_peb(ebh);
2089 if (pebnr < 0) {
2090 leb_write_unlock(ebh, lnr);
2091 mutex_exit(&ebh->alc_mutex);
2092 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2093 return pebnr;
2094 }
2095
2096 err = ebh->ops->write_eb_hdr(ebh, pebnr, ebhdr);
2097 if (err) {
2098 chfs_warn(
2099 "error writing eraseblock header: LEB %d , PEB %d",
2100 lnr, pebnr);
2101 goto write_error;
2102 }
2103
2104 /* Write out data */
2105 data_offset = ebh->ops->calc_data_offs(ebh, pebnr, 0);
2106 err = flash_write(ebh->flash_dev, data_offset, len, retlen,
2107 (unsigned char *) buf);
2108 if (err) {
2109 chfs_err("error %d while writing %zu bytes to PEB %d:%ju,"
2110 " written %zu bytes",
2111 err, len, pebnr, (uintmax_t)data_offset, *retlen);
2112 goto write_error;
2113 }
2114
2115 ebh->lmap[lnr] = pebnr;
2116
2117 if (ebh->ops->invalidate_eb_hdr) {
2118 err = ebh->ops->invalidate_eb_hdr(ebh, pebnr_old);
2119 if (err)
2120 goto out_unlock;
2121 }
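	/* The data now lives on the new PEB; release the old one. */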
2122 peb = find_peb_in_use(ebh, pebnr_old);
2123 err = release_peb(ebh, peb->pebnr);
2124
2125 out_unlock:
2126 leb_write_unlock(ebh, lnr);
2127
2128 out_mutex:
2129 mutex_exit(&ebh->alc_mutex);
2130 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2131 kmem_free(peb, sizeof(struct chfs_peb));
2132 return err;
2133
2134 write_error:
2135 err = release_peb(ebh, pebnr);
2136 //max retries (NOW: 2)
2137 if (err || CHFS_MAX_GET_PEB_RETRIES < ++retries) {
2138 leb_write_unlock(ebh, lnr);
2139 mutex_exit(&ebh->alc_mutex);
2140 kmem_free(ebhdr, sizeof(struct chfs_eb_hdr));
2141 return err;
2142 }
2143 goto retry;
2144 }
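
/*
 * Illustrative LEB life cycle as seen by a caller of this API (a sketch
 * only; real callers check every return value).  ebh_write_leb() maps an
 * unmapped LEB on demand, so the explicit ebh_map_leb() step is optional:
 *
 *	err = ebh_map_leb(ebh, lnr);
 *	err = ebh_write_leb(ebh, lnr, buf, 0, len, &retlen);
 *	err = ebh_read_leb(ebh, lnr, buf, 0, len, &retlen);
 *	err = ebh_unmap_leb(ebh, lnr);
 */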
2145
2146