1 /*
2 * The little filesystem
3 *
4 * Copyright (c) 2022, The littlefs authors.
5 * Copyright (c) 2017, Arm Limited. All rights reserved.
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8 #include "lfs.h"
9 #include "lfs_util.h"
10
11 #ifdef __ZEPHYR__
12 #include <zephyr/logging/log.h>
13 LOG_MODULE_DECLARE(littlefs, CONFIG_FS_LOG_LEVEL);
14 #endif
15
16 #define LFS_BLOCK_NULL ((lfs_block_t)-1)
17 #define LFS_BLOCK_INLINE ((lfs_block_t)-2)
18
// internal positive status codes, distinct from the negative LFS_ERR_*
// codes, returned by some metadata operations to signal extra outcomes
enum {
    LFS_OK_RELOCATED = 1,
    LFS_OK_DROPPED = 2,
    LFS_OK_ORPHANED = 3,
};
24
// three-way comparison results returned by lfs_bd_cmp
enum {
    LFS_CMP_EQ = 0,
    LFS_CMP_LT = 1,
    LFS_CMP_GT = 2,
};
30
31
32 /// Caching block device operations ///
33
// Invalidate a read cache without touching its buffer.
//
// Skipping the scrub is cheaper when the cache is read-only or is about
// to be rewritten with identical data (e.g. during relocates).
static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
    rcache->block = LFS_BLOCK_NULL;
    (void)lfs;
}
40
// Invalidate a program cache and scrub its buffer.
//
// The buffer is filled with 0xff (the erased state) so stale contents
// can't leak into later programs.
static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
    pcache->block = LFS_BLOCK_NULL;
    memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
}
46
// Read a byte range from the block device, going through the caches.
//
// Data is served, in priority order, from the pcache (pending programs),
// the rcache (previously read data), and finally the device itself. hint
// is the number of bytes the caller expects to read soon and controls
// how much is prefetched into the rcache, or whether the cache is
// bypassed entirely for large aligned reads.
//
// Returns 0 on success, or a negative error code.
static int lfs_bd_read(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    // out-of-range reads indicate corrupt metadata; block_count may be
    // zero before the superblock has been parsed
    if (off+size > lfs->cfg->block_size
            || (lfs->block_count && block >= lfs->block_count)) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        lfs_size_t diff = size;

        if (pcache && block == pcache->block &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority, only read up to where it begins
            diff = lfs_min(diff, pcache->off-off);
        }

        if (block == rcache->block &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority, only read up to where it begins
            diff = lfs_min(diff, rcache->off-off);
        }

        if (size >= hint && off % lfs->cfg->read_size == 0 &&
                size >= lfs->cfg->read_size) {
            // bypass cache? large aligned reads go straight into the
            // caller's buffer
            diff = lfs_aligndown(diff, lfs->cfg->read_size);
            int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // load to cache, first condition can no longer fail
        LFS_ASSERT(!lfs->block_count || block < lfs->block_count);
        rcache->block = block;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        // prefetch up to hint bytes, clamped to the block end and the
        // cache size
        rcache->size = lfs_min(
                lfs_min(
                    lfs_alignup(off+hint, lfs->cfg->read_size),
                    lfs->cfg->block_size)
                - rcache->off,
                lfs->cfg->cache_size);
        int err = lfs->cfg->read(lfs->cfg, rcache->block,
                rcache->off, rcache->buffer, rcache->size);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }
    }

    return 0;
}
129
// Compare data on disk against a memory buffer.
//
// Reads through the caches in small chunks. Returns LFS_CMP_LT/EQ/GT
// describing how the on-disk data orders against buffer, or a negative
// error code from the underlying read.
static int lfs_bd_cmp(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;

    lfs_off_t i = 0;
    while (i < size) {
        uint8_t scratch[8];
        lfs_size_t chunk = lfs_min(size-i, sizeof(scratch));

        int err = lfs_bd_read(lfs,
                pcache, rcache, hint-i,
                block, off+i, scratch, chunk);
        if (err) {
            return err;
        }

        int order = memcmp(scratch, data+i, chunk);
        if (order < 0) {
            return LFS_CMP_LT;
        } else if (order > 0) {
            return LFS_CMP_GT;
        }

        i += chunk;
    }

    return LFS_CMP_EQ;
}
156
// Fold the contents of a disk region into a running CRC.
//
// Reads through the caches in small chunks, updating *crc in place.
// Returns 0 on success, or a negative error code.
static int lfs_bd_crc(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off, lfs_size_t size, uint32_t *crc) {
    for (lfs_off_t i = 0; i < size;) {
        uint8_t scratch[8];
        lfs_size_t chunk = lfs_min(size-i, sizeof(scratch));

        int err = lfs_bd_read(lfs,
                pcache, rcache, hint-i,
                block, off+i, scratch, chunk);
        if (err) {
            return err;
        }

        *crc = lfs_crc(*crc, scratch, chunk);
        i += chunk;
    }

    return 0;
}
177
178 #ifndef LFS_READONLY
// Write any data pending in the pcache out to the block device.
//
// The program is padded up to prog_size. If validate is true the data is
// read back (after dropping the rcache) and compared against the pcache,
// returning LFS_ERR_CORRUPT on mismatch. On success the pcache is zeroed
// and invalidated.
//
// Returns 0 on success, or a negative error code.
static int lfs_bd_flush(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
    // inline pseudo-blocks live only in RAM, nothing to flush for them
    if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
        LFS_ASSERT(pcache->block < lfs->block_count);
        lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
        int err = lfs->cfg->prog(lfs->cfg, pcache->block,
                pcache->off, pcache->buffer, diff);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }

        if (validate) {
            // check data on disk, dropping the rcache first so the
            // comparison really rereads the device
            lfs_cache_drop(lfs, rcache);
            int res = lfs_bd_cmp(lfs,
                    NULL, rcache, diff,
                    pcache->block, pcache->off, pcache->buffer, diff);
            if (res < 0) {
                return res;
            }

            if (res != LFS_CMP_EQ) {
                return LFS_ERR_CORRUPT;
            }
        }

        lfs_cache_zero(lfs, pcache);
    }

    return 0;
}
211 #endif
212
213 #ifndef LFS_READONLY
// Flush the pcache and sync the underlying block device.
//
// The rcache is dropped first, since a sync may invalidate previously
// cached reads. Returns 0 on success, or a negative error code.
static int lfs_bd_sync(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
    lfs_cache_drop(lfs, rcache);

    int err = lfs_bd_flush(lfs, pcache, rcache, validate);
    if (err) {
        return err;
    }

    int res = lfs->cfg->sync(lfs->cfg);
    LFS_ASSERT(res <= 0);
    return res;
}
227 #endif
228
229 #ifndef LFS_READONLY
// Program a byte range through the pcache.
//
// Data is staged in the pcache and eagerly flushed to the device when
// the cache fills; callers must eventually flush/sync to push out any
// remainder. validate is forwarded to lfs_bd_flush. block may be
// LFS_BLOCK_INLINE for RAM-backed inline data.
//
// Returns 0 on success, or a negative error code.
static int lfs_bd_prog(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->block_count);
    LFS_ASSERT(off + size <= lfs->cfg->block_size);

    while (size > 0) {
        if (block == pcache->block &&
                off >= pcache->off &&
                off < pcache->off + lfs->cfg->cache_size) {
            // already fits in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->cache_size - (off-pcache->off));
            memcpy(&pcache->buffer[off-pcache->off], data, diff);

            data += diff;
            off += diff;
            size -= diff;

            pcache->size = lfs_max(pcache->size, off - pcache->off);
            if (pcache->size == lfs->cfg->cache_size) {
                // eagerly flush out pcache if we fill up
                int err = lfs_bd_flush(lfs, pcache, rcache, validate);
                if (err) {
                    return err;
                }
            }

            continue;
        }

        // pcache must have been flushed, either by programming and
        // entire block or manually flushing the pcache
        LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);

        // prepare pcache, first condition can no longer fail; the next
        // loop iteration will take the copy path above
        pcache->block = block;
        pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
        pcache->size = 0;
    }

    return 0;
}
275 #endif
276
277 #ifndef LFS_READONLY
// Erase a single block on the underlying block device.
//
// Returns 0 on success, or a negative error code from the driver.
static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
    LFS_ASSERT(block < lfs->block_count);
    int res = lfs->cfg->erase(lfs->cfg, block);
    LFS_ASSERT(res <= 0);
    return res;
}
284 #endif
285
286
287 /// Small type-level utilities ///
288 // operations on block pairs
// Exchange the two block addresses of a metadata pair in place.
static inline void lfs_pair_swap(lfs_block_t pair[2]) {
    lfs_block_t tmp = pair[1];
    pair[1] = pair[0];
    pair[0] = tmp;
}
294
lfs_pair_isnull(const lfs_block_t pair[2])295 static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
296 return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
297 }
298
// Compare two metadata pairs: 0 if they share any block, nonzero
// otherwise.
//
// Note this is an "any overlap" test, not an exact equality test.
static inline int lfs_pair_cmp(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    bool overlap = paira[0] == pairb[0] || paira[0] == pairb[1]
            || paira[1] == pairb[0] || paira[1] == pairb[1];
    return overlap ? 0 : 1;
}
305
// True if two pairs reference the same two blocks, in either order.
static inline bool lfs_pair_issync(
        const lfs_block_t paira[2],
        const lfs_block_t pairb[2]) {
    if (paira[0] == pairb[0] && paira[1] == pairb[1]) {
        return true;
    }
    return paira[0] == pairb[1] && paira[1] == pairb[0];
}
312
// Convert a pair from little-endian (on-disk) to host byte order.
static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
    for (int i = 0; i < 2; i++) {
        pair[i] = lfs_fromle32(pair[i]);
    }
}
317
318 #ifndef LFS_READONLY
// Convert a pair from host to little-endian (on-disk) byte order.
static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
    for (int i = 0; i < 2; i++) {
        pair[i] = lfs_tole32(pair[i]);
    }
}
323 #endif
324
325 // operations on 32-bit entry tags
326 typedef uint32_t lfs_tag_t;
327 typedef int32_t lfs_stag_t;
328
329 #define LFS_MKTAG(type, id, size) \
330 (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))
331
332 #define LFS_MKTAG_IF(cond, type, id, size) \
333 ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))
334
335 #define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
336 ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
337
// A tag is valid when its top (valid) bit is clear.
static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
    return (tag & 0x80000000) == 0;
}
341
// True if the tag is a delete tag, encoded as a size field of all ones
// (0x3ff).
//
// The previous form, ((int32_t)(tag << 22) >> 22) == -1, relied on
// implementation-defined conversion of an out-of-range value to int32_t
// and on arithmetic right shift of a negative value; this direct mask
// test is equivalent (low 10 bits all set) and fully portable.
static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
    return (tag & 0x3ff) == 0x3ff;
}
345
// Extract the 3-bit major type (bits 28-30), still shifted to 0x700.
static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
    return (tag >> 20) & 0x700;
}
349
// Extract the 4-bit type (bits 27-30), still shifted to 0x780.
static inline uint16_t lfs_tag_type2(lfs_tag_t tag) {
    return (tag >> 20) & 0x780;
}
353
// Extract the full 11-bit type field (bits 20-30).
static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
    return (tag >> 20) & 0x7ff;
}
357
// Extract the 8-bit chunk portion of the type field (bits 20-27).
static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
    return (uint8_t)((tag >> 20) & 0xff);
}
361
// The chunk field reinterpreted as a signed splice delta
// (create = +1, delete = -1).
static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
    uint8_t chunk = lfs_tag_chunk(tag);
    return (int8_t)chunk;
}
365
// Extract the 10-bit id field (bits 10-19).
static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
    return (tag >> 10) & 0x3ff;
}
369
// Extract the 10-bit size field (bits 0-9).
static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
    return tag & 0x000003ff;
}
373
// On-disk footprint of a tag: the tag itself plus its payload.
//
// Delete tags encode size as all ones (0x3ff); adding isdelete (1)
// rolls the size field over to zero, so deleted tags contribute only
// sizeof(tag) bytes.
static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
    return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
}
377
378 // operations on attributes in attribute lists
// a single metadata attribute: a tag plus the buffer holding its payload
struct lfs_mattr {
    lfs_tag_t tag;
    const void *buffer;
};
383
// a location on the block device: block address plus byte offset
struct lfs_diskoff {
    lfs_block_t block;
    lfs_off_t off;
};
388
389 #define LFS_MKATTRS(...) \
390 (struct lfs_mattr[]){__VA_ARGS__}, \
391 sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
392
393 // operations on global state
// XOR gstate b into gstate a, word by word (gstate deltas compose by
// xor). Both structs are treated as three 32-bit words.
static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
    uint32_t *wa = (uint32_t*)a;
    const uint32_t *wb = (const uint32_t*)b;
    for (int i = 0; i < 3; i++) {
        wa[i] ^= wb[i];
    }
}
399
// True if the gstate is all zeros (no pending global-state changes).
static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
    const uint32_t *words = (const uint32_t*)a;
    return (words[0] | words[1] | words[2]) == 0;
}
408
409 #ifndef LFS_READONLY
lfs_gstate_hasorphans(const lfs_gstate_t * a)410 static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
411 return lfs_tag_size(a->tag);
412 }
413
// Number of pending orphans recorded in the gstate tag: the low 9 bits
// of the size field (bit 9 is the needssuperblock flag).
//
// NOTE(review): the 9-bit mask is returned through a uint8_t, which
// would truncate counts >= 256 — confirm orphan counts stay below 256.
static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
    return lfs_tag_size(a->tag) & 0x1ff;
}
417
lfs_gstate_hasmove(const lfs_gstate_t * a)418 static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
419 return lfs_tag_type1(a->tag);
420 }
421 #endif
422
lfs_gstate_needssuperblock(const lfs_gstate_t * a)423 static inline bool lfs_gstate_needssuperblock(const lfs_gstate_t *a) {
424 return lfs_tag_size(a->tag) >> 9;
425 }
426
lfs_gstate_hasmovehere(const lfs_gstate_t * a,const lfs_block_t * pair)427 static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
428 const lfs_block_t *pair) {
429 return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
430 }
431
// Convert a gstate from little-endian (on-disk) to host byte order.
static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
    a->tag = lfs_fromle32(a->tag);
    lfs_pair_fromle32(a->pair);
}
437
438 #ifndef LFS_READONLY
// Convert a gstate from host to little-endian (on-disk) byte order.
static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
    a->tag = lfs_tole32(a->tag);
    lfs_pair_tole32(a->pair);
}
444 #endif
445
446 // operations on forward-CRCs used to track erased state
// forward-CRC: the size and expected CRC of the following region, used
// to track erased state (see "operations on forward-CRCs" above)
struct lfs_fcrc {
    lfs_size_t size;
    uint32_t crc;
};
451
lfs_fcrc_fromle32(struct lfs_fcrc * fcrc)452 static void lfs_fcrc_fromle32(struct lfs_fcrc *fcrc) {
453 fcrc->size = lfs_fromle32(fcrc->size);
454 fcrc->crc = lfs_fromle32(fcrc->crc);
455 }
456
457 #ifndef LFS_READONLY
lfs_fcrc_tole32(struct lfs_fcrc * fcrc)458 static void lfs_fcrc_tole32(struct lfs_fcrc *fcrc) {
459 fcrc->size = lfs_tole32(fcrc->size);
460 fcrc->crc = lfs_tole32(fcrc->crc);
461 }
462 #endif
463
464 // other endianness operations
lfs_ctz_fromle32(struct lfs_ctz * ctz)465 static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
466 ctz->head = lfs_fromle32(ctz->head);
467 ctz->size = lfs_fromle32(ctz->size);
468 }
469
470 #ifndef LFS_READONLY
lfs_ctz_tole32(struct lfs_ctz * ctz)471 static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
472 ctz->head = lfs_tole32(ctz->head);
473 ctz->size = lfs_tole32(ctz->size);
474 }
475 #endif
476
// Convert a superblock from little-endian (on-disk) to host byte order.
// Each field is independent, so conversion order doesn't matter.
static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
    superblock->attr_max = lfs_fromle32(superblock->attr_max);
    superblock->file_max = lfs_fromle32(superblock->file_max);
    superblock->name_max = lfs_fromle32(superblock->name_max);
    superblock->block_count = lfs_fromle32(superblock->block_count);
    superblock->block_size = lfs_fromle32(superblock->block_size);
    superblock->version = lfs_fromle32(superblock->version);
}
485
486 #ifndef LFS_READONLY
// Convert a superblock from host to little-endian (on-disk) byte order.
// Each field is independent, so conversion order doesn't matter.
static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
    superblock->attr_max = lfs_tole32(superblock->attr_max);
    superblock->file_max = lfs_tole32(superblock->file_max);
    superblock->name_max = lfs_tole32(superblock->name_max);
    superblock->block_count = lfs_tole32(superblock->block_count);
    superblock->block_size = lfs_tole32(superblock->block_size);
    superblock->version = lfs_tole32(superblock->version);
}
495 #endif
496
497 #ifndef LFS_NO_ASSERT
498 #if __ASSERT_ON
lfs_mlist_isopen(struct lfs_mlist * head,struct lfs_mlist * node)499 static bool lfs_mlist_isopen(struct lfs_mlist *head,
500 struct lfs_mlist *node) {
501 for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) {
502 if (*p == (struct lfs_mlist*)node) {
503 return true;
504 }
505 }
506
507 return false;
508 }
509 #endif
510 #endif
511
lfs_mlist_remove(lfs_t * lfs,struct lfs_mlist * mlist)512 static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
513 for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) {
514 if (*p == mlist) {
515 *p = (*p)->next;
516 break;
517 }
518 }
519 }
520
lfs_mlist_append(lfs_t * lfs,struct lfs_mlist * mlist)521 static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
522 mlist->next = lfs->mlist;
523 lfs->mlist = mlist;
524 }
525
526 // some other filesystem operations
// Return the on-disk version this filesystem should use.
//
// With LFS_MULTIVERSION enabled, a nonzero cfg->disk_version overrides
// the compiled-in LFS_DISK_VERSION.
static uint32_t lfs_fs_disk_version(lfs_t *lfs) {
    (void)lfs;
#ifdef LFS_MULTIVERSION
    if (lfs->cfg->disk_version) {
        return lfs->cfg->disk_version;
    } else
#endif
    {
        return LFS_DISK_VERSION;
    }
}
538
// Major half (high 16 bits) of the on-disk version.
static uint16_t lfs_fs_disk_version_major(lfs_t *lfs) {
    return (uint16_t)(lfs_fs_disk_version(lfs) >> 16);
}
543
// Minor half (low 16 bits) of the on-disk version.
static uint16_t lfs_fs_disk_version_minor(lfs_t *lfs) {
    return (uint16_t)(lfs_fs_disk_version(lfs) & 0xffff);
}
547
548
549 /// Internal operations predeclared here ///
550 #ifndef LFS_READONLY
551 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
552 const struct lfs_mattr *attrs, int attrcount);
553 static int lfs_dir_compact(lfs_t *lfs,
554 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
555 lfs_mdir_t *source, uint16_t begin, uint16_t end);
556 static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
557 const void *buffer, lfs_size_t size);
558 static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
559 const void *buffer, lfs_size_t size);
560 static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file);
561 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
562 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
563
564 static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
565 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
566 static void lfs_fs_prepmove(lfs_t *lfs,
567 uint16_t id, const lfs_block_t pair[2]);
568 static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
569 lfs_mdir_t *pdir);
570 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
571 lfs_mdir_t *parent);
572 static int lfs_fs_forceconsistency(lfs_t *lfs);
573 #endif
574
575 static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock);
576
577 #ifdef LFS_MIGRATE
578 static int lfs1_traverse(lfs_t *lfs,
579 int (*cb)(void*, lfs_block_t), void *data);
580 #endif
581
582 static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir);
583
584 static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
585 void *buffer, lfs_size_t size);
586 static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
587 void *buffer, lfs_size_t size);
588 static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file);
589 static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file);
590
591 static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs);
592 static int lfs_fs_rawtraverse(lfs_t *lfs,
593 int (*cb)(void *data, lfs_block_t block), void *data,
594 bool includeorphans);
595
596 static int lfs_deinit(lfs_t *lfs);
597 static int lfs_rawunmount(lfs_t *lfs);
598
599
600 /// Block allocator ///
601 #ifndef LFS_READONLY
// Traversal callback that marks a block as in-use in the lookahead
// bitmap, if it falls inside the current lookahead window.
static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
    lfs_t *lfs = (lfs_t*)p;
    // map the block into the window, wrapping around the block count
    lfs_block_t off = ((block - lfs->free.off)
            + lfs->block_count) % lfs->block_count;

    if (off < lfs->free.size) {
        lfs_block_t word = off / 32;
        uint32_t bit = 1U << (off % 32);
        lfs->free.buffer[word] |= bit;
    }

    return 0;
}
613 #endif
614
615 // indicate allocated blocks have been committed into the filesystem, this
616 // is to prevent blocks from being garbage collected in the middle of a
617 // commit operation
static void lfs_alloc_ack(lfs_t *lfs) {
    // reset the ack countdown to a full pass over the device; alloc
    // reports NOSPC once this many blocks have been examined
    lfs->free.ack = lfs->block_count;
}
621
622 // drop the lookahead buffer, this is done during mounting and failed
623 // traversals in order to avoid invalid lookahead state
// Invalidate the lookahead window and restart the ack countdown, done
// during mounting and after failed traversals to avoid stale lookahead
// state.
static void lfs_alloc_drop(lfs_t *lfs) {
    lfs->free.i = 0;
    lfs->free.size = 0;
    lfs_alloc_ack(lfs);
}
629
630 #ifndef LFS_READONLY
// Rebuild the lookahead bitmap by traversing the filesystem.
//
// Slides the lookahead window past the blocks already examined, then
// marks every block reachable from the metadata tree as in-use; the
// remaining zero bits identify candidate free blocks for lfs_alloc.
// Returns 0 on success, or a negative error code (the lookahead state
// is dropped on error).
static int lfs_fs_rawgc(lfs_t *lfs) {
    // Move free offset at the first unused block (lfs->free.i)
    // lfs->free.i is equal lfs->free.size when all blocks are used
    lfs->free.off = (lfs->free.off + lfs->free.i) % lfs->block_count;
    // window is bounded by both the lookahead buffer (in bits) and the
    // remaining ack countdown
    lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
    lfs->free.i = 0;

    // find mask of free blocks from tree
    memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
    int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
    if (err) {
        lfs_alloc_drop(lfs);
        return err;
    }

    return 0;
}
648 #endif
649
650 #ifndef LFS_READONLY
// Allocate a free block, writing its address to *block.
//
// Scans the lookahead bitmap for a zero bit; when the window is
// exhausted, the filesystem is traversed (lfs_fs_rawgc) to refill it.
// Returns LFS_ERR_NOSPC once every block since the last ack has been
// examined without finding a free one, or another negative error code.
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
    while (true) {
        while (lfs->free.i != lfs->free.size) {
            lfs_block_t off = lfs->free.i;
            lfs->free.i += 1;
            lfs->free.ack -= 1;

            if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
                // found a free block
                *block = (lfs->free.off + off) % lfs->block_count;

                // eagerly find next off so an alloc ack can
                // discredit old lookahead blocks
                while (lfs->free.i != lfs->free.size &&
                        (lfs->free.buffer[lfs->free.i / 32]
                            & (1U << (lfs->free.i % 32)))) {
                    lfs->free.i += 1;
                    lfs->free.ack -= 1;
                }

                return 0;
            }
        }

        // check if we have looked at all blocks since last ack
        if (lfs->free.ack == 0) {
            LFS_ERROR("No more free space %"PRIu32,
                    lfs->free.i + lfs->free.off);
            return LFS_ERR_NOSPC;
        }

        int err = lfs_fs_rawgc(lfs);
        if (err) {
            return err;
        }
    }
}
688 #endif
689
690 /// Metadata pair and directory operations ///
// Find the most recent tag matching gmask/gtag in a metadata pair and
// copy up to gsize bytes of its payload, starting at goff, into gbuffer.
//
// The commit log is scanned backwards so newer tags shadow older ones;
// splice (create/delete) tags and any pending synthetic move in the
// on-disk gstate are accumulated in gdiff to translate ids along the
// way. Any unfilled tail of gbuffer is zeroed.
//
// Returns the (id-adjusted) tag on success, LFS_ERR_NOENT if the entry
// doesn't exist or was deleted, or another negative error code.
static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
    lfs_off_t off = dir->off;
    lfs_tag_t ntag = dir->etag;
    lfs_stag_t gdiff = 0;

    if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
            lfs_tag_id(gmask) != 0 &&
            lfs_tag_id(lfs->gdisk.tag) <= lfs_tag_id(gtag)) {
        // synthetic moves
        gdiff -= LFS_MKTAG(0, 1, 0);
    }

    // iterate over dir block backwards (for faster lookups)
    while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
        off -= lfs_tag_dsize(ntag);
        lfs_tag_t tag = ntag;
        int err = lfs_bd_read(lfs,
                NULL, &lfs->rcache, sizeof(ntag),
                dir->pair[0], off, &ntag, sizeof(ntag));
        if (err) {
            return err;
        }

        // on disk each tag is xored with the previous tag
        ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;

        if (lfs_tag_id(gmask) != 0 &&
                lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
                lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
            if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
                // found where we were created
                return LFS_ERR_NOENT;
            }

            // move around splices
            gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
        }

        if ((gmask & tag) == (gmask & (gtag - gdiff))) {
            if (lfs_tag_isdelete(tag)) {
                return LFS_ERR_NOENT;
            }

            lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, diff,
                    dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
            if (err) {
                return err;
            }

            // zero any tail the on-disk payload didn't cover
            memset((uint8_t*)gbuffer + diff, 0, gsize - diff);

            return tag + gdiff;
        }
    }

    return LFS_ERR_NOENT;
}
752
lfs_dir_get(lfs_t * lfs,const lfs_mdir_t * dir,lfs_tag_t gmask,lfs_tag_t gtag,void * buffer)753 static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
754 lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
755 return lfs_dir_getslice(lfs, dir,
756 gmask, gtag,
757 0, buffer, lfs_tag_size(gtag));
758 }
759
// Read a byte range out of a metadata entry's payload, mirroring
// lfs_bd_read but sourcing cache misses via lfs_dir_getslice on the
// LFS_BLOCK_INLINE pseudo-block instead of the block device.
//
// Returns 0 on success, or a negative error code.
static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    if (off+size > lfs->cfg->block_size) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        lfs_size_t diff = size;

        if (pcache && pcache->block == LFS_BLOCK_INLINE &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority
            diff = lfs_min(diff, pcache->off-off);
        }

        if (rcache->block == LFS_BLOCK_INLINE &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority
            diff = lfs_min(diff, rcache->off-off);
        }

        // load to cache, first condition can no longer fail
        rcache->block = LFS_BLOCK_INLINE;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
                lfs->cfg->cache_size);
        // positive return values are tags, only negatives are errors
        int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
                rcache->off, rcache->buffer, rcache->size);
        if (err < 0) {
            return err;
        }
    }

    return 0;
}
820
821 #ifndef LFS_READONLY
// Traverse callback used by lfs_dir_traverse's duplicate scan: decides
// whether the tag in *filtertag is made redundant by a newer tag.
//
// If so, *filtertag is replaced with a NOOP tag so the outer traversal
// skips it, and true is returned. Otherwise *filtertag's id is adjusted
// for any create/delete splices encountered and false is returned.
static int lfs_dir_traverse_filter(void *p,
        lfs_tag_t tag, const void *buffer) {
    lfs_tag_t *filtertag = p;
    (void)buffer;

    // which mask depends on unique bit in tag structure
    uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
            ? LFS_MKTAG(0x7ff, 0x3ff, 0)
            : LFS_MKTAG(0x700, 0x3ff, 0);

    // check for redundancy: same masked tag, an already-deleted filter
    // tag, or a delete of the filter tag's id
    if ((mask & tag) == (mask & *filtertag) ||
            lfs_tag_isdelete(*filtertag) ||
            (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
                LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
        *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
        return true;
    }

    // check if we need to adjust for created/deleted tags
    if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
            lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
        *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
    }

    return false;
}
850 #endif
851
852 #ifndef LFS_READONLY
853 // maximum recursive depth of lfs_dir_traverse, the deepest call:
854 //
855 // traverse with commit
856 // '-> traverse with move
857 // '-> traverse with filter
858 //
859 #define LFS_DIR_TRAVERSE_DEPTH 3
860
// Saved state for one suspended level of lfs_dir_traverse's explicit
// recursion stack.
struct lfs_dir_traverse {
    // commit-log cursor: directory, byte offset, and previous tag
    const lfs_mdir_t *dir;
    lfs_off_t off;
    lfs_tag_t ptag;
    // pending in-RAM attributes still to be visited
    const struct lfs_mattr *attrs;
    int attrcount;

    // tag filter (mask/value) and id window [begin, end) with id offset
    lfs_tag_t tmask;
    lfs_tag_t ttag;
    uint16_t begin;
    uint16_t end;
    int16_t diff;

    // per-tag callback and its context
    int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
    void *data;

    // tag being processed when this level was suspended
    lfs_tag_t tag;
    const void *buffer;
    struct lfs_diskoff disk;
};
881
// Iterate over all tags in a metadata block plus a list of pending
// attrs, invoking cb for each tag matching tmask/ttag whose id falls in
// [begin, end); diff is added to the ids reported to cb.
//
// LFS_FROM_MOVE and LFS_FROM_USERATTRS tags are expanded in place, and
// the bounded recursion (at most LFS_DIR_TRAVERSE_DEPTH levels) is
// implemented with an explicit stack to keep worst-case stack usage
// static. Returns the first nonzero/negative result from cb, or 0.
static int lfs_dir_traverse(lfs_t *lfs,
        const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
        const struct lfs_mattr *attrs, int attrcount,
        lfs_tag_t tmask, lfs_tag_t ttag,
        uint16_t begin, uint16_t end, int16_t diff,
        int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
    // This function in inherently recursive, but bounded. To allow tool-based
    // analysis without unnecessary code-cost we use an explicit stack
    struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
    unsigned sp = 0;
    int res;

    // iterate over directory and attrs
    lfs_tag_t tag;
    const void *buffer;
    struct lfs_diskoff disk = {0};
    while (true) {
        {
            if (off+lfs_tag_dsize(ptag) < dir->off) {
                // next tag from the on-disk commit log
                off += lfs_tag_dsize(ptag);
                int err = lfs_bd_read(lfs,
                        NULL, &lfs->rcache, sizeof(tag),
                        dir->pair[0], off, &tag, sizeof(tag));
                if (err) {
                    return err;
                }

                // tags are xored with the previous tag on disk
                tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
                disk.block = dir->pair[0];
                disk.off = off+sizeof(lfs_tag_t);
                buffer = &disk;
                ptag = tag;
            } else if (attrcount > 0) {
                // then the pending in-RAM attrs
                tag = attrs[0].tag;
                buffer = attrs[0].buffer;
                attrs += 1;
                attrcount -= 1;
            } else {
                // finished traversal, pop from stack?
                res = 0;
                break;
            }

            // do we need to filter?
            lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
            if ((mask & tmask & tag) != (mask & tmask & ttag)) {
                continue;
            }

            if (lfs_tag_id(tmask) != 0) {
                LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH);
                // recurse, scan for duplicates, and update tag based on
                // creates/deletes
                stack[sp] = (struct lfs_dir_traverse){
                    .dir = dir,
                    .off = off,
                    .ptag = ptag,
                    .attrs = attrs,
                    .attrcount = attrcount,
                    .tmask = tmask,
                    .ttag = ttag,
                    .begin = begin,
                    .end = end,
                    .diff = diff,
                    .cb = cb,
                    .data = data,
                    .tag = tag,
                    .buffer = buffer,
                    .disk = disk,
                };
                sp += 1;

                // the nested traversal filters against the saved tag
                tmask = 0;
                ttag = 0;
                begin = 0;
                end = 0;
                diff = 0;
                cb = lfs_dir_traverse_filter;
                data = &stack[sp-1].tag;
                continue;
            }
        }

popped:
        // in filter range?
        if (lfs_tag_id(tmask) != 0 &&
                !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
            continue;
        }

        // handle special cases for mcu-side operations
        if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
            // do nothing
        } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
            // Without this condition, lfs_dir_traverse can exhibit an
            // extremely expensive O(n^3) of nested loops when renaming.
            // This happens because lfs_dir_traverse tries to filter tags by
            // the tags in the source directory, triggering a second
            // lfs_dir_traverse with its own filter operation.
            //
            // traverse with commit
            // '-> traverse with filter
            //     '-> traverse with move
            //         '-> traverse with filter
            //
            // However we don't actually care about filtering the second set of
            // tags, since duplicate tags have no effect when filtering.
            //
            // This check skips this unnecessary recursive filtering explicitly,
            // reducing this runtime from O(n^3) to O(n^2).
            if (cb == lfs_dir_traverse_filter) {
                continue;
            }

            // recurse into move; the saved tag becomes a NOOP so the
            // move itself isn't revisited after the pop
            stack[sp] = (struct lfs_dir_traverse){
                .dir = dir,
                .off = off,
                .ptag = ptag,
                .attrs = attrs,
                .attrcount = attrcount,
                .tmask = tmask,
                .ttag = ttag,
                .begin = begin,
                .end = end,
                .diff = diff,
                .cb = cb,
                .data = data,
                .tag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0),
            };
            sp += 1;

            // retarget the traversal at the move's source directory,
            // restricted to the moved id, reporting it under its new id
            uint16_t fromid = lfs_tag_size(tag);
            uint16_t toid = lfs_tag_id(tag);
            dir = buffer;
            off = 0;
            ptag = 0xffffffff;
            attrs = NULL;
            attrcount = 0;
            tmask = LFS_MKTAG(0x600, 0x3ff, 0);
            ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0);
            begin = fromid;
            end = fromid+1;
            diff = toid-fromid+diff;
        } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
            // expand the userattr list into one callback per attr
            for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
                const struct lfs_attr *a = buffer;
                res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
                        lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
                if (res < 0) {
                    return res;
                }

                if (res) {
                    break;
                }
            }
        } else {
            res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
            if (res < 0) {
                return res;
            }

            if (res) {
                break;
            }
        }
    }

    if (sp > 0) {
        // pop from the stack and return, fortunately all pops share
        // a destination
        dir = stack[sp-1].dir;
        off = stack[sp-1].off;
        ptag = stack[sp-1].ptag;
        attrs = stack[sp-1].attrs;
        attrcount = stack[sp-1].attrcount;
        tmask = stack[sp-1].tmask;
        ttag = stack[sp-1].ttag;
        begin = stack[sp-1].begin;
        end = stack[sp-1].end;
        diff = stack[sp-1].diff;
        cb = stack[sp-1].cb;
        data = stack[sp-1].data;
        tag = stack[sp-1].tag;
        buffer = stack[sp-1].buffer;
        disk = stack[sp-1].disk;
        sp -= 1;
        goto popped;
    } else {
        return res;
    }
}
1075 #endif
1076
lfs_dir_fetchmatch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2],lfs_tag_t fmask,lfs_tag_t ftag,uint16_t * id,int (* cb)(void * data,lfs_tag_t tag,const void * buffer),void * data)1077 static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
1078 lfs_mdir_t *dir, const lfs_block_t pair[2],
1079 lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
1080 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
1081 // we can find tag very efficiently during a fetch, since we're already
1082 // scanning the entire directory
1083 lfs_stag_t besttag = -1;
1084
1085 // if either block address is invalid we return LFS_ERR_CORRUPT here,
1086 // otherwise later writes to the pair could fail
1087 if (lfs->block_count
1088 && (pair[0] >= lfs->block_count || pair[1] >= lfs->block_count)) {
1089 return LFS_ERR_CORRUPT;
1090 }
1091
1092 // find the block with the most recent revision
1093 uint32_t revs[2] = {0, 0};
1094 int r = 0;
1095 for (int i = 0; i < 2; i++) {
1096 int err = lfs_bd_read(lfs,
1097 NULL, &lfs->rcache, sizeof(revs[i]),
1098 pair[i], 0, &revs[i], sizeof(revs[i]));
1099 revs[i] = lfs_fromle32(revs[i]);
1100 if (err && err != LFS_ERR_CORRUPT) {
1101 return err;
1102 }
1103
1104 if (err != LFS_ERR_CORRUPT &&
1105 lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
1106 r = i;
1107 }
1108 }
1109
1110 dir->pair[0] = pair[(r+0)%2];
1111 dir->pair[1] = pair[(r+1)%2];
1112 dir->rev = revs[(r+0)%2];
1113 dir->off = 0; // nonzero = found some commits
1114
1115 // now scan tags to fetch the actual dir and find possible match
1116 for (int i = 0; i < 2; i++) {
1117 lfs_off_t off = 0;
1118 lfs_tag_t ptag = 0xffffffff;
1119
1120 uint16_t tempcount = 0;
1121 lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
1122 bool tempsplit = false;
1123 lfs_stag_t tempbesttag = besttag;
1124
1125 // assume not erased until proven otherwise
1126 bool maybeerased = false;
1127 bool hasfcrc = false;
1128 struct lfs_fcrc fcrc;
1129
1130 dir->rev = lfs_tole32(dir->rev);
1131 uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
1132 dir->rev = lfs_fromle32(dir->rev);
1133
1134 while (true) {
1135 // extract next tag
1136 lfs_tag_t tag;
1137 off += lfs_tag_dsize(ptag);
1138 int err = lfs_bd_read(lfs,
1139 NULL, &lfs->rcache, lfs->cfg->block_size,
1140 dir->pair[0], off, &tag, sizeof(tag));
1141 if (err) {
1142 if (err == LFS_ERR_CORRUPT) {
1143 // can't continue?
1144 break;
1145 }
1146 return err;
1147 }
1148
1149 crc = lfs_crc(crc, &tag, sizeof(tag));
1150 tag = lfs_frombe32(tag) ^ ptag;
1151
1152 // next commit not yet programmed?
1153 if (!lfs_tag_isvalid(tag)) {
1154 // we only might be erased if the last tag was a crc
1155 maybeerased = (lfs_tag_type2(ptag) == LFS_TYPE_CCRC);
1156 break;
1157 // out of range?
1158 } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
1159 break;
1160 }
1161
1162 ptag = tag;
1163
1164 if (lfs_tag_type2(tag) == LFS_TYPE_CCRC) {
1165 // check the crc attr
1166 uint32_t dcrc;
1167 err = lfs_bd_read(lfs,
1168 NULL, &lfs->rcache, lfs->cfg->block_size,
1169 dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
1170 if (err) {
1171 if (err == LFS_ERR_CORRUPT) {
1172 break;
1173 }
1174 return err;
1175 }
1176 dcrc = lfs_fromle32(dcrc);
1177
1178 if (crc != dcrc) {
1179 break;
1180 }
1181
1182 // reset the next bit if we need to
1183 ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
1184
1185 // toss our crc into the filesystem seed for
1186 // pseudorandom numbers, note we use another crc here
1187 // as a collection function because it is sufficiently
1188 // random and convenient
1189 lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc));
1190
1191 // update with what's found so far
1192 besttag = tempbesttag;
1193 dir->off = off + lfs_tag_dsize(tag);
1194 dir->etag = ptag;
1195 dir->count = tempcount;
1196 dir->tail[0] = temptail[0];
1197 dir->tail[1] = temptail[1];
1198 dir->split = tempsplit;
1199
1200 // reset crc, hasfcrc
1201 crc = 0xffffffff;
1202 continue;
1203 }
1204
1205 // crc the entry first, hopefully leaving it in the cache
1206 err = lfs_bd_crc(lfs,
1207 NULL, &lfs->rcache, lfs->cfg->block_size,
1208 dir->pair[0], off+sizeof(tag),
1209 lfs_tag_dsize(tag)-sizeof(tag), &crc);
1210 if (err) {
1211 if (err == LFS_ERR_CORRUPT) {
1212 break;
1213 }
1214 return err;
1215 }
1216
1217 // directory modification tags?
1218 if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
1219 // increase count of files if necessary
1220 if (lfs_tag_id(tag) >= tempcount) {
1221 tempcount = lfs_tag_id(tag) + 1;
1222 }
1223 } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
1224 tempcount += lfs_tag_splice(tag);
1225
1226 if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
1227 (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
1228 tempbesttag |= 0x80000000;
1229 } else if (tempbesttag != -1 &&
1230 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1231 tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
1232 }
1233 } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
1234 tempsplit = (lfs_tag_chunk(tag) & 1);
1235
1236 err = lfs_bd_read(lfs,
1237 NULL, &lfs->rcache, lfs->cfg->block_size,
1238 dir->pair[0], off+sizeof(tag), &temptail, 8);
1239 if (err) {
1240 if (err == LFS_ERR_CORRUPT) {
1241 break;
1242 }
1243 return err;
1244 }
1245 lfs_pair_fromle32(temptail);
1246 } else if (lfs_tag_type3(tag) == LFS_TYPE_FCRC) {
1247 err = lfs_bd_read(lfs,
1248 NULL, &lfs->rcache, lfs->cfg->block_size,
1249 dir->pair[0], off+sizeof(tag),
1250 &fcrc, sizeof(fcrc));
1251 if (err) {
1252 if (err == LFS_ERR_CORRUPT) {
1253 break;
1254 }
1255 }
1256
1257 lfs_fcrc_fromle32(&fcrc);
1258 hasfcrc = true;
1259 }
1260
1261 // found a match for our fetcher?
1262 if ((fmask & tag) == (fmask & ftag)) {
1263 int res = cb(data, tag, &(struct lfs_diskoff){
1264 dir->pair[0], off+sizeof(tag)});
1265 if (res < 0) {
1266 if (res == LFS_ERR_CORRUPT) {
1267 break;
1268 }
1269 return res;
1270 }
1271
1272 if (res == LFS_CMP_EQ) {
1273 // found a match
1274 tempbesttag = tag;
1275 } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
1276 (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
1277 // found an identical tag, but contents didn't match
1278 // this must mean that our besttag has been overwritten
1279 tempbesttag = -1;
1280 } else if (res == LFS_CMP_GT &&
1281 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1282 // found a greater match, keep track to keep things sorted
1283 tempbesttag = tag | 0x80000000;
1284 }
1285 }
1286 }
1287
1288 // found no valid commits?
1289 if (dir->off == 0) {
1290 // try the other block?
1291 lfs_pair_swap(dir->pair);
1292 dir->rev = revs[(r+1)%2];
1293 continue;
1294 }
1295
1296 // did we end on a valid commit? we may have an erased block
1297 dir->erased = false;
1298 if (maybeerased && dir->off % lfs->cfg->prog_size == 0) {
1299 #ifdef LFS_MULTIVERSION
1300 // note versions < lfs2.1 did not have fcrc tags, if
1301 // we're < lfs2.1 treat missing fcrc as erased data
1302 //
1303 // we don't strictly need to do this, but otherwise writing
1304 // to lfs2.0 disks becomes very inefficient
1305 if (lfs_fs_disk_version(lfs) < 0x00020001) {
1306 dir->erased = true;
1307
1308 } else
1309 #endif
1310 if (hasfcrc) {
1311 // check for an fcrc matching the next prog's erased state, if
1312 // this failed most likely a previous prog was interrupted, we
1313 // need a new erase
1314 uint32_t fcrc_ = 0xffffffff;
1315 int err = lfs_bd_crc(lfs,
1316 NULL, &lfs->rcache, lfs->cfg->block_size,
1317 dir->pair[0], dir->off, fcrc.size, &fcrc_);
1318 if (err && err != LFS_ERR_CORRUPT) {
1319 return err;
1320 }
1321
1322 // found beginning of erased part?
1323 dir->erased = (fcrc_ == fcrc.crc);
1324 }
1325 }
1326
1327 // synthetic move
1328 if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
1329 if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
1330 besttag |= 0x80000000;
1331 } else if (besttag != -1 &&
1332 lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
1333 besttag -= LFS_MKTAG(0, 1, 0);
1334 }
1335 }
1336
1337 // found tag? or found best id?
1338 if (id) {
1339 *id = lfs_min(lfs_tag_id(besttag), dir->count);
1340 }
1341
1342 if (lfs_tag_isvalid(besttag)) {
1343 return besttag;
1344 } else if (lfs_tag_id(besttag) < dir->count) {
1345 return LFS_ERR_NOENT;
1346 } else {
1347 return 0;
1348 }
1349 }
1350
1351 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
1352 dir->pair[0], dir->pair[1]);
1353 return LFS_ERR_CORRUPT;
1354 }
1355
lfs_dir_fetch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2])1356 static int lfs_dir_fetch(lfs_t *lfs,
1357 lfs_mdir_t *dir, const lfs_block_t pair[2]) {
1358 // note, mask=-1, tag=-1 can never match a tag since this
1359 // pattern has the invalid bit set
1360 return (int)lfs_dir_fetchmatch(lfs, dir, pair,
1361 (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL);
1362 }
1363
lfs_dir_getgstate(lfs_t * lfs,const lfs_mdir_t * dir,lfs_gstate_t * gstate)1364 static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
1365 lfs_gstate_t *gstate) {
1366 lfs_gstate_t temp;
1367 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
1368 LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp);
1369 if (res < 0 && res != LFS_ERR_NOENT) {
1370 return res;
1371 }
1372
1373 if (res != LFS_ERR_NOENT) {
1374 // xor together to find resulting gstate
1375 lfs_gstate_fromle32(&temp);
1376 lfs_gstate_xor(gstate, &temp);
1377 }
1378
1379 return 0;
1380 }
1381
lfs_dir_getinfo(lfs_t * lfs,lfs_mdir_t * dir,uint16_t id,struct lfs_info * info)1382 static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
1383 uint16_t id, struct lfs_info *info) {
1384 if (id == 0x3ff) {
1385 // special case for root
1386 strcpy(info->name, "/");
1387 info->type = LFS_TYPE_DIR;
1388 return 0;
1389 }
1390
1391 lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
1392 LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
1393 if (tag < 0) {
1394 return (int)tag;
1395 }
1396
1397 info->type = lfs_tag_type3(tag);
1398
1399 struct lfs_ctz ctz;
1400 tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1401 LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
1402 if (tag < 0) {
1403 return (int)tag;
1404 }
1405 lfs_ctz_fromle32(&ctz);
1406
1407 if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
1408 info->size = ctz.size;
1409 } else if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
1410 info->size = lfs_tag_size(tag);
1411 }
1412
1413 return 0;
1414 }
1415
// context for lfs_dir_find_match, carries the name being searched for
struct lfs_dir_find_match {
    lfs_t *lfs;          // filesystem state, needed for on-disk compares
    const void *name;    // name to compare against
    lfs_size_t size;     // length of name in bytes
};
1421
lfs_dir_find_match(void * data,lfs_tag_t tag,const void * buffer)1422 static int lfs_dir_find_match(void *data,
1423 lfs_tag_t tag, const void *buffer) {
1424 struct lfs_dir_find_match *name = data;
1425 lfs_t *lfs = name->lfs;
1426 const struct lfs_diskoff *disk = buffer;
1427
1428 // compare with disk
1429 lfs_size_t diff = lfs_min(name->size, lfs_tag_size(tag));
1430 int res = lfs_bd_cmp(lfs,
1431 NULL, &lfs->rcache, diff,
1432 disk->block, disk->off, name->name, diff);
1433 if (res != LFS_CMP_EQ) {
1434 return res;
1435 }
1436
1437 // only equal if our size is still the same
1438 if (name->size != lfs_tag_size(tag)) {
1439 return (name->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT;
1440 }
1441
1442 // found a match!
1443 return LFS_CMP_EQ;
1444 }
1445
lfs_dir_find(lfs_t * lfs,lfs_mdir_t * dir,const char ** path,uint16_t * id)1446 static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
1447 const char **path, uint16_t *id) {
1448 // we reduce path to a single name if we can find it
1449 const char *name = *path;
1450 if (id) {
1451 *id = 0x3ff;
1452 }
1453
1454 // default to root dir
1455 lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
1456 dir->tail[0] = lfs->root[0];
1457 dir->tail[1] = lfs->root[1];
1458
1459 while (true) {
1460 nextname:
1461 // skip slashes
1462 name += strspn(name, "/");
1463 lfs_size_t namelen = strcspn(name, "/");
1464
1465 // skip '.' and root '..'
1466 if ((namelen == 1 && memcmp(name, ".", 1) == 0) ||
1467 (namelen == 2 && memcmp(name, "..", 2) == 0)) {
1468 name += namelen;
1469 goto nextname;
1470 }
1471
1472 // skip if matched by '..' in name
1473 const char *suffix = name + namelen;
1474 lfs_size_t sufflen;
1475 int depth = 1;
1476 while (true) {
1477 suffix += strspn(suffix, "/");
1478 sufflen = strcspn(suffix, "/");
1479 if (sufflen == 0) {
1480 break;
1481 }
1482
1483 if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
1484 depth -= 1;
1485 if (depth == 0) {
1486 name = suffix + sufflen;
1487 goto nextname;
1488 }
1489 } else {
1490 depth += 1;
1491 }
1492
1493 suffix += sufflen;
1494 }
1495
1496 // found path
1497 if (name[0] == '\0') {
1498 return tag;
1499 }
1500
1501 // update what we've found so far
1502 *path = name;
1503
1504 // only continue if we hit a directory
1505 if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
1506 return LFS_ERR_NOTDIR;
1507 }
1508
1509 // grab the entry data
1510 if (lfs_tag_id(tag) != 0x3ff) {
1511 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1512 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
1513 if (res < 0) {
1514 return res;
1515 }
1516 lfs_pair_fromle32(dir->tail);
1517 }
1518
1519 // find entry matching name
1520 while (true) {
1521 tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
1522 LFS_MKTAG(0x780, 0, 0),
1523 LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
1524 // are we last name?
1525 (strchr(name, '/') == NULL) ? id : NULL,
1526 lfs_dir_find_match, &(struct lfs_dir_find_match){
1527 lfs, name, namelen});
1528 if (tag < 0) {
1529 return tag;
1530 }
1531
1532 if (tag) {
1533 break;
1534 }
1535
1536 if (!dir->split) {
1537 return LFS_ERR_NOENT;
1538 }
1539 }
1540
1541 // to next name
1542 name += namelen;
1543 }
1544 }
1545
// commit logic
//
// in-progress commit to a metadata block, tracked while tags are
// appended and finalized by lfs_dir_commitcrc
struct lfs_commit {
    lfs_block_t block;  // block being programmed
    lfs_off_t off;      // current program offset in the block
    lfs_tag_t ptag;     // previous tag, tags are xored against it on disk
    uint32_t crc;       // running crc of the commit so far

    lfs_off_t begin;    // offset where this commit began
    lfs_off_t end;      // upper bound for the commit, space past this is
                        // reserved (e.g. for the closing crc)
};
1556
1557 #ifndef LFS_READONLY
lfs_dir_commitprog(lfs_t * lfs,struct lfs_commit * commit,const void * buffer,lfs_size_t size)1558 static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
1559 const void *buffer, lfs_size_t size) {
1560 int err = lfs_bd_prog(lfs,
1561 &lfs->pcache, &lfs->rcache, false,
1562 commit->block, commit->off ,
1563 (const uint8_t*)buffer, size);
1564 if (err) {
1565 return err;
1566 }
1567
1568 commit->crc = lfs_crc(commit->crc, buffer, size);
1569 commit->off += size;
1570 return 0;
1571 }
1572 #endif
1573
#ifndef LFS_READONLY
// Append a single tag (and its payload) to an in-progress commit.
//
// The tag is xored with the previous tag before being written, which is
// how littlefs chains tags on disk. The payload comes either from
// memory (buffer), or, when the tag's high bit is set, from disk via a
// struct lfs_diskoff. Returns LFS_ERR_NOSPC if the tag doesn't fit.
static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
        lfs_tag_t tag, const void *buffer) {
    // check if we fit
    lfs_size_t dsize = lfs_tag_dsize(tag);
    if (commit->off + dsize > commit->end) {
        return LFS_ERR_NOSPC;
    }

    // write out tag, xored against the previous tag with the valid bit
    // cleared, in big-endian order
    lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
    int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
    if (err) {
        return err;
    }

    if (!(tag & 0x80000000)) {
        // from memory
        err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
        if (err) {
            return err;
        }
    } else {
        // from disk, copy one byte at a time
        const struct lfs_diskoff *disk = buffer;
        for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
            // rely on caching to make this efficient
            uint8_t dat;
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, dsize-sizeof(tag)-i,
                    disk->block, disk->off+i, &dat, 1);
            if (err) {
                return err;
            }

            err = lfs_dir_commitprog(lfs, commit, &dat, 1);
            if (err) {
                return err;
            }
        }
    }

    // remember this tag (without the valid bit) for the next xor chain
    commit->ptag = tag & 0x7fffffff;
    return 0;
}
#endif
1620
#ifndef LFS_READONLY

// Finalize a commit by writing crc tags out to the next program-unit
// boundary, then read back and verify the committed data.
static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
    // align to program units
    //
    // this gets a bit complex as we have two types of crcs:
    // - 5-word crc with fcrc to check following prog (middle of block)
    // - 2-word crc with no following prog (end of block)
    const lfs_off_t end = lfs_alignup(
            lfs_min(commit->off + 5*sizeof(uint32_t), lfs->cfg->block_size),
            lfs->cfg->prog_size);

    // offset/crc of the first (non-padding) crc tag, used for the
    // read-back verification below
    lfs_off_t off1 = 0;
    uint32_t crc1 = 0;

    // create crc tags to fill up remainder of commit, note that
    // padding is not crced, which lets fetches skip padding but
    // makes committing a bit more complicated
    while (commit->off < end) {
        // how far can one crc tag reach? tag sizes are capped at 0x3fe
        lfs_off_t noff = (
                lfs_min(end - (commit->off+sizeof(lfs_tag_t)), 0x3fe)
                + (commit->off+sizeof(lfs_tag_t)));
        // too large for crc tag? need padding commits
        if (noff < end) {
            noff = lfs_min(noff, end - 5*sizeof(uint32_t));
        }

        // space for fcrc?
        uint8_t eperturb = (uint8_t)-1;
        if (noff >= end && noff <= lfs->cfg->block_size - lfs->cfg->prog_size) {
            // first read the leading byte, this always contains a bit
            // we can perturb to avoid writes that don't change the fcrc
            int err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, lfs->cfg->prog_size,
                    commit->block, noff, &eperturb, 1);
            if (err && err != LFS_ERR_CORRUPT) {
                return err;
            }

#ifdef LFS_MULTIVERSION
            // unfortunately fcrcs break mdir fetching < lfs2.1, so only write
            // these if we're a >= lfs2.1 filesystem
            if (lfs_fs_disk_version(lfs) <= 0x00020000) {
                // don't write fcrc
            } else
#endif
            {
                // find the expected fcrc, don't bother avoiding a reread
                // of the eperturb, it should still be in our cache
                struct lfs_fcrc fcrc = {
                    .size = lfs->cfg->prog_size,
                    .crc = 0xffffffff
                };
                err = lfs_bd_crc(lfs,
                        NULL, &lfs->rcache, lfs->cfg->prog_size,
                        commit->block, noff, fcrc.size, &fcrc.crc);
                if (err && err != LFS_ERR_CORRUPT) {
                    return err;
                }

                lfs_fcrc_tole32(&fcrc);
                err = lfs_dir_commitattr(lfs, commit,
                        LFS_MKTAG(LFS_TYPE_FCRC, 0x3ff, sizeof(struct lfs_fcrc)),
                        &fcrc);
                if (err) {
                    return err;
                }
            }
        }

        // build commit crc
        struct {
            lfs_tag_t tag;
            uint32_t crc;
        } ccrc;
        // the ccrc tag's chunk encodes the perturbed valid bit of the
        // next prog, taken from the top bit of eperturb
        lfs_tag_t ntag = LFS_MKTAG(
                LFS_TYPE_CCRC + (((uint8_t)~eperturb) >> 7), 0x3ff,
                noff - (commit->off+sizeof(lfs_tag_t)));
        ccrc.tag = lfs_tobe32(ntag ^ commit->ptag);
        commit->crc = lfs_crc(commit->crc, &ccrc.tag, sizeof(lfs_tag_t));
        ccrc.crc = lfs_tole32(commit->crc);

        int err = lfs_bd_prog(lfs,
                &lfs->pcache, &lfs->rcache, false,
                commit->block, commit->off, &ccrc, sizeof(ccrc));
        if (err) {
            return err;
        }

        // keep track of non-padding checksum to verify
        if (off1 == 0) {
            off1 = commit->off + sizeof(lfs_tag_t);
            crc1 = commit->crc;
        }

        commit->off = noff;
        // perturb valid bit?
        commit->ptag = ntag ^ ((0x80UL & ~eperturb) << 24);
        // reset crc for next commit
        commit->crc = 0xffffffff;

        // manually flush here since we don't prog the padding, this confuses
        // the caching layer
        if (noff >= end || noff >= lfs->pcache.off + lfs->cfg->cache_size) {
            // flush buffers
            int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
            if (err) {
                return err;
            }
        }
    }

    // successful commit, check checksums to make sure
    //
    // note that we don't need to check padding commits, worst
    // case if they are corrupted we would have had to compact anyways
    lfs_off_t off = commit->begin;
    uint32_t crc = 0xffffffff;
    int err = lfs_bd_crc(lfs,
            NULL, &lfs->rcache, off1+sizeof(uint32_t),
            commit->block, off, off1-off, &crc);
    if (err) {
        return err;
    }

    // check non-padding commits against known crc
    if (crc != crc1) {
        return LFS_ERR_CORRUPT;
    }

    // make sure to check crc in case we happen to pick
    // up an unrelated crc (frozen block?)
    err = lfs_bd_crc(lfs,
            NULL, &lfs->rcache, sizeof(uint32_t),
            commit->block, off1, sizeof(uint32_t), &crc);
    if (err) {
        return err;
    }

    // crc over data+stored-crc of a valid commit comes out to zero
    if (crc != 0) {
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
#endif
1767
1768 #ifndef LFS_READONLY
lfs_dir_alloc(lfs_t * lfs,lfs_mdir_t * dir)1769 static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
1770 // allocate pair of dir blocks (backwards, so we write block 1 first)
1771 for (int i = 0; i < 2; i++) {
1772 int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
1773 if (err) {
1774 return err;
1775 }
1776 }
1777
1778 // zero for reproducibility in case initial block is unreadable
1779 dir->rev = 0;
1780
1781 // rather than clobbering one of the blocks we just pretend
1782 // the revision may be valid
1783 int err = lfs_bd_read(lfs,
1784 NULL, &lfs->rcache, sizeof(dir->rev),
1785 dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
1786 dir->rev = lfs_fromle32(dir->rev);
1787 if (err && err != LFS_ERR_CORRUPT) {
1788 return err;
1789 }
1790
1791 // to make sure we don't immediately evict, align the new revision count
1792 // to our block_cycles modulus, see lfs_dir_compact for why our modulus
1793 // is tweaked this way
1794 if (lfs->cfg->block_cycles > 0) {
1795 dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1));
1796 }
1797
1798 // set defaults
1799 dir->off = sizeof(dir->rev);
1800 dir->etag = 0xffffffff;
1801 dir->count = 0;
1802 dir->tail[0] = LFS_BLOCK_NULL;
1803 dir->tail[1] = LFS_BLOCK_NULL;
1804 dir->erased = false;
1805 dir->split = false;
1806
1807 // don't write out yet, let caller take care of that
1808 return 0;
1809 }
1810 #endif
1811
1812 #ifndef LFS_READONLY
lfs_dir_drop(lfs_t * lfs,lfs_mdir_t * dir,lfs_mdir_t * tail)1813 static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
1814 // steal state
1815 int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
1816 if (err) {
1817 return err;
1818 }
1819
1820 // steal tail
1821 lfs_pair_tole32(tail->tail);
1822 err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
1823 {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
1824 lfs_pair_fromle32(tail->tail);
1825 if (err) {
1826 return err;
1827 }
1828
1829 return 0;
1830 }
1831 #endif
1832
1833 #ifndef LFS_READONLY
lfs_dir_split(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount,lfs_mdir_t * source,uint16_t split,uint16_t end)1834 static int lfs_dir_split(lfs_t *lfs,
1835 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1836 lfs_mdir_t *source, uint16_t split, uint16_t end) {
1837 // create tail metadata pair
1838 lfs_mdir_t tail;
1839 int err = lfs_dir_alloc(lfs, &tail);
1840 if (err) {
1841 return err;
1842 }
1843
1844 tail.split = dir->split;
1845 tail.tail[0] = dir->tail[0];
1846 tail.tail[1] = dir->tail[1];
1847
1848 // note we don't care about LFS_OK_RELOCATED
1849 int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
1850 if (res < 0) {
1851 return res;
1852 }
1853
1854 dir->tail[0] = tail.pair[0];
1855 dir->tail[1] = tail.pair[1];
1856 dir->split = true;
1857
1858 // update root if needed
1859 if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) {
1860 lfs->root[0] = tail.pair[0];
1861 lfs->root[1] = tail.pair[1];
1862 }
1863
1864 return 0;
1865 }
1866 #endif
1867
1868 #ifndef LFS_READONLY
lfs_dir_commit_size(void * p,lfs_tag_t tag,const void * buffer)1869 static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
1870 lfs_size_t *size = p;
1871 (void)buffer;
1872
1873 *size += lfs_tag_dsize(tag);
1874 return 0;
1875 }
1876 #endif
1877
#ifndef LFS_READONLY
// context for lfs_dir_commit_commit, bundles the filesystem state and
// an in-progress commit for use with lfs_dir_traverse
struct lfs_dir_commit_commit {
    lfs_t *lfs;
    struct lfs_commit *commit;
};
#endif
1884
1885 #ifndef LFS_READONLY
lfs_dir_commit_commit(void * p,lfs_tag_t tag,const void * buffer)1886 static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
1887 struct lfs_dir_commit_commit *commit = p;
1888 return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer);
1889 }
1890 #endif
1891
1892 #ifndef LFS_READONLY
lfs_dir_needsrelocation(lfs_t * lfs,lfs_mdir_t * dir)1893 static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
1894 // If our revision count == n * block_cycles, we should force a relocation,
1895 // this is how littlefs wear-levels at the metadata-pair level. Note that we
1896 // actually use (block_cycles+1)|1, this is to avoid two corner cases:
1897 // 1. block_cycles = 1, which would prevent relocations from terminating
1898 // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
1899 // one metadata block in the pair, effectively making this useless
1900 return (lfs->cfg->block_cycles > 0
1901 && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
1902 }
1903 #endif
1904
#ifndef LFS_READONLY
// Compact the entries [begin, end) of source (plus pending attrs) into
// dir, rewriting the pair from scratch and relocating blocks on wear or
// corruption. Returns 0, LFS_OK_RELOCATED, or a negative error code.
static int lfs_dir_compact(lfs_t *lfs,
        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    // save some state in case block is bad
    bool relocated = false;
    bool tired = lfs_dir_needsrelocation(lfs, dir);

    // increment revision count
    dir->rev += 1;

    // do not proactively relocate blocks during migrations, this
    // can cause a number of failure states such: clobbering the
    // v1 superblock if we relocate root, and invalidating directory
    // pointers if we relocate the head of a directory. On top of
    // this, relocations increase the overall complexity of
    // lfs_migration, which is already a delicate operation.
#ifdef LFS_MIGRATE
    if (lfs->lfs1) {
        tired = false;
    }
#endif

    if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
        // we're writing too much, time to relocate
        goto relocate;
    }

    // begin loop to commit compaction to blocks until a compact sticks
    while (true) {
        {
            // setup commit state
            struct lfs_commit commit = {
                .block = dir->pair[1],
                .off = 0,
                .ptag = 0xffffffff,
                .crc = 0xffffffff,

                .begin = 0,
                // leave 8 bytes for the closing crc
                .end = (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
            };

            // erase block to write to
            int err = lfs_bd_erase(lfs, dir->pair[1]);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // write out header (revision count, little-endian on disk)
            dir->rev = lfs_tole32(dir->rev);
            err = lfs_dir_commitprog(lfs, &commit,
                    &dir->rev, sizeof(dir->rev));
            dir->rev = lfs_fromle32(dir->rev);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // traverse the directory, this time writing out all unique tags
            err = lfs_dir_traverse(lfs,
                    source, 0, 0xffffffff, attrs, attrcount,
                    LFS_MKTAG(0x400, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                    begin, end, -begin,
                    lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                        lfs, &commit});
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // commit tail, which may be new after last size check
            if (!lfs_pair_isnull(dir->tail)) {
                lfs_pair_tole32(dir->tail);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail);
                lfs_pair_fromle32(dir->tail);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // bring over gstate?
            lfs_gstate_t delta = {0};
            if (!relocated) {
                lfs_gstate_xor(&delta, &lfs->gdisk);
                lfs_gstate_xor(&delta, &lfs->gstate);
            }
            lfs_gstate_xor(&delta, &lfs->gdelta);
            // mask off the id bits of the delta tag
            delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);

            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            if (!lfs_gstate_iszero(&delta)) {
                lfs_gstate_tole32(&delta);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                            sizeof(delta)), &delta);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // complete commit with crc
            err = lfs_dir_commitcrc(lfs, &commit);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // successful compaction, swap dir pair to indicate most recent
            LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
            lfs_pair_swap(dir->pair);
            dir->count = end - begin;
            dir->off = commit.off;
            dir->etag = commit.ptag;
            // update gstate
            lfs->gdelta = (lfs_gstate_t){0};
            if (!relocated) {
                lfs->gdisk = lfs->gstate;
            }
        }
        break;

relocate:
        // commit was corrupted, drop caches and prepare to relocate block
        relocated = true;
        lfs_cache_drop(lfs, &lfs->pcache);
        if (!tired) {
            LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
        }

        // can't relocate superblock, filesystem is now frozen
        if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
            LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
                    dir->pair[1]);
            return LFS_ERR_NOSPC;
        }

        // relocate half of pair
        int err = lfs_alloc(lfs, &dir->pair[1]);
        if (err && (err != LFS_ERR_NOSPC || !tired)) {
            return err;
        }

        tired = false;
        continue;
    }

    return relocated ? LFS_OK_RELOCATED : 0;
}
#endif
2077
2078 #ifndef LFS_READONLY
// Compact the metadata pair dir, taking ids [begin, end) from source,
// splitting off additional metadata pairs whenever the commit would not
// fit in a single block. Also expands the superblock chain when it is
// seeing heavy writes. Returns 0, LFS_OK_RELOCATED (propagated from
// lfs_dir_compact), or a negative error code.
static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    while (true) {
        // find size of first split, we do this by halving the split until
        // the metadata is guaranteed to fit
        //
        // Note that this isn't a true binary search, we never increase the
        // split size. This may result in poorly distributed metadata but isn't
        // worth the extra code size or performance hit to fix.
        lfs_size_t split = begin;
        while (end - split > 1) {
            // measure the commit size of ids [split, end)
            lfs_size_t size = 0;
            int err = lfs_dir_traverse(lfs,
                    source, 0, 0xffffffff, attrs, attrcount,
                    LFS_MKTAG(0x400, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                    split, end, -split,
                    lfs_dir_commit_size, &size);
            if (err) {
                return err;
            }

            // space is complicated, we need room for:
            //
            // - tail:        4+2*4 = 12 bytes
            // - gstate:      4+3*4 = 16 bytes
            // - move delete: 4     =  4 bytes
            // - crc:         4+4   =  8 bytes
            //                total = 40 bytes
            //
            // And we cap at half a block to avoid degenerate cases with
            // nearly-full metadata blocks.
            //
            // NOTE(review): the end-split < 0xff bound presumably keeps the
            // number of ids per pair within tag-encoding limits — confirm
            // against the tag format.
            if (end - split < 0xff
                    && size <= lfs_min(
                        lfs->cfg->block_size - 40,
                        lfs_alignup(
                            (lfs->cfg->metadata_max
                                ? lfs->cfg->metadata_max
                                : lfs->cfg->block_size)/2,
                            lfs->cfg->prog_size))) {
                break;
            }

            split = split + ((end - split) / 2);
        }

        if (split == begin) {
            // no split needed
            break;
        }

        // split into two metadata pairs and continue
        int err = lfs_dir_split(lfs, dir, attrs, attrcount,
                source, split, end);
        if (err && err != LFS_ERR_NOSPC) {
            return err;
        }

        if (err) {
            // we can't allocate a new block, try to compact with degraded
            // performance
            LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
                    dir->pair[0], dir->pair[1]);
            break;
        } else {
            end = split;
        }
    }

    if (lfs_dir_needsrelocation(lfs, dir)
            && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
        // oh no! we're writing too much to the superblock,
        // should we expand?
        lfs_ssize_t size = lfs_fs_rawsize(lfs);
        if (size < 0) {
            return size;
        }

        // do we have extra space? littlefs can't reclaim this space
        // by itself, so expand cautiously
        if ((lfs_size_t)size < lfs->block_count/2) {
            LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
            int err = lfs_dir_split(lfs, dir, attrs, attrcount,
                    source, begin, end);
            if (err && err != LFS_ERR_NOSPC) {
                return err;
            }

            if (err) {
                // welp, we tried, if we ran out of space there's not much
                // we can do, we'll error later if we've become frozen
                LFS_WARN("Unable to expand superblock");
            } else {
                // all ids moved to the new pair, superblock keeps none
                end = begin;
            }
        }
    }

    return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
}
2181 #endif
2182
2183 #ifndef LFS_READONLY
// Commit attrs to the metadata pair dir, falling back to compaction (and
// possibly relocation) if the in-place commit does not fit or the block
// is corrupt. pair is the pre-commit pair, used to fix up any open
// handles in lfs->mlist. When the pair is dropped, the predecessor is
// returned through pdir. Returns 0, LFS_OK_RELOCATED, LFS_OK_DROPPED,
// or a negative error code.
static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const lfs_block_t pair[2],
        const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *pdir) {
    int state = 0;

    // calculate changes to the directory
    bool hasdelete = false;
    for (int i = 0; i < attrcount; i++) {
        if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
            dir->count += 1;
        } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
            LFS_ASSERT(dir->count > 0);
            dir->count -= 1;
            hasdelete = true;
        } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
            // tail attr carries a little-endian pair in its buffer
            dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
            dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
            dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
            lfs_pair_fromle32(dir->tail);
        }
    }

    // should we actually drop the directory block?
    if (hasdelete && dir->count == 0) {
        LFS_ASSERT(pdir);
        int err = lfs_fs_pred(lfs, dir->pair, pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        // only drop if a predecessor exists to absorb our tail
        if (err != LFS_ERR_NOENT && pdir->split) {
            state = LFS_OK_DROPPED;
            goto fixmlist;
        }
    }

    if (dir->erased) {
        // try to commit
        struct lfs_commit commit = {
            .block = dir->pair[0],
            .off = dir->off,
            .ptag = dir->etag,
            .crc = 0xffffffff,

            .begin = dir->off,
            // leave 8 bytes for the trailing crc tag
            .end = (lfs->cfg->metadata_max ?
                lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
        };

        // traverse attrs that need to be written out
        lfs_pair_tole32(dir->tail);
        int err = lfs_dir_traverse(lfs,
                dir, dir->off, dir->etag, attrs, attrcount,
                0, 0, 0, 0, 0,
                lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                    lfs, &commit});
        lfs_pair_fromle32(dir->tail);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // commit any global diffs if we have any
        lfs_gstate_t delta = {0};
        lfs_gstate_xor(&delta, &lfs->gstate);
        lfs_gstate_xor(&delta, &lfs->gdisk);
        lfs_gstate_xor(&delta, &lfs->gdelta);
        delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
        if (!lfs_gstate_iszero(&delta)) {
            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            lfs_gstate_tole32(&delta);
            err = lfs_dir_commitattr(lfs, &commit,
                    LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                        sizeof(delta)), &delta);
            if (err) {
                if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                    goto compact;
                }
                return err;
            }
        }

        // finalize commit with the crc
        err = lfs_dir_commitcrc(lfs, &commit);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // successful commit, update dir
        LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
        dir->off = commit.off;
        dir->etag = commit.ptag;
        // and update gstate
        lfs->gdisk = lfs->gstate;
        lfs->gdelta = (lfs_gstate_t){0};

        goto fixmlist;
    }

compact:
    // fall back to compaction
    lfs_cache_drop(lfs, &lfs->pcache);

    state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
            dir, 0, dir->count);
    if (state < 0) {
        return state;
    }

    goto fixmlist;

fixmlist:;
    // this complicated bit of logic is for fixing up any active
    // metadata-pairs that we may have affected
    //
    // note we have to make two passes since the mdir passed to
    // lfs_dir_commit could also be in this list, and even then
    // we need to copy the pair so they don't get clobbered if we refetch
    // our mdir.
    lfs_block_t oldpair[2] = {pair[0], pair[1]};
    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
        if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
            d->m = *dir;
            if (d->m.pair != pair) {
                // shift open handle ids to account for creates/deletes
                for (int i = 0; i < attrcount; i++) {
                    if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id == lfs_tag_id(attrs[i].tag)) {
                        // handle's own entry was deleted, detach it
                        d->m.pair[0] = LFS_BLOCK_NULL;
                        d->m.pair[1] = LFS_BLOCK_NULL;
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id > lfs_tag_id(attrs[i].tag)) {
                        d->id -= 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos -= 1;
                        }
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
                            d->id >= lfs_tag_id(attrs[i].tag)) {
                        d->id += 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos += 1;
                        }
                    }
                }
            }

            while (d->id >= d->m.count && d->m.split) {
                // we split and id is on tail now
                d->id -= d->m.count;
                int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
                if (err) {
                    return err;
                }
            }
        }
    }

    return state;
}
2352 #endif
2353
2354 #ifndef LFS_READONLY
// Commit attrs to dir, handling the fallout of relocations and drops:
// evicting non-RAM-backed inline files first, then patching the root,
// open handles, parent pointers, and predecessor tails as relocations
// ripple up the tree. The would-be tail recursion is flattened into a
// loop to bound stack usage. Returns 0, LFS_OK_ORPHANED if orphans were
// created (caller must deorphan), or a negative error code.
static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount) {
    // check for any inline files that aren't RAM backed and
    // forcefully evict them, needed for filesystem consistency
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
                f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
                f->ctz.size > lfs->cfg->cache_size) {
            int err = lfs_file_outline(lfs, f);
            if (err) {
                return err;
            }

            err = lfs_file_flush(lfs, f);
            if (err) {
                return err;
            }
        }
    }

    // commit into a local copy so a relocation doesn't clobber dir
    lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
    lfs_mdir_t ldir = *dir;
    lfs_mdir_t pdir;
    int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
            attrs, attrcount, &pdir);
    if (state < 0) {
        return state;
    }

    // update if we're not in mlist, note we may have already been
    // updated if we are in mlist
    if (lfs_pair_cmp(dir->pair, lpair) == 0) {
        *dir = ldir;
    }

    // commit was successful, but may require other changes in the
    // filesystem, these would normally be tail recursive, but we have
    // flattened them here avoid unbounded stack usage

    // need to drop?
    if (state == LFS_OK_DROPPED) {
        // steal state
        int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
        if (err) {
            return err;
        }

        // steal tail, note that this can't create a recursive drop
        lpair[0] = pdir.pair[0];
        lpair[1] = pdir.pair[1];
        lfs_pair_tole32(dir->tail);
        state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                    {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail}),
                NULL);
        lfs_pair_fromle32(dir->tail);
        if (state < 0) {
            return state;
        }

        ldir = pdir;
    }

    // need to relocate?
    bool orphans = false;
    while (state == LFS_OK_RELOCATED) {
        LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
                    "-> {0x%"PRIx32", 0x%"PRIx32"}",
                lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
        state = 0;

        // update internal root
        if (lfs_pair_cmp(lpair, lfs->root) == 0) {
            lfs->root[0] = ldir.pair[0];
            lfs->root[1] = ldir.pair[1];
        }

        // update internally tracked dirs
        for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
            if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
                d->m.pair[0] = ldir.pair[0];
                d->m.pair[1] = ldir.pair[1];
            }

            if (d->type == LFS_TYPE_DIR &&
                    lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
                ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
                ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
            }
        }

        // find parent
        lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
        if (tag < 0 && tag != LFS_ERR_NOENT) {
            return tag;
        }

        bool hasparent = (tag != LFS_ERR_NOENT);
        if (tag != LFS_ERR_NOENT) {
            // note that if we have a parent, we must have a pred, so this will
            // always create an orphan
            int err = lfs_fs_preporphans(lfs, +1);
            if (err) {
                return err;
            }

            // fix pending move in this pair? this looks like an optimization but
            // is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
                if (moveid < lfs_tag_id(tag)) {
                    // our entry shifts down once the moved id is deleted
                    tag -= LFS_MKTAG(0, 1, 0);
                }
            }

            // update the parent's pointer to our new pair
            lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {tag, ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            if (state == LFS_OK_RELOCATED) {
                // the parent itself relocated, loop again one level up
                lpair[0] = ppair[0];
                lpair[1] = ppair[1];
                ldir = pdir;
                orphans = true;
                continue;
            }
        }

        // find pred
        int err = lfs_fs_pred(lfs, lpair, &pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }
        LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));

        // if we can't find dir, it must be new
        if (err != LFS_ERR_NOENT) {
            if (lfs_gstate_hasorphans(&lfs->gstate)) {
                // next step, clean up orphans
                err = lfs_fs_preporphans(lfs, -hasparent);
                if (err) {
                    return err;
                }
            }

            // fix pending move in this pair? this looks like an optimization
            // but is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
            }

            // replace bad pair, either we clean up desync, or no desync occured
            lpair[0] = pdir.pair[0];
            lpair[1] = pdir.pair[1];
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
                            ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            ldir = pdir;
        }
    }

    return orphans ? LFS_OK_ORPHANED : 0;
}
2545 #endif
2546
2547 #ifndef LFS_READONLY
// Top-level metadata commit: performs the commit and then sweeps any
// orphans that nested block failures may have left behind.
static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount) {
    int state = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
    if (state < 0) {
        return state;
    }

    // non-zero state means the commit created orphans somewhere along
    // the way; deorphan now (a noop if none actually remain)
    if (state) {
        int err = lfs_fs_deorphan(lfs, false);
        if (err) {
            return err;
        }
    }

    return 0;
}
2567 #endif
2568
2569
2570 /// Top level directory operations ///
2571 #ifndef LFS_READONLY
// Create the directory at path: allocate a fresh metadata pair, splice
// it into the global metadata linked-list, and insert a DIR entry into
// the parent. Returns 0 or a negative error code (LFS_ERR_EXIST,
// LFS_ERR_NAMETOOLONG, ...).
static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    struct lfs_mlist cwd;
    cwd.next = lfs->mlist;
    uint16_t id;
    err = lfs_dir_find(lfs, &cwd.m, &path, &id);
    // only proceed if the final component is missing (NOENT with a
    // valid insertion id); anything else is EXIST or a real error
    if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
        return (err < 0) ? err : LFS_ERR_EXIST;
    }

    // check that name fits
    lfs_size_t nlen = strlen(path);
    if (nlen > lfs->name_max) {
        return LFS_ERR_NAMETOOLONG;
    }

    // build up new directory
    lfs_alloc_ack(lfs);
    lfs_mdir_t dir;
    err = lfs_dir_alloc(lfs, &dir);
    if (err) {
        return err;
    }

    // find end of list
    lfs_mdir_t pred = cwd.m;
    while (pred.split) {
        err = lfs_dir_fetch(lfs, &pred, pred.tail);
        if (err) {
            return err;
        }
    }

    // setup dir
    lfs_pair_tole32(pred.tail);
    err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
    lfs_pair_fromle32(pred.tail);
    if (err) {
        return err;
    }

    // current block not end of list?
    if (cwd.m.split) {
        // update tails, this creates a desync
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // it's possible our predecessor has to be relocated, and if
        // our parent is our predecessor's predecessor, this could have
        // caused our parent to go out of date, fortunately we can hook
        // ourselves into littlefs to catch this
        cwd.type = 0;
        cwd.id = 0;
        lfs->mlist = &cwd;

        lfs_pair_tole32(dir.pair);
        err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
        lfs_pair_fromle32(dir.pair);
        if (err) {
            // unhook ourselves before bailing
            lfs->mlist = cwd.next;
            return err;
        }

        lfs->mlist = cwd.next;
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }
    }

    // now insert into our parent block
    lfs_pair_tole32(dir.pair);
    err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
            {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
            {LFS_MKTAG_IF(!cwd.m.split,
                LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
    lfs_pair_fromle32(dir.pair);
    if (err) {
        return err;
    }

    return 0;
}
2666 #endif
2667
// Open the directory at path: resolve the path, fetch the directory's
// first metadata pair, and register the handle in lfs->mlist so it is
// kept in sync with future commits. Returns 0 or a negative error code.
static int lfs_dir_rawopen(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
    lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
    if (tag < 0) {
        return tag;
    }

    if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
        return LFS_ERR_NOTDIR;
    }

    lfs_block_t pair[2];
    if (lfs_tag_id(tag) == 0x3ff) {
        // handle root dir separately
        pair[0] = lfs->root[0];
        pair[1] = lfs->root[1];
    } else {
        // get dir pair from parent
        lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return res;
        }
        lfs_pair_fromle32(pair);
    }

    // fetch first pair
    int err = lfs_dir_fetch(lfs, &dir->m, pair);
    if (err) {
        return err;
    }

    // setup entry
    dir->head[0] = dir->m.pair[0];
    dir->head[1] = dir->m.pair[1];
    dir->id = 0;
    dir->pos = 0;

    // add to list of mdirs
    dir->type = LFS_TYPE_DIR;
    lfs_mlist_append(lfs, (struct lfs_mlist *)dir);

    return 0;
}
2711
// Close a directory handle. Directories carry no dirty on-disk state,
// so this only unhooks the handle from the tracked-mdir list.
static int lfs_dir_rawclose(lfs_t *lfs, lfs_dir_t *dir) {
    lfs_mlist_remove(lfs, (struct lfs_mlist *)dir);
    return 0;
}
2718
// Read the next directory entry into info. Positions 0 and 1 are the
// synthesized "." and ".." entries. Returns true if an entry was read,
// false at end of directory, or a negative error code.
static int lfs_dir_rawread(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
    memset(info, 0, sizeof(*info));

    // special offset for '.' and '..'
    if (dir->pos == 0) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, ".");
        dir->pos += 1;
        return true;
    } else if (dir->pos == 1) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, "..");
        dir->pos += 1;
        return true;
    }

    while (true) {
        if (dir->id == dir->m.count) {
            // exhausted this metadata pair, advance along the tail chain
            if (!dir->m.split) {
                return false;
            }

            int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }

        int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        // NOENT means this id holds no readable entry; skip it
        dir->id += 1;
        if (err != LFS_ERR_NOENT) {
            break;
        }
    }

    dir->pos += 1;
    return true;
}
2763
// Seek the directory to logical position off (as previously returned by
// lfs_dir_rawtell) by rewinding to the head and walking forward.
// Returns 0, LFS_ERR_INVAL if off is past the end, or another negative
// error code.
static int lfs_dir_rawseek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
    // simply walk from head dir
    int err = lfs_dir_rawrewind(lfs, dir);
    if (err) {
        return err;
    }

    // first two for ./..
    dir->pos = lfs_min(2, off);
    off -= dir->pos;

    // skip superblock entry
    dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);

    while (off > 0) {
        if (dir->id == dir->m.count) {
            // exhausted this metadata pair, advance along the tail chain
            if (!dir->m.split) {
                return LFS_ERR_INVAL;
            }

            err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }

        // consume as much of off as this pair allows
        int diff = lfs_min(dir->m.count - dir->id, off);
        dir->id += diff;
        dir->pos += diff;
        off -= diff;
    }

    return 0;
}
2800
lfs_dir_rawtell(lfs_t * lfs,lfs_dir_t * dir)2801 static lfs_soff_t lfs_dir_rawtell(lfs_t *lfs, lfs_dir_t *dir) {
2802 (void)lfs;
2803 return dir->pos;
2804 }
2805
// Rewind the directory to its first entry by refetching the head
// metadata pair and resetting the iteration state.
static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir) {
    int rc = lfs_dir_fetch(lfs, &dir->m, dir->head);
    if (rc) {
        return rc;
    }

    dir->id = 0;
    dir->pos = 0;
    return 0;
}
2817
2818
2819 /// File index list operations ///
// Map a byte offset in a CTZ skip-list to its block index, updating
// *off in place to the offset within that block. Each block reserves
// at least 2 words (8 bytes) for skip pointers, with additional
// pointers accounted for via popcount.
static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
    lfs_off_t pos = *off;
    lfs_off_t payload = lfs->cfg->block_size - 2*4;

    // a first estimate ignoring the extra skip pointers
    lfs_off_t index = pos / payload;
    if (index == 0) {
        // offset lands in the very first block
        return 0;
    }

    // refine the estimate to account for the pointers stored in
    // preceding blocks, then compute the in-block offset
    index = (pos - 4*(lfs_popc(index-1)+2)) / payload;
    *off = pos - payload*index - 4*lfs_popc(index);
    return index;
}
2832
// Find the block and in-block offset backing byte position pos of a CTZ
// skip-list with the given head block and total size. Walks backwards
// from the last block, taking the largest skip pointer that does not
// overshoot the target.
static int lfs_ctz_find(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
    if (size == 0) {
        // empty list, nothing to find
        *block = LFS_BLOCK_NULL;
        *off = 0;
        return 0;
    }

    // block index of the last byte, and of the target position
    lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
    lfs_off_t target = lfs_ctz_index(lfs, &pos);

    while (current > target) {
        // largest power-of-two skip available from this block that
        // doesn't jump past the target
        lfs_size_t skip = lfs_min(
                lfs_npw2(current-target+1) - 1,
                lfs_ctz(current));

        // skip pointer i lives at word offset i in the block
        int err = lfs_bd_read(lfs,
                pcache, rcache, sizeof(head),
                head, 4*skip, &head, sizeof(head));
        head = lfs_fromle32(head);
        if (err) {
            return err;
        }

        current -= 1 << skip;
    }

    *block = head;
    *off = pos;
    return 0;
}
2866
2867 #ifndef LFS_READONLY
// Extend the CTZ skip-list (head, size) into a newly allocated block,
// returning the new block and the offset where data should continue.
// If the last block is only partially full its contents are copied into
// the new block instead of appending. Bad blocks are handled by
// dropping the pcache and retrying with a fresh allocation.
static int lfs_ctz_extend(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_block_t *block, lfs_off_t *off) {
    while (true) {
        // go ahead and grab a block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        {
            err = lfs_bd_erase(lfs, nblock);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            if (size == 0) {
                // empty list, the new block is the whole list
                *block = nblock;
                *off = 0;
                return 0;
            }

            // index and end offset of the current last block
            lfs_size_t noff = size - 1;
            lfs_off_t index = lfs_ctz_index(lfs, &noff);
            noff = noff + 1;

            // just copy out the last block if it is incomplete
            if (noff != lfs->cfg->block_size) {
                for (lfs_off_t i = 0; i < noff; i++) {
                    uint8_t data;
                    err = lfs_bd_read(lfs,
                            NULL, rcache, noff-i,
                            head, i, &data, 1);
                    if (err) {
                        return err;
                    }

                    err = lfs_bd_prog(lfs,
                            pcache, rcache, true,
                            nblock, i, &data, 1);
                    if (err) {
                        if (err == LFS_ERR_CORRUPT) {
                            goto relocate;
                        }
                        return err;
                    }
                }

                *block = nblock;
                *off = noff;
                return 0;
            }

            // append block
            index += 1;
            // number of skip pointers the new block stores is
            // ctz(index)+1, per the CTZ skip-list layout
            lfs_size_t skips = lfs_ctz(index) + 1;
            lfs_block_t nhead = head;
            for (lfs_off_t i = 0; i < skips; i++) {
                nhead = lfs_tole32(nhead);
                err = lfs_bd_prog(lfs, pcache, rcache, true,
                        nblock, 4*i, &nhead, 4);
                nhead = lfs_fromle32(nhead);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                if (i != skips-1) {
                    // follow the previous block's skip pointer to find
                    // the target of the next, longer skip
                    err = lfs_bd_read(lfs,
                            NULL, rcache, sizeof(nhead),
                            nhead, 4*i, &nhead, sizeof(nhead));
                    nhead = lfs_fromle32(nhead);
                    if (err) {
                        return err;
                    }
                }
            }

            *block = nblock;
            *off = 4*skips;
            return 0;
        }

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, pcache);
    }
}
2965 #endif
2966
// Invoke cb(data, block) for every block in the CTZ skip-list
// (head, size), walking backwards from the last block. Reads two skip
// pointers at a time when the index allows, visiting two blocks per
// read. Stops early if cb returns non-zero.
static int lfs_ctz_traverse(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        int (*cb)(void*, lfs_block_t), void *data) {
    if (size == 0) {
        return 0;
    }

    // index of the last block in the list
    lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});

    while (true) {
        int err = cb(data, head);
        if (err) {
            return err;
        }

        if (index == 0) {
            return 0;
        }

        // read one pointer if index is odd, two if even
        lfs_block_t heads[2];
        int count = 2 - (index & 1);
        err = lfs_bd_read(lfs,
                pcache, rcache, count*sizeof(head),
                head, 0, &heads, count*sizeof(head));
        heads[0] = lfs_fromle32(heads[0]);
        heads[1] = lfs_fromle32(heads[1]);
        if (err) {
            return err;
        }

        for (int i = 0; i < count-1; i++) {
            err = cb(data, heads[i]);
            if (err) {
                return err;
            }
        }

        head = heads[count-1];
        index -= count;
    }
}
3009
3010
3011 /// Top level file operations ///
// Open (and possibly create/truncate) the file at path with an explicit
// per-file configuration. Resolves the path, creates the entry if
// needed, loads the on-disk struct or inline data, fetches user attrs,
// and allocates the file cache. On any failure the partially-opened
// handle is torn down via lfs_file_rawclose. Returns 0 or a negative
// error code.
//
// NOTE(review): the if/else chain below intentionally spans
// #ifdef LFS_READONLY branches — brace balance only holds after
// preprocessing, so edit with care.
static int lfs_file_rawopencfg(lfs_t *lfs, lfs_file_t *file,
        const char *path, int flags,
        const struct lfs_file_config *cfg) {
#ifndef LFS_READONLY
    // deorphan if we haven't yet, needed at most once after poweron
    if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
        int err = lfs_fs_forceconsistency(lfs);
        if (err) {
            return err;
        }
    }
#else
    LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY);
#endif

    // setup simple file details
    int err;
    file->cfg = cfg;
    file->flags = flags;
    file->pos = 0;
    file->off = 0;
    file->cache.buffer = NULL;

    // allocate entry for file if it doesn't exist
    lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
    if (tag < 0 && !(tag == LFS_ERR_NOENT && file->id != 0x3ff)) {
        err = tag;
        goto cleanup;
    }

    // get id, add to list of mdirs to catch update changes
    file->type = LFS_TYPE_REG;
    lfs_mlist_append(lfs, (struct lfs_mlist *)file);

#ifdef LFS_READONLY
    if (tag == LFS_ERR_NOENT) {
        err = LFS_ERR_NOENT;
        goto cleanup;
#else
    if (tag == LFS_ERR_NOENT) {
        if (!(flags & LFS_O_CREAT)) {
            err = LFS_ERR_NOENT;
            goto cleanup;
        }

        // check that name fits
        lfs_size_t nlen = strlen(path);
        if (nlen > lfs->name_max) {
            err = LFS_ERR_NAMETOOLONG;
            goto cleanup;
        }

        // get next slot and create entry to remember name
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));

        // it may happen that the file name doesn't fit in the metadata
        // blocks, e.g., a 256 byte file name will not fit in a 128 byte
        // block.
        err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
        if (err) {
            goto cleanup;
        }

        // new files start out as empty inline files
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
    } else if (flags & LFS_O_EXCL) {
        err = LFS_ERR_EXIST;
        goto cleanup;
#endif
    } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
        err = LFS_ERR_ISDIR;
        goto cleanup;
#ifndef LFS_READONLY
    } else if (flags & LFS_O_TRUNC) {
        // truncate if requested
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
        file->flags |= LFS_F_DIRTY;
#endif
    } else {
        // try to load what's on disk, if it's inlined we'll fix it later
        tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }
        lfs_ctz_fromle32(&file->ctz);
    }

    // fetch attrs
    for (unsigned i = 0; i < file->cfg->attr_count; i++) {
        // if opened for read / read-write operations
        if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
                        file->id, file->cfg->attrs[i].size),
                    file->cfg->attrs[i].buffer);
            if (res < 0 && res != LFS_ERR_NOENT) {
                err = res;
                goto cleanup;
            }
        }

#ifndef LFS_READONLY
        // if opened for write / read-write operations
        if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
            if (file->cfg->attrs[i].size > lfs->attr_max) {
                err = LFS_ERR_NOSPC;
                goto cleanup;
            }

            file->flags |= LFS_F_DIRTY;
        }
#endif
    }

    // allocate buffer if needed
    if (file->cfg->buffer) {
        file->cache.buffer = file->cfg->buffer;
    } else {
        file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!file->cache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leak
    lfs_cache_zero(lfs, &file->cache);

    if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
        // load inline files
        file->ctz.head = LFS_BLOCK_INLINE;
        file->ctz.size = lfs_tag_size(tag);
        file->flags |= LFS_F_INLINE;
        file->cache.block = file->ctz.head;
        file->cache.off = 0;
        file->cache.size = lfs->cfg->cache_size;

        // don't always read (may be new/trunc file)
        if (file->ctz.size > 0) {
            // NOTE(review): size capped at 0x3fe — presumably the tag
            // size-field limit; confirm against the tag encoding
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
                        lfs_min(file->cache.size, 0x3fe)),
                    file->cache.buffer);
            if (res < 0) {
                err = res;
                goto cleanup;
            }
        }
    }

    return 0;

cleanup:
    // clean up lingering resources
#ifndef LFS_READONLY
    file->flags |= LFS_F_ERRED;
#endif
    lfs_file_rawclose(lfs, file);
    return err;
}
3177
3178 #ifndef LFS_NO_MALLOC
3179 static int lfs_file_rawopen(lfs_t *lfs, lfs_file_t *file,
3180 const char *path, int flags) {
3181 static const struct lfs_file_config defaults = {0};
3182 int err = lfs_file_rawopencfg(lfs, file, path, flags, &defaults);
3183 return err;
3184 }
3185 #endif
3186
3187 static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file) {
3188 #ifndef LFS_READONLY
3189 int err = lfs_file_rawsync(lfs, file);
3190 #else
3191 int err = 0;
3192 #endif
3193
3194 // remove from list of mdirs
3195 lfs_mlist_remove(lfs, (struct lfs_mlist*)file);
3196
3197 // clean up memory
3198 if (!file->cfg->buffer) {
3199 lfs_free(file->cache.buffer);
3200 }
3201
3202 return err;
3203 }
3204
3205
3206 #ifndef LFS_READONLY
// Move the file's currently-open block to a freshly allocated block,
// copying the first file->off bytes either out of the metadata pair
// (inline files) or from the old block/caches. On a bad destination
// block the pcache is dropped and a new allocation is tried.
static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
    while (true) {
        // just relocate what exists into new block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        err = lfs_bd_erase(lfs, nblock);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // either read from dirty cache or disk
        for (lfs_off_t i = 0; i < file->off; i++) {
            uint8_t data;
            if (file->flags & LFS_F_INLINE) {
                err = lfs_dir_getread(lfs, &file->m,
                        // note we evict inline files before they can be dirty
                        NULL, &file->cache, file->off-i,
                        LFS_MKTAG(0xfff, 0x1ff, 0),
                        LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                        i, &data, 1);
                if (err) {
                    return err;
                }
            } else {
                err = lfs_bd_read(lfs,
                        &file->cache, &lfs->rcache, file->off-i,
                        file->block, i, &data, 1);
                if (err) {
                    return err;
                }
            }

            err = lfs_bd_prog(lfs,
                    &lfs->pcache, &lfs->rcache, true,
                    nblock, i, &data, 1);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        // copy over new state of file
        // (the unflushed data now lives in lfs->pcache; adopt it as the
        // file's cache and clear the global pcache)
        memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
        file->cache.block = lfs->pcache.block;
        file->cache.off = lfs->pcache.off;
        file->cache.size = lfs->pcache.size;
        lfs_cache_zero(lfs, &lfs->pcache);

        file->block = nblock;
        file->flags |= LFS_F_WRITING;
        return 0;

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, &lfs->pcache);
    }
}
3275 #endif
3276
3277 #ifndef LFS_READONLY
3278 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
3279 file->off = file->pos;
3280 lfs_alloc_ack(lfs);
3281 int err = lfs_file_relocate(lfs, file);
3282 if (err) {
3283 return err;
3284 }
3285
3286 file->flags &= ~LFS_F_INLINE;
3287 return 0;
3288 }
3289 #endif
3290
// Flush buffered file state: drop any read caching, and if a write is
// in flight, copy over any trailing data past the write position, write
// the cached data to disk, and update the in-memory ctz reference.
// Note this does not update the directory entry; that is
// lfs_file_rawsync's job.
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_READING) {
        if (!(file->flags & LFS_F_INLINE)) {
            lfs_cache_drop(lfs, &file->cache);
        }
        file->flags &= ~LFS_F_READING;
    }

#ifndef LFS_READONLY
    if (file->flags & LFS_F_WRITING) {
        lfs_off_t pos = file->pos;

        if (!(file->flags & LFS_F_INLINE)) {
            // copy over anything after current branch
            lfs_file_t orig = {
                .ctz.head = file->ctz.head,
                .ctz.size = file->ctz.size,
                .flags = LFS_O_RDONLY,
                .pos = file->pos,
                .cache = lfs->rcache,
            };
            lfs_cache_drop(lfs, &lfs->rcache);

            while (file->pos < file->ctz.size) {
                // copy over a byte at a time, leave it up to caching
                // to make this efficient
                uint8_t data;
                lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1);
                if (res < 0) {
                    return res;
                }

                res = lfs_file_flushedwrite(lfs, file, &data, 1);
                if (res < 0) {
                    return res;
                }

                // keep our reference to the rcache in sync
                if (lfs->rcache.block != LFS_BLOCK_NULL) {
                    lfs_cache_drop(lfs, &orig.cache);
                    lfs_cache_drop(lfs, &lfs->rcache);
                }
            }

            // write out what we have
            while (true) {
                int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                break;

relocate:
                LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
                err = lfs_file_relocate(lfs, file);
                if (err) {
                    return err;
                }
            }
        } else {
            file->pos = lfs_max(file->pos, file->ctz.size);
        }

        // actual file updates
        file->ctz.head = file->block;
        file->ctz.size = file->pos;
        file->flags &= ~LFS_F_WRITING;
        file->flags |= LFS_F_DIRTY;

        // restore the caller-visible position
        file->pos = pos;
    }
#endif

    return 0;
}
3370
3371 #ifndef LFS_READONLY
// Sync the file's metadata with its directory: flush any buffered data,
// then commit the updated inline data or ctz reference, along with any
// user attributes, to the file's metadata pair. A no-op if the file has
// previously errored (LFS_F_ERRED), as its state may be inconsistent.
static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_ERRED) {
        // it's not safe to do anything if our file errored
        return 0;
    }

    int err = lfs_file_flush(lfs, file);
    if (err) {
        file->flags |= LFS_F_ERRED;
        return err;
    }


    if ((file->flags & LFS_F_DIRTY) &&
            !lfs_pair_isnull(file->m.pair)) {
        // update dir entry
        uint16_t type;
        const void *buffer;
        lfs_size_t size;
        struct lfs_ctz ctz;
        if (file->flags & LFS_F_INLINE) {
            // inline the whole file
            type = LFS_TYPE_INLINESTRUCT;
            buffer = file->cache.buffer;
            size = file->ctz.size;
        } else {
            // update the ctz reference
            type = LFS_TYPE_CTZSTRUCT;
            // copy ctz so alloc will work during a relocate
            ctz = file->ctz;
            lfs_ctz_tole32(&ctz);
            buffer = &ctz;
            size = sizeof(ctz);
        }

        // commit file data and attributes
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(type, file->id, size), buffer},
                {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
                    file->cfg->attr_count), file->cfg->attrs}));
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }

        file->flags &= ~LFS_F_DIRTY;
    }

    return 0;
}
3422 #endif
3423
// Read up to size bytes at the current position, assuming any pending
// writes have already been flushed. Advances file->pos and returns the
// number of bytes read (0 at end-of-file) or a negative error code.
static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if (file->pos >= file->ctz.size) {
        // eof if past end
        return 0;
    }

    // clamp the read to the end of the file
    size = lfs_min(size, file->ctz.size - file->pos);
    nsize = size;

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_READING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                // walk the ctz skip-list to the block holding pos
                int err = lfs_ctz_find(lfs, NULL, &file->cache,
                        file->ctz.head, file->ctz.size,
                        file->pos, &file->block, &file->off);
                if (err) {
                    return err;
                }
            } else {
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_READING;
        }

        // read as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        if (file->flags & LFS_F_INLINE) {
            int err = lfs_dir_getread(lfs, &file->m,
                    NULL, &file->cache, lfs->cfg->block_size,
                    LFS_MKTAG(0xfff, 0x1ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                    file->off, data, diff);
            if (err) {
                return err;
            }
        } else {
            int err = lfs_bd_read(lfs,
                    NULL, &file->cache, lfs->cfg->block_size,
                    file->block, file->off, data, diff);
            if (err) {
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;
    }

    return size;
}
3484
3485 static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
3486 void *buffer, lfs_size_t size) {
3487 LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
3488
3489 #ifndef LFS_READONLY
3490 if (file->flags & LFS_F_WRITING) {
3491 // flush out any writes
3492 int err = lfs_file_flush(lfs, file);
3493 if (err) {
3494 return err;
3495 }
3496 }
3497 #endif
3498
3499 return lfs_file_flushedread(lfs, file, buffer, size);
3500 }
3501
3502
3503 #ifndef LFS_READONLY
// Write size bytes at the current position, assuming any reads have
// already been flushed. Handles outlining inline files that grow too
// large, extending the ctz skip-list with new blocks, and relocating on
// bad blocks. Advances file->pos and returns size or a negative error.
static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_size_t nsize = size;

    // would this write push the inline file past the inline limit?
    // 0x3fe is the largest size encodable in a tag (0x3ff is reserved)
    if ((file->flags & LFS_F_INLINE) &&
            lfs_max(file->pos+nsize, file->ctz.size) >
            lfs_min(0x3fe, lfs_min(
                lfs->cfg->cache_size,
                (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
        // inline file doesn't fit anymore
        int err = lfs_file_outline(lfs, file);
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }
    }

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_WRITING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
                    // find out which block we're extending from
                    int err = lfs_ctz_find(lfs, NULL, &file->cache,
                            file->ctz.head, file->ctz.size,
                            file->pos-1, &file->block, &(lfs_off_t){0});
                    if (err) {
                        file->flags |= LFS_F_ERRED;
                        return err;
                    }

                    // mark cache as dirty since we may have read data into it
                    lfs_cache_zero(lfs, &file->cache);
                }

                // extend file with new blocks
                lfs_alloc_ack(lfs);
                int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
                        file->block, file->pos,
                        &file->block, &file->off);
                if (err) {
                    file->flags |= LFS_F_ERRED;
                    return err;
                }
            } else {
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_WRITING;
        }

        // program as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        while (true) {
            int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
                    file->block, file->off, data, diff);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                file->flags |= LFS_F_ERRED;
                return err;
            }

            break;
relocate:
            // bad block, move the data we've written to a new block
            err = lfs_file_relocate(lfs, file);
            if (err) {
                file->flags |= LFS_F_ERRED;
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;

        lfs_alloc_ack(lfs);
    }

    return size;
}
3591
// Write size bytes to the file at the current position. Handles the
// append flag, the file_max limit, and zero-filling any gap created by
// seeking past the end of the file before writing. Returns the number
// of bytes written or a negative error code.
static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);

    if (file->flags & LFS_F_READING) {
        // drop any reads
        int err = lfs_file_flush(lfs, file);
        if (err) {
            return err;
        }
    }

    if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
        file->pos = file->ctz.size;
    }

    if (file->pos + size > lfs->file_max) {
        // Larger than file limit?
        return LFS_ERR_FBIG;
    }

    if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
        // fill with zeros
        lfs_off_t pos = file->pos;
        file->pos = file->ctz.size;

        while (file->pos < pos) {
            lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return res;
            }
        }
    }

    lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size);
    if (nsize < 0) {
        return nsize;
    }

    // a successful write clears any previous error state
    file->flags &= ~LFS_F_ERRED;
    return nsize;
}
3634 #endif
3635
// Change the file position according to whence (SET/CUR/END). Avoids a
// flush entirely when only reading and the new offset still lands in
// the currently cached block. Returns the new position or a negative
// error code.
static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
        lfs_soff_t off, int whence) {
    // find new pos
    lfs_off_t npos = file->pos;
    if (whence == LFS_SEEK_SET) {
        npos = off;
    } else if (whence == LFS_SEEK_CUR) {
        if ((lfs_soff_t)file->pos + off < 0) {
            return LFS_ERR_INVAL;
        } else {
            npos = file->pos + off;
        }
    } else if (whence == LFS_SEEK_END) {
        lfs_soff_t res = lfs_file_rawsize(lfs, file) + off;
        if (res < 0) {
            return LFS_ERR_INVAL;
        } else {
            npos = res;
        }
    }

    if (npos > lfs->file_max) {
        // file position out of range
        return LFS_ERR_INVAL;
    }

    if (file->pos == npos) {
        // noop - position has not changed
        return npos;
    }

    // if we're only reading and our new offset is still in the file's cache
    // we can avoid flushing and needing to reread the data
    if (
#ifndef LFS_READONLY
        !(file->flags & LFS_F_WRITING)
#else
        true
#endif
            ) {
        // note lfs_ctz_index converts pos -> in-block offset in place
        int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos});
        lfs_off_t noff = npos;
        int nindex = lfs_ctz_index(lfs, &noff);
        if (oindex == nindex
                && noff >= file->cache.off
                && noff < file->cache.off + file->cache.size) {
            file->pos = npos;
            file->off = noff;
            return npos;
        }
    }

    // write out everything beforehand, may be noop if rdonly
    int err = lfs_file_flush(lfs, file);
    if (err) {
        return err;
    }

    // update pos
    file->pos = npos;
    return npos;
}
3698
3699 #ifndef LFS_READONLY
// Truncate or extend the file to exactly size bytes. Shrinking may
// revert the file to inline storage when it fits; growing zero-fills
// the gap. The caller's file position is preserved across the
// operation.
static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);

    if (size > LFS_FILE_MAX) {
        return LFS_ERR_INVAL;
    }

    lfs_off_t pos = file->pos;
    lfs_off_t oldsize = lfs_file_rawsize(lfs, file);
    if (size < oldsize) {
        // revert to inline file?
        // (same inline size limit as lfs_file_flushedwrite)
        if (size <= lfs_min(0x3fe, lfs_min(
                lfs->cfg->cache_size,
                (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
            // flush+seek to head
            lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET);
            if (res < 0) {
                return (int)res;
            }

            // read our data into rcache temporarily
            lfs_cache_drop(lfs, &lfs->rcache);
            res = lfs_file_flushedread(lfs, file,
                    lfs->rcache.buffer, size);
            if (res < 0) {
                return (int)res;
            }

            file->ctz.head = LFS_BLOCK_INLINE;
            file->ctz.size = size;
            file->flags |= LFS_F_DIRTY | LFS_F_READING | LFS_F_INLINE;
            file->cache.block = file->ctz.head;
            file->cache.off = 0;
            file->cache.size = lfs->cfg->cache_size;
            memcpy(file->cache.buffer, lfs->rcache.buffer, size);

        } else {
            // need to flush since directly changing metadata
            int err = lfs_file_flush(lfs, file);
            if (err) {
                return err;
            }

            // lookup new head in ctz skip list
            err = lfs_ctz_find(lfs, NULL, &file->cache,
                    file->ctz.head, file->ctz.size,
                    size-1, &file->block, &(lfs_off_t){0});
            if (err) {
                return err;
            }

            // need to set pos/block/off consistently so seeking back to
            // the old position does not get confused
            file->pos = size;
            file->ctz.head = file->block;
            file->ctz.size = size;
            file->flags |= LFS_F_DIRTY | LFS_F_READING;
        }
    } else if (size > oldsize) {
        // flush+seek if not already at end
        lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
        if (res < 0) {
            return (int)res;
        }

        // fill with zeros
        while (file->pos < size) {
            res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return (int)res;
            }
        }
    }

    // restore pos
    lfs_soff_t res = lfs_file_rawseek(lfs, file, pos, LFS_SEEK_SET);
    if (res < 0) {
        return (int)res;
    }

    return 0;
}
3783 #endif
3784
3785 static lfs_soff_t lfs_file_rawtell(lfs_t *lfs, lfs_file_t *file) {
3786 (void)lfs;
3787 return file->pos;
3788 }
3789
3790 static int lfs_file_rawrewind(lfs_t *lfs, lfs_file_t *file) {
3791 lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET);
3792 if (res < 0) {
3793 return (int)res;
3794 }
3795
3796 return 0;
3797 }
3798
3799 static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file) {
3800 (void)lfs;
3801
3802 #ifndef LFS_READONLY
3803 if (file->flags & LFS_F_WRITING) {
3804 return lfs_max(file->pos, file->ctz.size);
3805 }
3806 #endif
3807
3808 return file->ctz.size;
3809 }
3810
3811
3812 /// General fs operations ///
3813 static int lfs_rawstat(lfs_t *lfs, const char *path, struct lfs_info *info) {
3814 lfs_mdir_t cwd;
3815 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3816 if (tag < 0) {
3817 return (int)tag;
3818 }
3819
3820 return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
3821 }
3822
3823 #ifndef LFS_READONLY
// Remove the file or directory at path. Directories must be empty.
// Removing a directory temporarily marks the filesystem as orphaned
// while the dir is unlinked from both its parent's entry and the
// metadata-pair tail list.
static int lfs_rawremove(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    lfs_mdir_t cwd;
    lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
    if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
        // not found, or attempting to remove the root (id 0x3ff)
        return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
    }

    struct lfs_mlist dir;
    dir.next = lfs->mlist;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t pair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(pair);

        err = lfs_dir_fetch(lfs, &dir.m, pair);
        if (err) {
            return err;
        }

        if (dir.m.count > 0 || dir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        dir.type = 0;
        dir.id = 0;
        lfs->mlist = &dir;
    }

    // delete the entry
    err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
    if (err) {
        lfs->mlist = dir.next;
        return err;
    }

    lfs->mlist = dir.next;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &cwd, &dir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
3900 #endif
3901
3902 #ifndef LFS_READONLY
// Rename/move oldpath to newpath. Handles moving between metadata pairs
// (via the global move state so a power-loss cannot duplicate the
// entry), renaming within the same pair, and atomically replacing an
// existing destination (destination dirs must be empty).
static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // find old entry
    lfs_mdir_t oldcwd;
    lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
    if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
        return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
    }

    // find new entry
    lfs_mdir_t newcwd;
    uint16_t newid;
    lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
    if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
            !(prevtag == LFS_ERR_NOENT && newid != 0x3ff)) {
        return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
    }

    // if we're in the same pair there's a few special cases...
    bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
    uint16_t newoldid = lfs_tag_id(oldtag);

    struct lfs_mlist prevdir;
    prevdir.next = lfs->mlist;
    if (prevtag == LFS_ERR_NOENT) {
        // check that name fits
        lfs_size_t nlen = strlen(newpath);
        if (nlen > lfs->name_max) {
            return LFS_ERR_NAMETOOLONG;
        }

        // there is a small chance we are being renamed in the same
        // directory/ to an id less than our old id, the global update
        // to handle this is a bit messy
        if (samepair && newid <= newoldid) {
            newoldid += 1;
        }
    } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
        return LFS_ERR_ISDIR;
    } else if (samepair && newid == newoldid) {
        // we're renaming to ourselves??
        return 0;
    } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t prevpair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(prevpair);

        // must be empty before removal
        err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
        if (err) {
            return err;
        }

        if (prevdir.m.count > 0 || prevdir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        prevdir.type = 0;
        prevdir.id = 0;
        lfs->mlist = &prevdir;
    }

    if (!samepair) {
        lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
    }

    // move over all attributes
    err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
            {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
                LFS_TYPE_DELETE, newid, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL},
            {LFS_MKTAG(lfs_tag_type3(oldtag), newid, strlen(newpath)), newpath},
            {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
            {LFS_MKTAG_IF(samepair,
                LFS_TYPE_DELETE, newoldid, 0), NULL}));
    if (err) {
        lfs->mlist = prevdir.next;
        return err;
    }

    // let commit clean up after move (if we're different! otherwise move
    // logic already fixed it for us)
    if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
        // prep gstate and delete move id
        lfs_fs_prepmove(lfs, 0x3ff, NULL);
        err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL}));
        if (err) {
            lfs->mlist = prevdir.next;
            return err;
        }
    }

    lfs->mlist = prevdir.next;
    if (prevtag != LFS_ERR_NOENT
            && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
4036 #endif
4037
4038 static lfs_ssize_t lfs_rawgetattr(lfs_t *lfs, const char *path,
4039 uint8_t type, void *buffer, lfs_size_t size) {
4040 lfs_mdir_t cwd;
4041 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
4042 if (tag < 0) {
4043 return tag;
4044 }
4045
4046 uint16_t id = lfs_tag_id(tag);
4047 if (id == 0x3ff) {
4048 // special case for root
4049 id = 0;
4050 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
4051 if (err) {
4052 return err;
4053 }
4054 }
4055
4056 tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
4057 LFS_MKTAG(LFS_TYPE_USERATTR + type,
4058 id, lfs_min(size, lfs->attr_max)),
4059 buffer);
4060 if (tag < 0) {
4061 if (tag == LFS_ERR_NOENT) {
4062 return LFS_ERR_NOATTR;
4063 }
4064
4065 return tag;
4066 }
4067
4068 return lfs_tag_size(tag);
4069 }
4070
4071 #ifndef LFS_READONLY
4072 static int lfs_commitattr(lfs_t *lfs, const char *path,
4073 uint8_t type, const void *buffer, lfs_size_t size) {
4074 lfs_mdir_t cwd;
4075 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
4076 if (tag < 0) {
4077 return tag;
4078 }
4079
4080 uint16_t id = lfs_tag_id(tag);
4081 if (id == 0x3ff) {
4082 // special case for root
4083 id = 0;
4084 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
4085 if (err) {
4086 return err;
4087 }
4088 }
4089
4090 return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
4091 {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
4092 }
4093 #endif
4094
4095 #ifndef LFS_READONLY
4096 static int lfs_rawsetattr(lfs_t *lfs, const char *path,
4097 uint8_t type, const void *buffer, lfs_size_t size) {
4098 if (size > lfs->attr_max) {
4099 return LFS_ERR_NOSPC;
4100 }
4101
4102 return lfs_commitattr(lfs, path, type, buffer, size);
4103 }
4104 #endif
4105
4106 #ifndef LFS_READONLY
4107 static int lfs_rawremoveattr(lfs_t *lfs, const char *path, uint8_t type) {
4108 return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
4109 }
4110 #endif
4111
4112
4113 /// Filesystem operations ///
// Initialize the in-memory lfs state from the user's config: validate
// the configuration (asserting on misconfiguration), allocate or adopt
// the read/prog/lookahead buffers, and set the size limits and default
// state. On allocation failure everything is freed via lfs_deinit.
static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    lfs->cfg = cfg;
    lfs->block_count = cfg->block_count;  // May be 0
    int err = 0;

#ifdef LFS_MULTIVERSION
    // this driver only supports minor version < current minor version
    LFS_ASSERT(!lfs->cfg->disk_version || (
            (0xffff & (lfs->cfg->disk_version >> 16))
                    == LFS_DISK_VERSION_MAJOR
                && (0xffff & (lfs->cfg->disk_version >> 0))
                    <= LFS_DISK_VERSION_MINOR));
#endif

    // check that bool is a truthy-preserving type
    //
    // note the most common reason for this failure is a before-c99 compiler,
    // which littlefs currently does not support
    LFS_ASSERT((bool)0x80000000);

    // validate that the lfs-cfg sizes were initiated properly before
    // performing any arithmetic logics with them
    LFS_ASSERT(lfs->cfg->read_size != 0);
    LFS_ASSERT(lfs->cfg->prog_size != 0);
    LFS_ASSERT(lfs->cfg->cache_size != 0);

    // check that block size is a multiple of cache size is a multiple
    // of prog and read sizes
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
    LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);

    // check that the block size is large enough to fit all ctz pointers
    LFS_ASSERT(lfs->cfg->block_size >= 128);
    // this is the exact calculation for all ctz pointers, if this fails
    // and the simpler assert above does not, math must be broken
    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
            <= lfs->cfg->block_size);

    // block_cycles = 0 is no longer supported.
    //
    // block_cycles is the number of erase cycles before littlefs evicts
    // metadata logs as a part of wear leveling. Suggested values are in the
    // range of 100-1000, or set block_cycles to -1 to disable block-level
    // wear-leveling.
    LFS_ASSERT(lfs->cfg->block_cycles != 0);


    // setup read cache
    if (lfs->cfg->read_buffer) {
        lfs->rcache.buffer = lfs->cfg->read_buffer;
    } else {
        lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->rcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // setup program cache
    if (lfs->cfg->prog_buffer) {
        lfs->pcache.buffer = lfs->cfg->prog_buffer;
    } else {
        lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->pcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leaks
    lfs_cache_zero(lfs, &lfs->rcache);
    lfs_cache_zero(lfs, &lfs->pcache);

    // setup lookahead, must be multiple of 64-bits, 32-bit aligned
    LFS_ASSERT(lfs->cfg->lookahead_size > 0);
    LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
            (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
    if (lfs->cfg->lookahead_buffer) {
        lfs->free.buffer = lfs->cfg->lookahead_buffer;
    } else {
        lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
        if (!lfs->free.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // check that the size limits are sane
    // (a zero in the config means "use the compile-time maximum")
    LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
    lfs->name_max = lfs->cfg->name_max;
    if (!lfs->name_max) {
        lfs->name_max = LFS_NAME_MAX;
    }

    LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
    lfs->file_max = lfs->cfg->file_max;
    if (!lfs->file_max) {
        lfs->file_max = LFS_FILE_MAX;
    }

    LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
    lfs->attr_max = lfs->cfg->attr_max;
    if (!lfs->attr_max) {
        lfs->attr_max = LFS_ATTR_MAX;
    }

    LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);

    // setup default state
    lfs->root[0] = LFS_BLOCK_NULL;
    lfs->root[1] = LFS_BLOCK_NULL;
    lfs->mlist = NULL;
    lfs->seed = 0;
    lfs->gdisk = (lfs_gstate_t){0};
    lfs->gstate = (lfs_gstate_t){0};
    lfs->gdelta = (lfs_gstate_t){0};
#ifdef LFS_MIGRATE
    lfs->lfs1 = NULL;
#endif

    return 0;

cleanup:
    lfs_deinit(lfs);
    return err;
}
4241
4242 static int lfs_deinit(lfs_t *lfs) {
4243 // free allocated memory
4244 if (!lfs->cfg->read_buffer) {
4245 lfs_free(lfs->rcache.buffer);
4246 }
4247
4248 if (!lfs->cfg->prog_buffer) {
4249 lfs_free(lfs->pcache.buffer);
4250 }
4251
4252 if (!lfs->cfg->lookahead_buffer) {
4253 lfs_free(lfs->free.buffer);
4254 }
4255
4256 return 0;
4257 }
4258
4259
4260
4261 #ifndef LFS_READONLY
// Format the block device with a fresh littlefs: allocate a root dir,
// write the superblock, force a compaction so an older littlefs image
// on disk can't be accidentally mounted, and verify the result by
// refetching blocks {0, 1}. Always deinits before returning.
static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        // formatting requires an explicit block count
        LFS_ASSERT(cfg->block_count != 0);

        // create free lookahead
        memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
        lfs->free.off = 0;
        lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
                lfs->block_count);
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // create root dir
        lfs_mdir_t root;
        err = lfs_dir_alloc(lfs, &root);
        if (err) {
            goto cleanup;
        }

        // write one superblock
        lfs_superblock_t superblock = {
            .version     = lfs_fs_disk_version(lfs),
            .block_size  = lfs->cfg->block_size,
            .block_count = lfs->block_count,
            .name_max    = lfs->name_max,
            .file_max    = lfs->file_max,
            .attr_max    = lfs->attr_max,
        };

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock}));
        if (err) {
            goto cleanup;
        }

        // force compaction to prevent accidentally mounting any
        // older version of littlefs that may live on disk
        root.erased = false;
        err = lfs_dir_commit(lfs, &root, NULL, 0);
        if (err) {
            goto cleanup;
        }

        // sanity check that fetch works
        err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }
    }

cleanup:
    lfs_deinit(lfs);
    return err;

}
4327 #endif
4328
// Mount an existing littlefs: walk the metadata-pair tail list (with
// Brent's algorithm guarding against on-disk cycles), locate and
// validate the superblock, adopt its limits and block count, and
// collect any pending gstate. On any failure the state is torn down
// via lfs_rawunmount.
static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_init(lfs, cfg);
    if (err) {
        return err;
    }

    // scan directory blocks for superblock and any global updates
    lfs_mdir_t dir = {.tail = {0, 1}};
    lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
    lfs_size_t tortoise_i = 1;
    lfs_size_t tortoise_period = 1;
    while (!lfs_pair_isnull(dir.tail)) {
        // detect cycles with Brent's algorithm
        if (lfs_pair_issync(dir.tail, tortoise)) {
            LFS_WARN("Cycle detected in tail list");
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }
        if (tortoise_i == tortoise_period) {
            tortoise[0] = dir.tail[0];
            tortoise[1] = dir.tail[1];
            tortoise_i = 0;
            tortoise_period *= 2;
        }
        tortoise_i += 1;

        // fetch next block in tail list
        lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
                LFS_MKTAG(0x7ff, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
                NULL,
                lfs_dir_find_match, &(struct lfs_dir_find_match){
                    lfs, "littlefs", 8});
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }

        // has superblock?
        if (tag && !lfs_tag_isdelete(tag)) {
            // update root
            lfs->root[0] = dir.pair[0];
            lfs->root[1] = dir.pair[1];

            // grab superblock
            lfs_superblock_t superblock;
            tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock);
            if (tag < 0) {
                err = tag;
                goto cleanup;
            }
            lfs_superblock_fromle32(&superblock);

            // check version
            uint16_t major_version = (0xffff & (superblock.version >> 16));
            uint16_t minor_version = (0xffff & (superblock.version >> 0));
            if (major_version != lfs_fs_disk_version_major(lfs)
                    || minor_version > lfs_fs_disk_version_minor(lfs)) {
                LFS_ERROR("Invalid version "
                        "v%"PRIu16".%"PRIu16" != v%"PRIu16".%"PRIu16,
                        major_version,
                        minor_version,
                        lfs_fs_disk_version_major(lfs),
                        lfs_fs_disk_version_minor(lfs));
                err = LFS_ERR_INVAL;
                goto cleanup;
            }

            // found older minor version? set an in-device only bit in the
            // gstate so we know we need to rewrite the superblock before
            // the first write
            if (minor_version < lfs_fs_disk_version_minor(lfs)) {
                LFS_DEBUG("Found older minor version "
                        "v%"PRIu16".%"PRIu16" < v%"PRIu16".%"PRIu16,
                        major_version,
                        minor_version,
                        lfs_fs_disk_version_major(lfs),
                        lfs_fs_disk_version_minor(lfs));
                // note this bit is reserved on disk, so fetching more gstate
                // will not interfere here
                lfs_fs_prepsuperblock(lfs, true);
            }

            // check superblock configuration
            // (on-disk limits may shrink but never exceed our config)
            if (superblock.name_max) {
                if (superblock.name_max > lfs->name_max) {
                    LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
                            superblock.name_max, lfs->name_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->name_max = superblock.name_max;
            }

            if (superblock.file_max) {
                if (superblock.file_max > lfs->file_max) {
                    LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
                            superblock.file_max, lfs->file_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->file_max = superblock.file_max;
            }

            if (superblock.attr_max) {
                if (superblock.attr_max > lfs->attr_max) {
                    LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
                            superblock.attr_max, lfs->attr_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->attr_max = superblock.attr_max;
            }

            // this is where we get the block_count from disk if block_count=0
            if (lfs->cfg->block_count
                    && superblock.block_count != lfs->cfg->block_count) {
                LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")",
                        superblock.block_count, lfs->cfg->block_count);
                err = LFS_ERR_INVAL;
                goto cleanup;
            }

            lfs->block_count = superblock.block_count;

            if (superblock.block_size != lfs->cfg->block_size) {
                LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")",
                        superblock.block_size, lfs->cfg->block_size);
                err = LFS_ERR_INVAL;
                goto cleanup;
            }
        }

        // has gstate?
        err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
        if (err) {
            goto cleanup;
        }
    }

    // update littlefs with gstate
    if (!lfs_gstate_iszero(&lfs->gstate)) {
        LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
                lfs->gstate.tag,
                lfs->gstate.pair[0],
                lfs->gstate.pair[1]);
    }
    lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
    lfs->gdisk = lfs->gstate;

    // setup free lookahead, to distribute allocations uniformly across
    // boots, we start the allocator at a random location
    lfs->free.off = lfs->seed % lfs->block_count;
    lfs_alloc_drop(lfs);

    return 0;

cleanup:
    lfs_rawunmount(lfs);
    return err;
}
4495
// Unmount the filesystem. Thin wrapper over lfs_deinit; littlefs keeps no
// state that needs flushing on unmount beyond what lfs_deinit releases.
static int lfs_rawunmount(lfs_t *lfs) {
    return lfs_deinit(lfs);
}
4499
4500
/// Filesystem-level operations ///
4502 static int lfs_fs_rawstat(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
4503 // if the superblock is up-to-date, we must be on the most recent
4504 // minor version of littlefs
4505 if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
4506 fsinfo->disk_version = lfs_fs_disk_version(lfs);
4507
4508 // otherwise we need to read the minor version on disk
4509 } else {
4510 // fetch the superblock
4511 lfs_mdir_t dir;
4512 int err = lfs_dir_fetch(lfs, &dir, lfs->root);
4513 if (err) {
4514 return err;
4515 }
4516
4517 lfs_superblock_t superblock;
4518 lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
4519 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4520 &superblock);
4521 if (tag < 0) {
4522 return tag;
4523 }
4524 lfs_superblock_fromle32(&superblock);
4525
4526 // read the on-disk version
4527 fsinfo->disk_version = superblock.version;
4528 }
4529
4530 // filesystem geometry
4531 fsinfo->block_size = lfs->cfg->block_size;
4532 fsinfo->block_count = lfs->block_count;
4533
4534 // other on-disk configuration, we cache all of these for internal use
4535 fsinfo->name_max = lfs->name_max;
4536 fsinfo->file_max = lfs->file_max;
4537 fsinfo->attr_max = lfs->attr_max;
4538
4539 return 0;
4540 }
4541
// Traverse every block currently in use by the filesystem, calling cb with
// each block address (blocks may be reported more than once).
//
// Visits, in order:
// - both blocks of every metadata pair in the tail list
// - every block of every CTZ file referenced by metadata
// - if includeorphans, the pairs referenced by dirstruct entries
// - blocks referenced by open files with unsynced (dirty/writing) state
//
// Returns 0, a negative error code, or the first nonzero value cb returns.
int lfs_fs_rawtraverse(lfs_t *lfs,
        int (*cb)(void *data, lfs_block_t block), void *data,
        bool includeorphans) {
    // iterate over metadata pairs
    lfs_mdir_t dir = {.tail = {0, 1}};

#ifdef LFS_MIGRATE
    // also consider v1 blocks during migration
    if (lfs->lfs1) {
        int err = lfs1_traverse(lfs, cb, data);
        if (err) {
            return err;
        }

        dir.tail[0] = lfs->root[0];
        dir.tail[1] = lfs->root[1];
    }
#endif

    // Brent's algorithm: the tortoise teleports to the current position
    // every power-of-two steps, so any cycle in a corrupted tail list is
    // eventually entered and detected
    lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
    lfs_size_t tortoise_i = 1;
    lfs_size_t tortoise_period = 1;
    while (!lfs_pair_isnull(dir.tail)) {
        // detect cycles with Brent's algorithm
        if (lfs_pair_issync(dir.tail, tortoise)) {
            LFS_WARN("Cycle detected in tail list");
            return LFS_ERR_CORRUPT;
        }
        if (tortoise_i == tortoise_period) {
            tortoise[0] = dir.tail[0];
            tortoise[1] = dir.tail[1];
            tortoise_i = 0;
            tortoise_period *= 2;
        }
        tortoise_i += 1;

        // report both blocks of the metadata pair itself
        for (int i = 0; i < 2; i++) {
            int err = cb(data, dir.tail[i]);
            if (err) {
                return err;
            }
        }

        // iterate through ids in directory
        int err = lfs_dir_fetch(lfs, &dir, dir.tail);
        if (err) {
            return err;
        }

        for (uint16_t id = 0; id < dir.count; id++) {
            struct lfs_ctz ctz;
            lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
            if (tag < 0) {
                if (tag == LFS_ERR_NOENT) {
                    // no struct entry for this id, nothing to traverse
                    continue;
                }
                return tag;
            }
            lfs_ctz_fromle32(&ctz);

            if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
                // walk every block of the file's CTZ skip-list
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        ctz.head, ctz.size, cb, data);
                if (err) {
                    return err;
                }
            } else if (includeorphans &&
                    lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
                // also report the referenced child pair, which may be
                // an orphan not (yet) threaded into the tail list
                for (int i = 0; i < 2; i++) {
                    err = cb(data, (&ctz.head)[i]);
                    if (err) {
                        return err;
                    }
                }
            }
        }
    }

#ifndef LFS_READONLY
    // iterate over any open files
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (f->type != LFS_TYPE_REG) {
            continue;
        }

        // dirty files may reference blocks not yet linked from metadata
        if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->ctz.head, f->ctz.size, cb, data);
            if (err) {
                return err;
            }
        }

        // plus the CTZ list currently being written
        if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
            int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
                    f->block, f->pos, cb, data);
            if (err) {
                return err;
            }
        }
    }
#endif

    return 0;
}
4648
4649 #ifndef LFS_READONLY
// Find the metadata pair whose tail references pair, i.e. its predecessor
// in the threaded tail list. On success pdir holds the predecessor and 0
// is returned; LFS_ERR_NOENT if pair has no predecessor.
static int lfs_fs_pred(lfs_t *lfs,
        const lfs_block_t pair[2], lfs_mdir_t *pdir) {
    // iterate over all directory entries, following the tail list from
    // the superblock pair {0, 1}
    pdir->tail[0] = 0;
    pdir->tail[1] = 1;
    lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
    lfs_size_t tortoise_i = 1;
    lfs_size_t tortoise_period = 1;
    while (!lfs_pair_isnull(pdir->tail)) {
        // detect cycles with Brent's algorithm
        if (lfs_pair_issync(pdir->tail, tortoise)) {
            LFS_WARN("Cycle detected in tail list");
            return LFS_ERR_CORRUPT;
        }
        if (tortoise_i == tortoise_period) {
            tortoise[0] = pdir->tail[0];
            tortoise[1] = pdir->tail[1];
            tortoise_i = 0;
            tortoise_period *= 2;
        }
        tortoise_i += 1;

        // found the pair we were looking for?
        if (lfs_pair_cmp(pdir->tail, pair) == 0) {
            return 0;
        }

        int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
        if (err) {
            return err;
        }
    }

    return LFS_ERR_NOENT;
}
4684 #endif
4685
4686 #ifndef LFS_READONLY
// Callback context for lfs_fs_parent_match: the filesystem state plus the
// metadata pair we are searching for a reference to.
struct lfs_fs_parent_match {
    lfs_t *lfs;
    const lfs_block_t pair[2];
};
4691 #endif
4692
4693 #ifndef LFS_READONLY
4694 static int lfs_fs_parent_match(void *data,
4695 lfs_tag_t tag, const void *buffer) {
4696 struct lfs_fs_parent_match *find = data;
4697 lfs_t *lfs = find->lfs;
4698 const struct lfs_diskoff *disk = buffer;
4699 (void)tag;
4700
4701 lfs_block_t child[2];
4702 int err = lfs_bd_read(lfs,
4703 &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
4704 disk->block, disk->off, &child, sizeof(child));
4705 if (err) {
4706 return err;
4707 }
4708
4709 lfs_pair_fromle32(child);
4710 return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
4711 }
4712 #endif
4713
4714 #ifndef LFS_READONLY
// Find the metadata pair containing the dirstruct entry that references
// pair (its parent). On success parent holds the parent mdir and the
// returned tag identifies the matching entry; LFS_ERR_NOENT if no parent
// references the pair.
static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
        lfs_mdir_t *parent) {
    // use fetchmatch with callback to find pairs
    parent->tail[0] = 0;
    parent->tail[1] = 1;
    lfs_block_t tortoise[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
    lfs_size_t tortoise_i = 1;
    lfs_size_t tortoise_period = 1;
    while (!lfs_pair_isnull(parent->tail)) {
        // detect cycles with Brent's algorithm
        if (lfs_pair_issync(parent->tail, tortoise)) {
            LFS_WARN("Cycle detected in tail list");
            return LFS_ERR_CORRUPT;
        }
        if (tortoise_i == tortoise_period) {
            tortoise[0] = parent->tail[0];
            tortoise[1] = parent->tail[1];
            tortoise_i = 0;
            tortoise_period *= 2;
        }
        tortoise_i += 1;

        lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
                LFS_MKTAG(0x7ff, 0, 0x3ff),
                LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
                NULL,
                lfs_fs_parent_match, &(struct lfs_fs_parent_match){
                    lfs, {pair[0], pair[1]}});
        // a zero tag means no match in this mdir, keep walking the list;
        // any other non-NOENT value (match or error) is returned as-is
        if (tag && tag != LFS_ERR_NOENT) {
            return tag;
        }
    }

    return LFS_ERR_NOENT;
}
4750 #endif
4751
4752 static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock) {
4753 lfs->gstate.tag = (lfs->gstate.tag & ~LFS_MKTAG(0, 0, 0x200))
4754 | (uint32_t)needssuperblock << 9;
4755 }
4756
4757 #ifndef LFS_READONLY
4758 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
4759 LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0x000 || orphans >= 0);
4760 LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) < 0x1ff || orphans <= 0);
4761 lfs->gstate.tag += orphans;
4762 lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
4763 ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
4764
4765 return 0;
4766 }
4767 #endif
4768
4769 #ifndef LFS_READONLY
4770 static void lfs_fs_prepmove(lfs_t *lfs,
4771 uint16_t id, const lfs_block_t pair[2]) {
4772 lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
4773 ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
4774 lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
4775 lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
4776 }
4777 #endif
4778
4779 #ifndef LFS_READONLY
// If the gstate says the superblock is out of date (an older minor version
// was found at mount), rewrite the superblock entry with our current
// version and limits, then clear the flag. No-op otherwise.
static int lfs_fs_desuperblock(lfs_t *lfs) {
    if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
        return 0;
    }

    LFS_DEBUG("Rewriting superblock {0x%"PRIx32", 0x%"PRIx32"}",
            lfs->root[0],
            lfs->root[1]);

    lfs_mdir_t root;
    int err = lfs_dir_fetch(lfs, &root, lfs->root);
    if (err) {
        return err;
    }

    // write a new superblock
    lfs_superblock_t superblock = {
        .version = lfs_fs_disk_version(lfs),
        .block_size = lfs->cfg->block_size,
        .block_count = lfs->block_count,
        .name_max = lfs->name_max,
        .file_max = lfs->file_max,
        .attr_max = lfs->attr_max,
    };

    lfs_superblock_tole32(&superblock);
    err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                &superblock}));
    if (err) {
        return err;
    }

    // superblock is now up to date, clear the flag
    lfs_fs_prepsuperblock(lfs, false);
    return 0;
}
4816 #endif
4817
4818 #ifndef LFS_READONLY
// Complete an interrupted move recorded in the on-disk gstate by deleting
// the moved-from entry, so the move cannot be applied twice. No-op if no
// move is pending on disk.
static int lfs_fs_demove(lfs_t *lfs) {
    if (!lfs_gstate_hasmove(&lfs->gdisk)) {
        return 0;
    }

    // Fix bad moves
    LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
            lfs->gdisk.pair[0],
            lfs->gdisk.pair[1],
            lfs_tag_id(lfs->gdisk.tag));

    // no other gstate is supported at this time, so if we found something else
    // something most likely went wrong in gstate calculation
    LFS_ASSERT(lfs_tag_type3(lfs->gdisk.tag) == LFS_TYPE_DELETE);

    // fetch and delete the moved entry
    lfs_mdir_t movedir;
    int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
    if (err) {
        return err;
    }

    // prep gstate and delete move id
    uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
    lfs_fs_prepmove(lfs, 0x3ff, NULL);
    err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
    if (err) {
        return err;
    }

    return 0;
}
4852 #endif
4853
4854 #ifndef LFS_READONLY
// Find and repair any orphaned metadata pairs recorded in the gstate's
// orphan count. Half-orphans (desynced tails after relocation) are always
// fixed; full orphans (unreferenced pairs) are only removed when powerloss
// is true, since normal operations (e.g. lfs_mkdir) create them on purpose.
static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
    if (!lfs_gstate_hasorphans(&lfs->gstate)) {
        return 0;
    }

    // Check for orphans in two separate passes:
    // - 1 for half-orphans (relocations)
    // - 2 for full-orphans (removes/renames)
    //
    // Two separate passes are needed as half-orphans can contain outdated
    // references to full-orphans, effectively hiding them from the deorphan
    // search.
    //
    int pass = 0;
    while (pass < 2) {
        // Fix any orphans
        lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
        lfs_mdir_t dir;
        bool moreorphans = false;

        // iterate over all directory entries in the tail list
        while (!lfs_pair_isnull(pdir.tail)) {
            int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
            if (err) {
                return err;
            }

            // check head blocks for orphans; only the head of a split
            // group (pdir.split == false) can be a directory head
            if (!pdir.split) {
                // check if we have a parent
                lfs_mdir_t parent;
                lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
                if (tag < 0 && tag != LFS_ERR_NOENT) {
                    return tag;
                }

                if (pass == 0 && tag != LFS_ERR_NOENT) {
                    // read the pair the parent thinks we are
                    lfs_block_t pair[2];
                    lfs_stag_t state = lfs_dir_get(lfs, &parent,
                            LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
                    if (state < 0) {
                        return state;
                    }
                    lfs_pair_fromle32(pair);

                    if (!lfs_pair_issync(pair, pdir.tail)) {
                        // we have desynced
                        LFS_DEBUG("Fixing half-orphan "
                                "{0x%"PRIx32", 0x%"PRIx32"} "
                                "-> {0x%"PRIx32", 0x%"PRIx32"}",
                                pdir.tail[0], pdir.tail[1], pair[0], pair[1]);

                        // fix pending move in this pair? this looks like an
                        // optimization but is in fact _required_ since
                        // relocating may outdate the move.
                        uint16_t moveid = 0x3ff;
                        if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                            moveid = lfs_tag_id(lfs->gstate.tag);
                            LFS_DEBUG("Fixing move while fixing orphans "
                                    "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                                    pdir.pair[0], pdir.pair[1], moveid);
                            lfs_fs_prepmove(lfs, 0x3ff, NULL);
                        }

                        // rewrite our tail to the pair the parent references
                        lfs_pair_tole32(pair);
                        state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
                                {LFS_MKTAG_IF(moveid != 0x3ff,
                                    LFS_TYPE_DELETE, moveid, 0), NULL},
                                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
                                    pair}));
                        lfs_pair_fromle32(pair);
                        if (state < 0) {
                            return state;
                        }

                        // did our commit create more orphans?
                        if (state == LFS_OK_ORPHANED) {
                            moreorphans = true;
                        }

                        // refetch tail
                        continue;
                    }
                }

                // note we only check for full orphans if we may have had a
                // power-loss, otherwise orphans are created intentionally
                // during operations such as lfs_mkdir
                if (pass == 1 && tag == LFS_ERR_NOENT && powerloss) {
                    // we are an orphan
                    LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
                            pdir.tail[0], pdir.tail[1]);

                    // steal state
                    err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
                    if (err) {
                        return err;
                    }

                    // steal tail, unlinking the orphan from the list
                    lfs_pair_tole32(dir.tail);
                    int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
                            {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
                                dir.tail}));
                    lfs_pair_fromle32(dir.tail);
                    if (state < 0) {
                        return state;
                    }

                    // did our commit create more orphans?
                    if (state == LFS_OK_ORPHANED) {
                        moreorphans = true;
                    }

                    // refetch tail
                    continue;
                }
            }

            pdir = dir;
        }

        // restart from pass 0 if fixing orphans created new ones
        pass = moreorphans ? 0 : pass+1;
    }

    // mark orphans as fixed
    return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
}
4983 #endif
4984
4985 #ifndef LFS_READONLY
4986 static int lfs_fs_forceconsistency(lfs_t *lfs) {
4987 int err = lfs_fs_desuperblock(lfs);
4988 if (err) {
4989 return err;
4990 }
4991
4992 err = lfs_fs_demove(lfs);
4993 if (err) {
4994 return err;
4995 }
4996
4997 err = lfs_fs_deorphan(lfs, true);
4998 if (err) {
4999 return err;
5000 }
5001
5002 return 0;
5003 }
5004 #endif
5005
5006 #ifndef LFS_READONLY
// Make the filesystem consistent on disk: run all pending fixups, then
// flush any gstate that differs from what is on disk via an empty commit
// to the root.
static int lfs_fs_rawmkconsistent(lfs_t *lfs) {
    // lfs_fs_forceconsistency does most of the work here
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // do we have any pending gstate?
    lfs_gstate_t delta = {0};
    lfs_gstate_xor(&delta, &lfs->gdisk);
    lfs_gstate_xor(&delta, &lfs->gstate);
    if (!lfs_gstate_iszero(&delta)) {
        // lfs_dir_commit will implicitly write out any pending gstate
        lfs_mdir_t root;
        err = lfs_dir_fetch(lfs, &root, lfs->root);
        if (err) {
            return err;
        }

        // empty commit, just to flush the gstate
        err = lfs_dir_commit(lfs, &root, NULL, 0);
        if (err) {
            return err;
        }
    }

    return 0;
}
5034 #endif
5035
5036 static int lfs_fs_size_count(void *p, lfs_block_t block) {
5037 (void)block;
5038 lfs_size_t *size = p;
5039 *size += 1;
5040 return 0;
5041 }
5042
5043 static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
5044 lfs_size_t size = 0;
5045 int err = lfs_fs_rawtraverse(lfs, lfs_fs_size_count, &size, false);
5046 if (err) {
5047 return err;
5048 }
5049
5050 return size;
5051 }
5052
5053 #ifndef LFS_READONLY
// Grow the filesystem to block_count blocks. Shrinking is not supported
// (asserted). Updates the cached block count and rewrites the superblock
// entry's block_count on disk; no-op when the count is unchanged.
static int lfs_fs_rawgrow(lfs_t *lfs, lfs_size_t block_count) {
    // shrinking is not supported
    LFS_ASSERT(block_count >= lfs->block_count);

    if (block_count > lfs->block_count) {
        lfs->block_count = block_count;

        // fetch the root
        lfs_mdir_t root;
        int err = lfs_dir_fetch(lfs, &root, lfs->root);
        if (err) {
            return err;
        }

        // update the superblock
        lfs_superblock_t superblock;
        lfs_stag_t tag = lfs_dir_get(lfs, &root, LFS_MKTAG(0x7ff, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                &superblock);
        if (tag < 0) {
            return tag;
        }
        lfs_superblock_fromle32(&superblock);

        superblock.block_count = lfs->block_count;

        // commit the updated superblock back under the same tag
        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
                {tag, &superblock}));
        if (err) {
            return err;
        }
    }

    return 0;
}
5090 #endif
5091
5092 #ifdef LFS_MIGRATE
////// Migration from littlefs v1 below this //////
5094
/// Version info ///

// Software library version
// Major (top 16-bits), incremented on backwards incompatible changes
// Minor (bottom 16-bits), incremented on feature additions
#define LFS1_VERSION 0x00010007
#define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
#define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))

// Version of On-disk data structures
// Major (top 16-bits), incremented on backwards incompatible changes
// Minor (bottom 16-bits), incremented on feature additions
#define LFS1_DISK_VERSION 0x00010001
#define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
#define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
5110
5111
5112 /// v1 Definitions ///
5113
5114 // File types
// On-disk entry types for littlefs v1. Note the 0x80 bit of an entry's
// type marks it as moved (see lfs1_moved) and lfs1_traverse classifies
// entries by the 0x70 bits.
enum lfs1_type {
    LFS1_TYPE_REG = 0x11,
    LFS1_TYPE_DIR = 0x22,
    LFS1_TYPE_SUPERBLOCK = 0x2e,
};
5120
// v1 filesystem state carried through migration: just the v1 root
// directory pair.
typedef struct lfs1 {
    lfs_block_t root[2];
} lfs1_t;
5124
// In-RAM copy of a v1 directory entry.
typedef struct lfs1_entry {
    // offset of the entry within its directory block
    lfs_off_t off;

    // the on-disk layout of the entry
    struct lfs1_disk_entry {
        uint8_t type;
        // length of the entry struct (u) in bytes
        uint8_t elen;
        // length of attached attributes in bytes
        uint8_t alen;
        // length of the entry name in bytes
        uint8_t nlen;
        union {
            // file entries reference a CTZ list head + size
            struct {
                lfs_block_t head;
                lfs_size_t size;
            } file;
            // dir entries reference a metadata pair
            lfs_block_t dir[2];
        } u;
    } d;
} lfs1_entry_t;
5142
// In-RAM state for iterating a v1 directory.
typedef struct lfs1_dir {
    struct lfs1_dir *next;
    // current metadata pair; reads go through pair[0]
    lfs_block_t pair[2];
    // byte offset of the next entry within the current block
    lfs_off_t off;

    lfs_block_t head[2];
    // logical position across the whole (possibly chained) directory
    lfs_off_t pos;

    // on-disk directory header
    struct lfs1_disk_dir {
        // revision count; the newer of the pair's two blocks wins
        uint32_t rev;
        // used bytes in this block; the 0x80000000 bit flags a
        // continuing (tail-linked) directory
        lfs_size_t size;
        // next metadata pair in the chain
        lfs_block_t tail[2];
    } d;
} lfs1_dir_t;
5157
// In-RAM copy of the v1 superblock.
typedef struct lfs1_superblock {
    lfs_off_t off;

    // on-disk superblock layout; starts with the same type/elen/alen/nlen
    // header as lfs1_disk_entry
    struct lfs1_disk_superblock {
        uint8_t type;
        uint8_t elen;
        uint8_t alen;
        uint8_t nlen;
        // v1 root directory pair
        lfs_block_t root[2];
        // filesystem geometry
        uint32_t block_size;
        uint32_t block_count;
        // on-disk version: major in the top 16 bits, minor in the bottom
        uint32_t version;
        // must equal "littlefs"
        char magic[8];
    } d;
} lfs1_superblock_t;
5173
5174
5175 /// Low-level wrappers v1->v2 ///
// Fold buffer into the running crc; v1-style wrapper around lfs_crc.
static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
    *crc = lfs_crc(*crc, buffer, size);
}
5179
// Read size bytes at (block, off) into buffer, going through the v2
// caches.
static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    // if we ever do more than writes to alternating pairs,
    // this may need to consider pcache
    return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
            block, off, buffer, size);
}
5187
5188 static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
5189 lfs_off_t off, lfs_size_t size, uint32_t *crc) {
5190 for (lfs_off_t i = 0; i < size; i++) {
5191 uint8_t c;
5192 int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
5193 if (err) {
5194 return err;
5195 }
5196
5197 lfs1_crc(crc, &c, 1);
5198 }
5199
5200 return 0;
5201 }
5202
5203
5204 /// Endian swapping functions ///
5205 static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
5206 d->rev = lfs_fromle32(d->rev);
5207 d->size = lfs_fromle32(d->size);
5208 d->tail[0] = lfs_fromle32(d->tail[0]);
5209 d->tail[1] = lfs_fromle32(d->tail[1]);
5210 }
5211
5212 static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
5213 d->rev = lfs_tole32(d->rev);
5214 d->size = lfs_tole32(d->size);
5215 d->tail[0] = lfs_tole32(d->tail[0]);
5216 d->tail[1] = lfs_tole32(d->tail[1]);
5217 }
5218
5219 static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
5220 d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
5221 d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
5222 }
5223
5224 static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
5225 d->u.dir[0] = lfs_tole32(d->u.dir[0]);
5226 d->u.dir[1] = lfs_tole32(d->u.dir[1]);
5227 }
5228
5229 static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
5230 d->root[0] = lfs_fromle32(d->root[0]);
5231 d->root[1] = lfs_fromle32(d->root[1]);
5232 d->block_size = lfs_fromle32(d->block_size);
5233 d->block_count = lfs_fromle32(d->block_count);
5234 d->version = lfs_fromle32(d->version);
5235 }
5236
5237
5238 ///// Metadata pair and directory operations ///
5239 static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
5240 return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
5241 }
5242
// Fetch the most recent valid revision of a v1 metadata pair into dir.
// Both blocks of the pair are examined; the one with the newest revision
// count and a passing crc wins. Returns LFS_ERR_CORRUPT when neither
// block holds valid metadata.
static int lfs1_dir_fetch(lfs_t *lfs,
        lfs1_dir_t *dir, const lfs_block_t pair[2]) {
    // copy out pair, otherwise may be aliasing dir
    const lfs_block_t tpair[2] = {pair[0], pair[1]};
    bool valid = false;

    // check both blocks for the most recent revision
    for (int i = 0; i < 2; i++) {
        struct lfs1_disk_dir test;
        int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
        lfs1_dir_fromle32(&test);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                continue;
            }
            return err;
        }

        // skip if we already hold a newer revision
        if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
            continue;
        }

        // sanity-check the size field (top bit is the continuation flag)
        if ((0x7fffffff & test.size) < sizeof(test)+4 ||
            (0x7fffffff & test.size) > lfs->cfg->block_size) {
            continue;
        }

        // crc covers the header (in little-endian form) plus the contents
        uint32_t crc = 0xffffffff;
        lfs1_dir_tole32(&test);
        lfs1_crc(&crc, &test, sizeof(test));
        lfs1_dir_fromle32(&test);
        err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
                (0x7fffffff & test.size) - sizeof(test), &crc);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                continue;
            }
            return err;
        }

        if (crc != 0) {
            continue;
        }

        valid = true;

        // setup dir in case it's valid
        dir->pair[0] = tpair[(i+0) % 2];
        dir->pair[1] = tpair[(i+1) % 2];
        dir->off = sizeof(dir->d);
        dir->d = test;
    }

    if (!valid) {
        LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
                tpair[0], tpair[1]);
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
5304
// Advance to the next entry of a v1 directory, following the tail chain
// onto the next metadata pair when the current block is exhausted.
// Returns LFS_ERR_NOENT at the end of the directory.
static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
    // the last 4 bytes of each block are excluded (presumably the crc,
    // consistent with lfs1_dir_fetch's sizeof(test)+4 check -- TODO confirm)
    while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
        // the 0x80000000 bit of size flags a continuing directory
        if (!(0x80000000 & dir->d.size)) {
            entry->off = dir->off;
            return LFS_ERR_NOENT;
        }

        int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
        if (err) {
            return err;
        }

        dir->off = sizeof(dir->d);
        dir->pos += sizeof(dir->d) + 4;
    }

    int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
            &entry->d, sizeof(entry->d));
    lfs1_entry_fromle32(&entry->d);
    if (err) {
        return err;
    }

    entry->off = dir->off;
    dir->off += lfs1_entry_size(entry);
    dir->pos += lfs1_entry_size(entry);
    return 0;
}
5333
5334 /// littlefs v1 specific operations ///
// Traverse all blocks in use by the v1 filesystem, including any v2
// directories already threaded into the v1 structure during migration,
// calling cb with each block address.
int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
    if (lfs_pair_isnull(lfs->lfs1->root)) {
        return 0;
    }

    // iterate over metadata pairs
    lfs1_dir_t dir;
    lfs1_entry_t entry;
    lfs_block_t cwd[2] = {0, 1};

    while (true) {
        // report both blocks of the dir pair itself
        for (int i = 0; i < 2; i++) {
            int err = cb(data, cwd[i]);
            if (err) {
                return err;
            }
        }

        int err = lfs1_dir_fetch(lfs, &dir, cwd);
        if (err) {
            return err;
        }

        // iterate over contents
        while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
            err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
                    &entry.d, sizeof(entry.d));
            lfs1_entry_fromle32(&entry.d);
            if (err) {
                return err;
            }

            dir.off += lfs1_entry_size(&entry);
            // classify by the 0x70 type bits (the 0x80 bit marks moves)
            if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
                // report every block of the file's CTZ list
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        entry.d.u.file.head, entry.d.u.file.size, cb, data);
                if (err) {
                    return err;
                }
            }
        }

        // we also need to check if we contain a threaded v2 directory
        lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
        while (dir2.split) {
            // NOTE(review): fetch errors here intentionally end the v2
            // scan early (best-effort); a v2 dir may not exist yet
            err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
            if (err) {
                break;
            }

            for (int i = 0; i < 2; i++) {
                err = cb(data, dir2.pair[i]);
                if (err) {
                    return err;
                }
            }
        }

        cwd[0] = dir.d.tail[0];
        cwd[1] = dir.d.tail[1];

        if (lfs_pair_isnull(cwd)) {
            break;
        }
    }

    return 0;
}
5403
// Check whether a v1 entry (identified by its union contents e) appears
// elsewhere in the filesystem without the 0x80 moved mark, meaning the
// move already completed. Returns true/false, or a negative error code.
static int lfs1_moved(lfs_t *lfs, const void *e) {
    if (lfs_pair_isnull(lfs->lfs1->root)) {
        return 0;
    }

    // skip superblock
    lfs1_dir_t cwd;
    int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
    if (err) {
        return err;
    }

    // iterate over all directory entries
    lfs1_entry_t entry;
    while (!lfs_pair_isnull(cwd.d.tail)) {
        err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
        if (err) {
            return err;
        }

        while (true) {
            err = lfs1_dir_next(lfs, &cwd, &entry);
            if (err && err != LFS_ERR_NOENT) {
                return err;
            }

            if (err == LFS_ERR_NOENT) {
                break;
            }

            // a match on an unmoved entry (0x80 bit clear) means e has
            // already been copied to its destination
            if (!(0x80 & entry.d.type) &&
                 memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
                return true;
            }
        }
    }

    return false;
}
5443
5444 /// Filesystem operations ///
// Mount a v1 filesystem in preparation for migration: initialize the v2
// in-RAM state, then locate and validate the v1 superblock (magic string
// and disk version).
static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
        const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        lfs->lfs1 = lfs1;
        lfs->lfs1->root[0] = LFS_BLOCK_NULL;
        lfs->lfs1->root[1] = LFS_BLOCK_NULL;

        // setup free lookahead
        lfs->free.off = 0;
        lfs->free.size = 0;
        lfs->free.i = 0;
        lfs_alloc_ack(lfs);

        // load superblock
        lfs1_dir_t dir;
        lfs1_superblock_t superblock;
        err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
        if (err && err != LFS_ERR_CORRUPT) {
            goto cleanup;
        }

        if (!err) {
            err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
                    &superblock.d, sizeof(superblock.d));
            lfs1_superblock_fromle32(&superblock.d);
            if (err) {
                goto cleanup;
            }

            lfs->lfs1->root[0] = superblock.d.root[0];
            lfs->lfs1->root[1] = superblock.d.root[1];
        }

        // note the short-circuit keeps us from reading the uninitialized
        // superblock when the fetch above failed with LFS_ERR_CORRUPT
        if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
            LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
                    0, 1);
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }

        // major must match exactly, minor must not be newer than we know
        uint16_t major_version = (0xffff & (superblock.d.version >> 16));
        uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
        if ((major_version != LFS1_DISK_VERSION_MAJOR ||
             minor_version > LFS1_DISK_VERSION_MINOR)) {
            LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

        return 0;
    }

cleanup:
    lfs_deinit(lfs);
    return err;
}
5507
// Release the resources allocated by lfs1_mount; thin wrapper over
// lfs_deinit.
static int lfs1_unmount(lfs_t *lfs) {
    return lfs_deinit(lfs);
}
5511
5512 /// v1 migration ///
5513 static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {
5514 struct lfs1 lfs1;
5515
5516 // Indeterminate filesystem size not allowed for migration.
5517 LFS_ASSERT(cfg->block_count != 0);
5518
5519 int err = lfs1_mount(lfs, &lfs1, cfg);
5520 if (err) {
5521 return err;
5522 }
5523
5524 {
5525 // iterate through each directory, copying over entries
5526 // into new directory
5527 lfs1_dir_t dir1;
5528 lfs_mdir_t dir2;
5529 dir1.d.tail[0] = lfs->lfs1->root[0];
5530 dir1.d.tail[1] = lfs->lfs1->root[1];
5531 while (!lfs_pair_isnull(dir1.d.tail)) {
5532 // iterate old dir
5533 err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
5534 if (err) {
5535 goto cleanup;
5536 }
5537
5538 // create new dir and bind as temporary pretend root
5539 err = lfs_dir_alloc(lfs, &dir2);
5540 if (err) {
5541 goto cleanup;
5542 }
5543
5544 dir2.rev = dir1.d.rev;
5545 dir1.head[0] = dir1.pair[0];
5546 dir1.head[1] = dir1.pair[1];
5547 lfs->root[0] = dir2.pair[0];
5548 lfs->root[1] = dir2.pair[1];
5549
5550 err = lfs_dir_commit(lfs, &dir2, NULL, 0);
5551 if (err) {
5552 goto cleanup;
5553 }
5554
5555 while (true) {
5556 lfs1_entry_t entry1;
5557 err = lfs1_dir_next(lfs, &dir1, &entry1);
5558 if (err && err != LFS_ERR_NOENT) {
5559 goto cleanup;
5560 }
5561
5562 if (err == LFS_ERR_NOENT) {
5563 break;
5564 }
5565
5566 // check that entry has not been moved
5567 if (entry1.d.type & 0x80) {
5568 int moved = lfs1_moved(lfs, &entry1.d.u);
5569 if (moved < 0) {
5570 err = moved;
5571 goto cleanup;
5572 }
5573
5574 if (moved) {
5575 continue;
5576 }
5577
5578 entry1.d.type &= ~0x80;
5579 }
5580
5581 // also fetch name
5582 char name[LFS_NAME_MAX+1];
5583 memset(name, 0, sizeof(name));
5584 err = lfs1_bd_read(lfs, dir1.pair[0],
5585 entry1.off + 4+entry1.d.elen+entry1.d.alen,
5586 name, entry1.d.nlen);
5587 if (err) {
5588 goto cleanup;
5589 }
5590
5591 bool isdir = (entry1.d.type == LFS1_TYPE_DIR);
5592
5593 // create entry in new dir
5594 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5595 if (err) {
5596 goto cleanup;
5597 }
5598
5599 uint16_t id;
5600 err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
5601 if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
5602 err = (err < 0) ? err : LFS_ERR_EXIST;
5603 goto cleanup;
5604 }
5605
5606 lfs1_entry_tole32(&entry1.d);
5607 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5608 {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
5609 {LFS_MKTAG_IF_ELSE(isdir,
5610 LFS_TYPE_DIR, id, entry1.d.nlen,
5611 LFS_TYPE_REG, id, entry1.d.nlen),
5612 name},
5613 {LFS_MKTAG_IF_ELSE(isdir,
5614 LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
5615 LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
5616 &entry1.d.u}));
5617 lfs1_entry_fromle32(&entry1.d);
5618 if (err) {
5619 goto cleanup;
5620 }
5621 }
5622
5623 if (!lfs_pair_isnull(dir1.d.tail)) {
5624 // find last block and update tail to thread into fs
5625 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5626 if (err) {
5627 goto cleanup;
5628 }
5629
5630 while (dir2.split) {
5631 err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
5632 if (err) {
5633 goto cleanup;
5634 }
5635 }
5636
5637 lfs_pair_tole32(dir2.pair);
5638 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5639 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
5640 lfs_pair_fromle32(dir2.pair);
5641 if (err) {
5642 goto cleanup;
5643 }
5644 }
5645
5646 // Copy over first block to thread into fs. Unfortunately
5647 // if this fails there is not much we can do.
5648 LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
5649 "-> {0x%"PRIx32", 0x%"PRIx32"}",
5650 lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);
5651
5652 err = lfs_bd_erase(lfs, dir1.head[1]);
5653 if (err) {
5654 goto cleanup;
5655 }
5656
5657 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5658 if (err) {
5659 goto cleanup;
5660 }
5661
5662 for (lfs_off_t i = 0; i < dir2.off; i++) {
5663 uint8_t dat;
5664 err = lfs_bd_read(lfs,
5665 NULL, &lfs->rcache, dir2.off,
5666 dir2.pair[0], i, &dat, 1);
5667 if (err) {
5668 goto cleanup;
5669 }
5670
5671 err = lfs_bd_prog(lfs,
5672 &lfs->pcache, &lfs->rcache, true,
5673 dir1.head[1], i, &dat, 1);
5674 if (err) {
5675 goto cleanup;
5676 }
5677 }
5678
5679 err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
5680 if (err) {
5681 goto cleanup;
5682 }
5683 }
5684
5685 // Create new superblock. This marks a successful migration!
5686 err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
5687 if (err) {
5688 goto cleanup;
5689 }
5690
5691 dir2.pair[0] = dir1.pair[0];
5692 dir2.pair[1] = dir1.pair[1];
5693 dir2.rev = dir1.d.rev;
5694 dir2.off = sizeof(dir2.rev);
5695 dir2.etag = 0xffffffff;
5696 dir2.count = 0;
5697 dir2.tail[0] = lfs->lfs1->root[0];
5698 dir2.tail[1] = lfs->lfs1->root[1];
5699 dir2.erased = false;
5700 dir2.split = true;
5701
5702 lfs_superblock_t superblock = {
5703 .version = LFS_DISK_VERSION,
5704 .block_size = lfs->cfg->block_size,
5705 .block_count = lfs->cfg->block_count,
5706 .name_max = lfs->name_max,
5707 .file_max = lfs->file_max,
5708 .attr_max = lfs->attr_max,
5709 };
5710
5711 lfs_superblock_tole32(&superblock);
5712 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5713 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
5714 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
5715 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
5716 &superblock}));
5717 if (err) {
5718 goto cleanup;
5719 }
5720
5721 // sanity check that fetch works
5722 err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
5723 if (err) {
5724 goto cleanup;
5725 }
5726
5727 // force compaction to prevent accidentally mounting v1
5728 dir2.erased = false;
5729 err = lfs_dir_commit(lfs, &dir2, NULL, 0);
5730 if (err) {
5731 goto cleanup;
5732 }
5733 }
5734
5735 cleanup:
5736 lfs1_unmount(lfs);
5737 return err;
5738 }
5739
5740 #endif
5741
5742
5743 /// Public API wrappers ///
5744
// These thin wrappers are the single place where tracing and thread-safety
// hooks are applied before delegating to the raw implementations
5746
5747 // Thread-safe wrappers if enabled
// Optional global locking: when LFS_THREADSAFE is defined, every public API
// call is serialized through the user-provided lock/unlock hooks in the
// config. Otherwise the macros collapse to no-ops; the (void)cfg keeps the
// argument "used" so no unused-variable warnings fire, and LFS_LOCK still
// evaluates to 0 (success) via the comma operator.
#ifdef LFS_THREADSAFE
#define LFS_LOCK(cfg) cfg->lock(cfg)
#define LFS_UNLOCK(cfg) cfg->unlock(cfg)
#else
#define LFS_LOCK(cfg) ((void)cfg, 0)
#define LFS_UNLOCK(cfg) ((void)cfg)
#endif
5755
5756 // Public API
5757 #ifndef LFS_READONLY
5758 int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
5759 int err = LFS_LOCK(cfg);
5760 if (err) {
5761 return err;
5762 }
5763 LFS_TRACE("lfs_format(%p, %p {.context=%p, "
5764 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
5765 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
5766 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
5767 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
5768 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
5769 ".prog_buffer=%p, .lookahead_buffer=%p, "
5770 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
5771 ".attr_max=%"PRIu32"})",
5772 (void*)lfs, (void*)cfg, cfg->context,
5773 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
5774 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
5775 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
5776 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
5777 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
5778 cfg->name_max, cfg->file_max, cfg->attr_max);
5779
5780 err = lfs_rawformat(lfs, cfg);
5781
5782 LFS_TRACE("lfs_format -> %d", err);
5783 LFS_UNLOCK(cfg);
5784 return err;
5785 }
5786 #endif
5787
5788 int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
5789 int err = LFS_LOCK(cfg);
5790 if (err) {
5791 return err;
5792 }
5793 LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
5794 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
5795 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
5796 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
5797 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
5798 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
5799 ".prog_buffer=%p, .lookahead_buffer=%p, "
5800 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
5801 ".attr_max=%"PRIu32"})",
5802 (void*)lfs, (void*)cfg, cfg->context,
5803 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
5804 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
5805 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
5806 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
5807 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
5808 cfg->name_max, cfg->file_max, cfg->attr_max);
5809
5810 err = lfs_rawmount(lfs, cfg);
5811
5812 LFS_TRACE("lfs_mount -> %d", err);
5813 LFS_UNLOCK(cfg);
5814 return err;
5815 }
5816
5817 int lfs_unmount(lfs_t *lfs) {
5818 int err = LFS_LOCK(lfs->cfg);
5819 if (err) {
5820 return err;
5821 }
5822 LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
5823
5824 err = lfs_rawunmount(lfs);
5825
5826 LFS_TRACE("lfs_unmount -> %d", err);
5827 LFS_UNLOCK(lfs->cfg);
5828 return err;
5829 }
5830
5831 #ifndef LFS_READONLY
5832 int lfs_remove(lfs_t *lfs, const char *path) {
5833 int err = LFS_LOCK(lfs->cfg);
5834 if (err) {
5835 return err;
5836 }
5837 LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);
5838
5839 err = lfs_rawremove(lfs, path);
5840
5841 LFS_TRACE("lfs_remove -> %d", err);
5842 LFS_UNLOCK(lfs->cfg);
5843 return err;
5844 }
5845 #endif
5846
5847 #ifndef LFS_READONLY
5848 int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
5849 int err = LFS_LOCK(lfs->cfg);
5850 if (err) {
5851 return err;
5852 }
5853 LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);
5854
5855 err = lfs_rawrename(lfs, oldpath, newpath);
5856
5857 LFS_TRACE("lfs_rename -> %d", err);
5858 LFS_UNLOCK(lfs->cfg);
5859 return err;
5860 }
5861 #endif
5862
5863 int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
5864 int err = LFS_LOCK(lfs->cfg);
5865 if (err) {
5866 return err;
5867 }
5868 LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
5869
5870 err = lfs_rawstat(lfs, path, info);
5871
5872 LFS_TRACE("lfs_stat -> %d", err);
5873 LFS_UNLOCK(lfs->cfg);
5874 return err;
5875 }
5876
5877 lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
5878 uint8_t type, void *buffer, lfs_size_t size) {
5879 int err = LFS_LOCK(lfs->cfg);
5880 if (err) {
5881 return err;
5882 }
5883 LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5884 (void*)lfs, path, type, buffer, size);
5885
5886 lfs_ssize_t res = lfs_rawgetattr(lfs, path, type, buffer, size);
5887
5888 LFS_TRACE("lfs_getattr -> %"PRId32, res);
5889 LFS_UNLOCK(lfs->cfg);
5890 return res;
5891 }
5892
5893 #ifndef LFS_READONLY
5894 int lfs_setattr(lfs_t *lfs, const char *path,
5895 uint8_t type, const void *buffer, lfs_size_t size) {
5896 int err = LFS_LOCK(lfs->cfg);
5897 if (err) {
5898 return err;
5899 }
5900 LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5901 (void*)lfs, path, type, buffer, size);
5902
5903 err = lfs_rawsetattr(lfs, path, type, buffer, size);
5904
5905 LFS_TRACE("lfs_setattr -> %d", err);
5906 LFS_UNLOCK(lfs->cfg);
5907 return err;
5908 }
5909 #endif
5910
5911 #ifndef LFS_READONLY
5912 int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
5913 int err = LFS_LOCK(lfs->cfg);
5914 if (err) {
5915 return err;
5916 }
5917 LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);
5918
5919 err = lfs_rawremoveattr(lfs, path, type);
5920
5921 LFS_TRACE("lfs_removeattr -> %d", err);
5922 LFS_UNLOCK(lfs->cfg);
5923 return err;
5924 }
5925 #endif
5926
5927 #ifndef LFS_NO_MALLOC
5928 int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
5929 int err = LFS_LOCK(lfs->cfg);
5930 if (err) {
5931 return err;
5932 }
5933 LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
5934 (void*)lfs, (void*)file, path, flags);
5935 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5936
5937 err = lfs_file_rawopen(lfs, file, path, flags);
5938
5939 LFS_TRACE("lfs_file_open -> %d", err);
5940 LFS_UNLOCK(lfs->cfg);
5941 return err;
5942 }
5943 #endif
5944
5945 int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
5946 const char *path, int flags,
5947 const struct lfs_file_config *cfg) {
5948 int err = LFS_LOCK(lfs->cfg);
5949 if (err) {
5950 return err;
5951 }
5952 LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
5953 ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
5954 (void*)lfs, (void*)file, path, flags,
5955 (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
5956 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5957
5958 err = lfs_file_rawopencfg(lfs, file, path, flags, cfg);
5959
5960 LFS_TRACE("lfs_file_opencfg -> %d", err);
5961 LFS_UNLOCK(lfs->cfg);
5962 return err;
5963 }
5964
5965 int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
5966 int err = LFS_LOCK(lfs->cfg);
5967 if (err) {
5968 return err;
5969 }
5970 LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
5971 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5972
5973 err = lfs_file_rawclose(lfs, file);
5974
5975 LFS_TRACE("lfs_file_close -> %d", err);
5976 LFS_UNLOCK(lfs->cfg);
5977 return err;
5978 }
5979
5980 #ifndef LFS_READONLY
5981 int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
5982 int err = LFS_LOCK(lfs->cfg);
5983 if (err) {
5984 return err;
5985 }
5986 LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
5987 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5988
5989 err = lfs_file_rawsync(lfs, file);
5990
5991 LFS_TRACE("lfs_file_sync -> %d", err);
5992 LFS_UNLOCK(lfs->cfg);
5993 return err;
5994 }
5995 #endif
5996
5997 lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
5998 void *buffer, lfs_size_t size) {
5999 int err = LFS_LOCK(lfs->cfg);
6000 if (err) {
6001 return err;
6002 }
6003 LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
6004 (void*)lfs, (void*)file, buffer, size);
6005 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6006
6007 lfs_ssize_t res = lfs_file_rawread(lfs, file, buffer, size);
6008
6009 LFS_TRACE("lfs_file_read -> %"PRId32, res);
6010 LFS_UNLOCK(lfs->cfg);
6011 return res;
6012 }
6013
6014 #ifndef LFS_READONLY
6015 lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
6016 const void *buffer, lfs_size_t size) {
6017 int err = LFS_LOCK(lfs->cfg);
6018 if (err) {
6019 return err;
6020 }
6021 LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
6022 (void*)lfs, (void*)file, buffer, size);
6023 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6024
6025 lfs_ssize_t res = lfs_file_rawwrite(lfs, file, buffer, size);
6026
6027 LFS_TRACE("lfs_file_write -> %"PRId32, res);
6028 LFS_UNLOCK(lfs->cfg);
6029 return res;
6030 }
6031 #endif
6032
6033 lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
6034 lfs_soff_t off, int whence) {
6035 int err = LFS_LOCK(lfs->cfg);
6036 if (err) {
6037 return err;
6038 }
6039 LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
6040 (void*)lfs, (void*)file, off, whence);
6041 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6042
6043 lfs_soff_t res = lfs_file_rawseek(lfs, file, off, whence);
6044
6045 LFS_TRACE("lfs_file_seek -> %"PRId32, res);
6046 LFS_UNLOCK(lfs->cfg);
6047 return res;
6048 }
6049
6050 #ifndef LFS_READONLY
6051 int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
6052 int err = LFS_LOCK(lfs->cfg);
6053 if (err) {
6054 return err;
6055 }
6056 LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
6057 (void*)lfs, (void*)file, size);
6058 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6059
6060 err = lfs_file_rawtruncate(lfs, file, size);
6061
6062 LFS_TRACE("lfs_file_truncate -> %d", err);
6063 LFS_UNLOCK(lfs->cfg);
6064 return err;
6065 }
6066 #endif
6067
6068 lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
6069 int err = LFS_LOCK(lfs->cfg);
6070 if (err) {
6071 return err;
6072 }
6073 LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
6074 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6075
6076 lfs_soff_t res = lfs_file_rawtell(lfs, file);
6077
6078 LFS_TRACE("lfs_file_tell -> %"PRId32, res);
6079 LFS_UNLOCK(lfs->cfg);
6080 return res;
6081 }
6082
6083 int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
6084 int err = LFS_LOCK(lfs->cfg);
6085 if (err) {
6086 return err;
6087 }
6088 LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
6089
6090 err = lfs_file_rawrewind(lfs, file);
6091
6092 LFS_TRACE("lfs_file_rewind -> %d", err);
6093 LFS_UNLOCK(lfs->cfg);
6094 return err;
6095 }
6096
6097 lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
6098 int err = LFS_LOCK(lfs->cfg);
6099 if (err) {
6100 return err;
6101 }
6102 LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
6103 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
6104
6105 lfs_soff_t res = lfs_file_rawsize(lfs, file);
6106
6107 LFS_TRACE("lfs_file_size -> %"PRId32, res);
6108 LFS_UNLOCK(lfs->cfg);
6109 return res;
6110 }
6111
6112 #ifndef LFS_READONLY
6113 int lfs_mkdir(lfs_t *lfs, const char *path) {
6114 int err = LFS_LOCK(lfs->cfg);
6115 if (err) {
6116 return err;
6117 }
6118 LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);
6119
6120 err = lfs_rawmkdir(lfs, path);
6121
6122 LFS_TRACE("lfs_mkdir -> %d", err);
6123 LFS_UNLOCK(lfs->cfg);
6124 return err;
6125 }
6126 #endif
6127
6128 int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
6129 int err = LFS_LOCK(lfs->cfg);
6130 if (err) {
6131 return err;
6132 }
6133 LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
6134 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir));
6135
6136 err = lfs_dir_rawopen(lfs, dir, path);
6137
6138 LFS_TRACE("lfs_dir_open -> %d", err);
6139 LFS_UNLOCK(lfs->cfg);
6140 return err;
6141 }
6142
6143 int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
6144 int err = LFS_LOCK(lfs->cfg);
6145 if (err) {
6146 return err;
6147 }
6148 LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);
6149
6150 err = lfs_dir_rawclose(lfs, dir);
6151
6152 LFS_TRACE("lfs_dir_close -> %d", err);
6153 LFS_UNLOCK(lfs->cfg);
6154 return err;
6155 }
6156
6157 int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
6158 int err = LFS_LOCK(lfs->cfg);
6159 if (err) {
6160 return err;
6161 }
6162 LFS_TRACE("lfs_dir_read(%p, %p, %p)",
6163 (void*)lfs, (void*)dir, (void*)info);
6164
6165 err = lfs_dir_rawread(lfs, dir, info);
6166
6167 LFS_TRACE("lfs_dir_read -> %d", err);
6168 LFS_UNLOCK(lfs->cfg);
6169 return err;
6170 }
6171
6172 int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
6173 int err = LFS_LOCK(lfs->cfg);
6174 if (err) {
6175 return err;
6176 }
6177 LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
6178 (void*)lfs, (void*)dir, off);
6179
6180 err = lfs_dir_rawseek(lfs, dir, off);
6181
6182 LFS_TRACE("lfs_dir_seek -> %d", err);
6183 LFS_UNLOCK(lfs->cfg);
6184 return err;
6185 }
6186
6187 lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
6188 int err = LFS_LOCK(lfs->cfg);
6189 if (err) {
6190 return err;
6191 }
6192 LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);
6193
6194 lfs_soff_t res = lfs_dir_rawtell(lfs, dir);
6195
6196 LFS_TRACE("lfs_dir_tell -> %"PRId32, res);
6197 LFS_UNLOCK(lfs->cfg);
6198 return res;
6199 }
6200
6201 int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
6202 int err = LFS_LOCK(lfs->cfg);
6203 if (err) {
6204 return err;
6205 }
6206 LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);
6207
6208 err = lfs_dir_rawrewind(lfs, dir);
6209
6210 LFS_TRACE("lfs_dir_rewind -> %d", err);
6211 LFS_UNLOCK(lfs->cfg);
6212 return err;
6213 }
6214
6215 int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
6216 int err = LFS_LOCK(lfs->cfg);
6217 if (err) {
6218 return err;
6219 }
6220 LFS_TRACE("lfs_fs_stat(%p, %p)", (void*)lfs, (void*)fsinfo);
6221
6222 err = lfs_fs_rawstat(lfs, fsinfo);
6223
6224 LFS_TRACE("lfs_fs_stat -> %d", err);
6225 LFS_UNLOCK(lfs->cfg);
6226 return err;
6227 }
6228
6229 lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
6230 int err = LFS_LOCK(lfs->cfg);
6231 if (err) {
6232 return err;
6233 }
6234 LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);
6235
6236 lfs_ssize_t res = lfs_fs_rawsize(lfs);
6237
6238 LFS_TRACE("lfs_fs_size -> %"PRId32, res);
6239 LFS_UNLOCK(lfs->cfg);
6240 return res;
6241 }
6242
6243 int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
6244 int err = LFS_LOCK(lfs->cfg);
6245 if (err) {
6246 return err;
6247 }
6248 LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
6249 (void*)lfs, (void*)(uintptr_t)cb, data);
6250
6251 err = lfs_fs_rawtraverse(lfs, cb, data, true);
6252
6253 LFS_TRACE("lfs_fs_traverse -> %d", err);
6254 LFS_UNLOCK(lfs->cfg);
6255 return err;
6256 }
6257
6258 #ifndef LFS_READONLY
6259 int lfs_fs_gc(lfs_t *lfs) {
6260 int err = LFS_LOCK(lfs->cfg);
6261 if (err) {
6262 return err;
6263 }
6264 LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);
6265
6266 err = lfs_fs_rawgc(lfs);
6267
6268 LFS_TRACE("lfs_fs_gc -> %d", err);
6269 LFS_UNLOCK(lfs->cfg);
6270 return err;
6271 }
6272 #endif
6273
6274 #ifndef LFS_READONLY
6275 int lfs_fs_mkconsistent(lfs_t *lfs) {
6276 int err = LFS_LOCK(lfs->cfg);
6277 if (err) {
6278 return err;
6279 }
6280 LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);
6281
6282 err = lfs_fs_rawmkconsistent(lfs);
6283
6284 LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
6285 LFS_UNLOCK(lfs->cfg);
6286 return err;
6287 }
6288 #endif
6289
6290 #ifndef LFS_READONLY
6291 int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count) {
6292 int err = LFS_LOCK(lfs->cfg);
6293 if (err) {
6294 return err;
6295 }
6296 LFS_TRACE("lfs_fs_grow(%p, %"PRIu32")", (void*)lfs, block_count);
6297
6298 err = lfs_fs_rawgrow(lfs, block_count);
6299
6300 LFS_TRACE("lfs_fs_grow -> %d", err);
6301 LFS_UNLOCK(lfs->cfg);
6302 return err;
6303 }
6304 #endif
6305
6306 #ifdef LFS_MIGRATE
6307 int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
6308 int err = LFS_LOCK(cfg);
6309 if (err) {
6310 return err;
6311 }
6312 LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
6313 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
6314 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
6315 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
6316 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
6317 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
6318 ".prog_buffer=%p, .lookahead_buffer=%p, "
6319 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
6320 ".attr_max=%"PRIu32"})",
6321 (void*)lfs, (void*)cfg, cfg->context,
6322 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
6323 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
6324 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
6325 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
6326 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
6327 cfg->name_max, cfg->file_max, cfg->attr_max);
6328
6329 err = lfs_rawmigrate(lfs, cfg);
6330
6331 LFS_TRACE("lfs_migrate -> %d", err);
6332 LFS_UNLOCK(cfg);
6333 return err;
6334 }
6335 #endif
6336
6337