1 /*
2 * The little filesystem
3 *
4 * Copyright (c) 2022, The littlefs authors.
5 * Copyright (c) 2017, Arm Limited. All rights reserved.
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8 #include "lfs.h"
9 #include "lfs_util.h"
10
11 #ifdef __ZEPHYR__
12 #include <zephyr/logging/log.h>
13 LOG_MODULE_DECLARE(littlefs, CONFIG_FS_LOG_LEVEL);
14 #endif
15
// sentinel block addresses:
//   LFS_BLOCK_NULL   — no/invalid block (used to mark empty caches/pairs)
//   LFS_BLOCK_INLINE — data lives inline in a metadata pair, not on a block
#define LFS_BLOCK_NULL ((lfs_block_t)-1)
#define LFS_BLOCK_INLINE ((lfs_block_t)-2)
18
// internal positive status codes, returned by operations that succeeded
// but need to report a side condition to the caller
enum {
    LFS_OK_RELOCATED = 1,
    LFS_OK_DROPPED   = 2,
    LFS_OK_ORPHANED  = 3,
};
24
// three-way comparison results, produced by lfs_bd_cmp
enum {
    LFS_CMP_EQ = 0,
    LFS_CMP_LT = 1,
    LFS_CMP_GT = 2,
};
30
31
32 /// Caching block device operations ///
33
// Invalidate a read cache without touching its buffer.
//
// Skipping the scrub is cheaper, and safe here: the cache is either
// read-only or about to be rewritten with identical data (relocates).
static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
    (void)lfs;
    rcache->block = LFS_BLOCK_NULL;
}
40
// Invalidate a program cache and scrub its buffer.
//
// The buffer is filled with 0xff (the flash erased state) so stale data
// cannot leak into later programs.
static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
    memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
    pcache->block = LFS_BLOCK_NULL;
}
46
// Read size bytes from (block, off) into buffer.
//
// Data is served from the pcache and rcache when possible; otherwise it is
// read from the block device, either directly (large aligned reads) or by
// filling the rcache. hint is how many bytes the caller expects to want
// soon — it steers how much is speculatively loaded into the rcache.
//
// Returns 0 on success, LFS_ERR_CORRUPT if the request falls outside the
// block, or a negative error from the device read.
static int lfs_bd_read(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    if (off+size > lfs->cfg->block_size
            || (lfs->block_count && block >= lfs->block_count)) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        lfs_size_t diff = size;

        if (pcache && block == pcache->block &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority
            diff = lfs_min(diff, pcache->off-off);
        }

        if (block == rcache->block &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority
            diff = lfs_min(diff, rcache->off-off);
        }

        if (size >= hint && off % lfs->cfg->read_size == 0 &&
                size >= lfs->cfg->read_size) {
            // bypass cache?
            diff = lfs_aligndown(diff, lfs->cfg->read_size);
            int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
            if (err) {
                return err;
            }

            data += diff;
            off += diff;
            size -= diff;
            continue;
        }

        // load to cache, first condition can no longer fail
        LFS_ASSERT(!lfs->block_count || block < lfs->block_count);
        rcache->block = block;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        // cache as much as fits: up to the hint, clamped to both the end
        // of the block and the cache size
        rcache->size = lfs_min(
                lfs_min(
                    lfs_alignup(off+hint, lfs->cfg->read_size),
                    lfs->cfg->block_size)
                - rcache->off,
                lfs->cfg->cache_size);
        int err = lfs->cfg->read(lfs->cfg, rcache->block,
                rcache->off, rcache->buffer, rcache->size);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }
    }

    return 0;
}
129
// Compare on-disk data at (block, off) against an in-memory buffer.
//
// Returns LFS_CMP_EQ/LT/GT (disk relative to buffer) or a negative error
// from the underlying read.
static int lfs_bd_cmp(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *expected = buffer;

    // compare in small chunks through a scratch buffer on the stack
    lfs_off_t i = 0;
    while (i < size) {
        uint8_t scratch[8];
        lfs_size_t chunk = lfs_min(size-i, sizeof(scratch));

        int err = lfs_bd_read(lfs,
                pcache, rcache, hint-i,
                block, off+i, &scratch, chunk);
        if (err) {
            return err;
        }

        int order = memcmp(scratch, expected+i, chunk);
        if (order != 0) {
            return (order < 0) ? LFS_CMP_LT : LFS_CMP_GT;
        }

        i += chunk;
    }

    return LFS_CMP_EQ;
}
156
// Fold size bytes of on-disk data at (block, off) into *crc.
//
// Returns 0 or a negative error from the underlying read.
static int lfs_bd_crc(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_block_t block, lfs_off_t off, lfs_size_t size, uint32_t *crc) {
    // checksum in small chunks through a scratch buffer on the stack
    lfs_off_t i = 0;
    while (i < size) {
        uint8_t scratch[8];
        lfs_size_t chunk = lfs_min(size-i, sizeof(scratch));

        int err = lfs_bd_read(lfs,
                pcache, rcache, hint-i,
                block, off+i, &scratch, chunk);
        if (err) {
            return err;
        }

        *crc = lfs_crc(*crc, &scratch, chunk);
        i += chunk;
    }

    return 0;
}
177
#ifndef LFS_READONLY
// Write out any data pending in the pcache to the block device.
//
// The programmed region is padded up to prog_size alignment. If validate
// is true, the data is read back (through the rcache) and compared against
// the pcache; a mismatch reports LFS_ERR_CORRUPT so the caller can
// relocate. On success the pcache is invalidated and scrubbed.
static int lfs_bd_flush(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
    if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
        LFS_ASSERT(pcache->block < lfs->block_count);
        // pad up to prog alignment
        lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
        int err = lfs->cfg->prog(lfs->cfg, pcache->block,
                pcache->off, pcache->buffer, diff);
        LFS_ASSERT(err <= 0);
        if (err) {
            return err;
        }

        if (validate) {
            // check data on disk
            lfs_cache_drop(lfs, rcache);
            int res = lfs_bd_cmp(lfs,
                    NULL, rcache, diff,
                    pcache->block, pcache->off, pcache->buffer, diff);
            if (res < 0) {
                return res;
            }

            if (res != LFS_CMP_EQ) {
                return LFS_ERR_CORRUPT;
            }
        }

        lfs_cache_zero(lfs, pcache);
    }

    return 0;
}
#endif
212
213 #ifndef LFS_READONLY
lfs_bd_sync(lfs_t * lfs,lfs_cache_t * pcache,lfs_cache_t * rcache,bool validate)214 static int lfs_bd_sync(lfs_t *lfs,
215 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
216 lfs_cache_drop(lfs, rcache);
217
218 int err = lfs_bd_flush(lfs, pcache, rcache, validate);
219 if (err) {
220 return err;
221 }
222
223 err = lfs->cfg->sync(lfs->cfg);
224 LFS_ASSERT(err <= 0);
225 return err;
226 }
227 #endif
228
#ifndef LFS_READONLY
// Program size bytes at (block, off) through the pcache.
//
// Data is accumulated in the pcache and flushed eagerly when the cache
// fills; callers must eventually flush/sync to push out any remainder.
// validate is forwarded to lfs_bd_flush.
static int lfs_bd_prog(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
        lfs_block_t block, lfs_off_t off,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->block_count);
    LFS_ASSERT(off + size <= lfs->cfg->block_size);

    while (size > 0) {
        if (block == pcache->block &&
                off >= pcache->off &&
                off < pcache->off + lfs->cfg->cache_size) {
            // already fits in pcache?
            lfs_size_t diff = lfs_min(size,
                    lfs->cfg->cache_size - (off-pcache->off));
            memcpy(&pcache->buffer[off-pcache->off], data, diff);

            data += diff;
            off += diff;
            size -= diff;

            pcache->size = lfs_max(pcache->size, off - pcache->off);
            if (pcache->size == lfs->cfg->cache_size) {
                // eagerly flush out pcache if we fill up
                int err = lfs_bd_flush(lfs, pcache, rcache, validate);
                if (err) {
                    return err;
                }
            }

            continue;
        }

        // pcache must have been flushed, either by programming and
        // entire block or manually flushing the pcache
        LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);

        // prepare pcache, first condition can no longer fail
        pcache->block = block;
        pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
        pcache->size = 0;
    }

    return 0;
}
#endif
276
277 #ifndef LFS_READONLY
lfs_bd_erase(lfs_t * lfs,lfs_block_t block)278 static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
279 LFS_ASSERT(block < lfs->block_count);
280 int err = lfs->cfg->erase(lfs->cfg, block);
281 LFS_ASSERT(err <= 0);
282 return err;
283 }
284 #endif
285
286
287 /// Small type-level utilities ///
288
289 // some operations on paths
lfs_path_namelen(const char * path)290 static inline lfs_size_t lfs_path_namelen(const char *path) {
291 return strcspn(path, "/");
292 }
293
lfs_path_islast(const char * path)294 static inline bool lfs_path_islast(const char *path) {
295 lfs_size_t namelen = lfs_path_namelen(path);
296 return path[namelen + strspn(path + namelen, "/")] == '\0';
297 }
298
lfs_path_isdir(const char * path)299 static inline bool lfs_path_isdir(const char *path) {
300 return path[lfs_path_namelen(path)] != '\0';
301 }
302
303 // operations on block pairs
// Swap the two halves of a block pair in place.
static inline void lfs_pair_swap(lfs_block_t pair[2]) {
    lfs_block_t tmp = pair[0];
    pair[0] = pair[1];
    pair[1] = tmp;
}
309
lfs_pair_isnull(const lfs_block_t pair[2])310 static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
311 return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
312 }
313
lfs_pair_cmp(const lfs_block_t paira[2],const lfs_block_t pairb[2])314 static inline int lfs_pair_cmp(
315 const lfs_block_t paira[2],
316 const lfs_block_t pairb[2]) {
317 return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
318 paira[0] == pairb[1] || paira[1] == pairb[0]);
319 }
320
lfs_pair_issync(const lfs_block_t paira[2],const lfs_block_t pairb[2])321 static inline bool lfs_pair_issync(
322 const lfs_block_t paira[2],
323 const lfs_block_t pairb[2]) {
324 return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
325 (paira[0] == pairb[1] && paira[1] == pairb[0]);
326 }
327
// Convert a pair from little-endian (on-disk) order to host order in place.
static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
    for (int i = 0; i < 2; i++) {
        pair[i] = lfs_fromle32(pair[i]);
    }
}
332
333 #ifndef LFS_READONLY
lfs_pair_tole32(lfs_block_t pair[2])334 static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
335 pair[0] = lfs_tole32(pair[0]);
336 pair[1] = lfs_tole32(pair[1]);
337 }
338 #endif
339
// operations on 32-bit entry tags
//
// tag layout (see LFS_MKTAG and the accessors below):
//   [31] inverted-valid bit  [30:20] type  [19:10] id  [9:0] size
typedef uint32_t lfs_tag_t;
typedef int32_t lfs_stag_t;

// construct a tag from its type/id/size fields
#define LFS_MKTAG(type, id, size) \
    (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))

// construct a tag, or a no-op tag if cond is false
#define LFS_MKTAG_IF(cond, type, id, size) \
    ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))

// construct one of two tags depending on cond
#define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
    ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
352
lfs_tag_isvalid(lfs_tag_t tag)353 static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
354 return !(tag & 0x80000000);
355 }
356
lfs_tag_isdelete(lfs_tag_t tag)357 static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
358 return ((int32_t)(tag << 22) >> 22) == -1;
359 }
360
lfs_tag_type1(lfs_tag_t tag)361 static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
362 return (tag & 0x70000000) >> 20;
363 }
364
lfs_tag_type2(lfs_tag_t tag)365 static inline uint16_t lfs_tag_type2(lfs_tag_t tag) {
366 return (tag & 0x78000000) >> 20;
367 }
368
lfs_tag_type3(lfs_tag_t tag)369 static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
370 return (tag & 0x7ff00000) >> 20;
371 }
372
lfs_tag_chunk(lfs_tag_t tag)373 static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
374 return (tag & 0x0ff00000) >> 20;
375 }
376
lfs_tag_splice(lfs_tag_t tag)377 static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
378 return (int8_t)lfs_tag_chunk(tag);
379 }
380
lfs_tag_id(lfs_tag_t tag)381 static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
382 return (tag & 0x000ffc00) >> 10;
383 }
384
lfs_tag_size(lfs_tag_t tag)385 static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
386 return tag & 0x000003ff;
387 }
388
// On-disk footprint of a tag plus its attached data.
//
// Deleted tags (size field = 0x3ff) carry no data: adding isdelete (0 or 1)
// before masking wraps the size field from 0x3ff to 0, so deletes
// contribute only sizeof(tag).
static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
    return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
}
392
// operations on attributes in attribute lists

// a single in-flight metadata attribute: a tag plus its backing buffer
struct lfs_mattr {
    lfs_tag_t tag;
    const void *buffer;
};

// a location on disk: a block plus an offset within that block
struct lfs_diskoff {
    lfs_block_t block;
    lfs_off_t off;
};

// expands to an lfs_mattr array plus its element count, for passing
// attribute lists as a (attrs, attrcount) argument pair
#define LFS_MKATTRS(...) \
    (struct lfs_mattr[]){__VA_ARGS__}, \
    sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
407
// operations on global state

// XOR gstate b into a, word by word.
//
// NOTE(review): accesses lfs_gstate_t through uint32_t* — assumes the
// struct is exactly three packed 32-bit words; confirm against lfs.h.
static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
    for (int i = 0; i < 3; i++) {
        ((uint32_t*)a)[i] ^= ((const uint32_t*)b)[i];
    }
}
414
lfs_gstate_iszero(const lfs_gstate_t * a)415 static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
416 for (int i = 0; i < 3; i++) {
417 if (((uint32_t*)a)[i] != 0) {
418 return false;
419 }
420 }
421 return true;
422 }
423
424 #ifndef LFS_READONLY
lfs_gstate_hasorphans(const lfs_gstate_t * a)425 static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
426 return lfs_tag_size(a->tag);
427 }
428
lfs_gstate_getorphans(const lfs_gstate_t * a)429 static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
430 return lfs_tag_size(a->tag) & 0x1ff;
431 }
432
lfs_gstate_hasmove(const lfs_gstate_t * a)433 static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
434 return lfs_tag_type1(a->tag);
435 }
436 #endif
437
lfs_gstate_needssuperblock(const lfs_gstate_t * a)438 static inline bool lfs_gstate_needssuperblock(const lfs_gstate_t *a) {
439 return lfs_tag_size(a->tag) >> 9;
440 }
441
lfs_gstate_hasmovehere(const lfs_gstate_t * a,const lfs_block_t * pair)442 static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
443 const lfs_block_t *pair) {
444 return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
445 }
446
// Convert a gstate from little-endian (on-disk) to host order in place.
static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
    a->tag = lfs_fromle32(a->tag);
    for (int i = 0; i < 2; i++) {
        a->pair[i] = lfs_fromle32(a->pair[i]);
    }
}
452
453 #ifndef LFS_READONLY
lfs_gstate_tole32(lfs_gstate_t * a)454 static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
455 a->tag = lfs_tole32(a->tag);
456 a->pair[0] = lfs_tole32(a->pair[0]);
457 a->pair[1] = lfs_tole32(a->pair[1]);
458 }
459 #endif
460
// operations on forward-CRCs used to track erased state

// a forward-CRC: the size of a checksummed region and its expected crc
struct lfs_fcrc {
    lfs_size_t size;
    uint32_t crc;
};
466
lfs_fcrc_fromle32(struct lfs_fcrc * fcrc)467 static void lfs_fcrc_fromle32(struct lfs_fcrc *fcrc) {
468 fcrc->size = lfs_fromle32(fcrc->size);
469 fcrc->crc = lfs_fromle32(fcrc->crc);
470 }
471
472 #ifndef LFS_READONLY
lfs_fcrc_tole32(struct lfs_fcrc * fcrc)473 static void lfs_fcrc_tole32(struct lfs_fcrc *fcrc) {
474 fcrc->size = lfs_tole32(fcrc->size);
475 fcrc->crc = lfs_tole32(fcrc->crc);
476 }
477 #endif
478
479 // other endianness operations
lfs_ctz_fromle32(struct lfs_ctz * ctz)480 static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
481 ctz->head = lfs_fromle32(ctz->head);
482 ctz->size = lfs_fromle32(ctz->size);
483 }
484
485 #ifndef LFS_READONLY
lfs_ctz_tole32(struct lfs_ctz * ctz)486 static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
487 ctz->head = lfs_tole32(ctz->head);
488 ctz->size = lfs_tole32(ctz->size);
489 }
490 #endif
491
// Convert a superblock from little-endian (on-disk) to host order in place.
static inline void lfs_superblock_fromle32(lfs_superblock_t *sb) {
    sb->version     = lfs_fromle32(sb->version);
    sb->block_size  = lfs_fromle32(sb->block_size);
    sb->block_count = lfs_fromle32(sb->block_count);
    sb->name_max    = lfs_fromle32(sb->name_max);
    sb->file_max    = lfs_fromle32(sb->file_max);
    sb->attr_max    = lfs_fromle32(sb->attr_max);
}
500
501 #ifndef LFS_READONLY
lfs_superblock_tole32(lfs_superblock_t * superblock)502 static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
503 superblock->version = lfs_tole32(superblock->version);
504 superblock->block_size = lfs_tole32(superblock->block_size);
505 superblock->block_count = lfs_tole32(superblock->block_count);
506 superblock->name_max = lfs_tole32(superblock->name_max);
507 superblock->file_max = lfs_tole32(superblock->file_max);
508 superblock->attr_max = lfs_tole32(superblock->attr_max);
509 }
510 #endif
511
#ifndef LFS_NO_ASSERT
#if __ASSERT_ON
// Is node currently linked into the mlist starting at head?
// Only used by assertions.
static bool lfs_mlist_isopen(struct lfs_mlist *head,
        struct lfs_mlist *node) {
    for (struct lfs_mlist *p = head; p; p = p->next) {
        if (p == node) {
            return true;
        }
    }

    return false;
}
#endif
#endif
526
// Unlink mlist from the filesystem's list of open metadata handles.
// A no-op if mlist is not in the list.
static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
    // walk the chain of next-pointers so removing the head needs no
    // special case
    struct lfs_mlist **pp = &lfs->mlist;
    while (*pp) {
        if (*pp == mlist) {
            *pp = (*pp)->next;
            return;
        }
        pp = &(*pp)->next;
    }
}
535
// Push mlist onto the front of the filesystem's open-handle list.
static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
    mlist->next = lfs->mlist;
    lfs->mlist = mlist;
}
540
// some other filesystem operations

// The on-disk version this filesystem uses: the user-configured
// disk_version when LFS_MULTIVERSION is enabled and one was given,
// otherwise the compiled-in LFS_DISK_VERSION.
static uint32_t lfs_fs_disk_version(lfs_t *lfs) {
    (void)lfs;
#ifdef LFS_MULTIVERSION
    if (lfs->cfg->disk_version) {
        return lfs->cfg->disk_version;
    } else
#endif
    {
        return LFS_DISK_VERSION;
    }
}
553
lfs_fs_disk_version_major(lfs_t * lfs)554 static uint16_t lfs_fs_disk_version_major(lfs_t *lfs) {
555 return 0xffff & (lfs_fs_disk_version(lfs) >> 16);
556
557 }
558
lfs_fs_disk_version_minor(lfs_t * lfs)559 static uint16_t lfs_fs_disk_version_minor(lfs_t *lfs) {
560 return 0xffff & (lfs_fs_disk_version(lfs) >> 0);
561 }
562
563
/// Internal operations predeclared here ///
#ifndef LFS_READONLY
// mutating metadata and file operations
static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount);
static int lfs_dir_compact(lfs_t *lfs,
        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end);
static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size);
static lfs_ssize_t lfs_file_write_(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size);
static int lfs_file_sync_(lfs_t *lfs, lfs_file_t *file);
static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);

// filesystem consistency operations
static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
static void lfs_fs_prepmove(lfs_t *lfs,
        uint16_t id, const lfs_block_t pair[2]);
static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
        lfs_mdir_t *pdir);
static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
        lfs_mdir_t *parent);
static int lfs_fs_forceconsistency(lfs_t *lfs);
#endif

static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock);

#ifdef LFS_MIGRATE
// lfs v1 migration support
static int lfs1_traverse(lfs_t *lfs,
        int (*cb)(void*, lfs_block_t), void *data);
#endif

static int lfs_dir_rewind_(lfs_t *lfs, lfs_dir_t *dir);

// read-only file and filesystem operations
static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size);
static lfs_ssize_t lfs_file_read_(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size);
static int lfs_file_close_(lfs_t *lfs, lfs_file_t *file);
static lfs_soff_t lfs_file_size_(lfs_t *lfs, lfs_file_t *file);

static lfs_ssize_t lfs_fs_size_(lfs_t *lfs);
static int lfs_fs_traverse_(lfs_t *lfs,
        int (*cb)(void *data, lfs_block_t block), void *data,
        bool includeorphans);

static int lfs_deinit(lfs_t *lfs);
static int lfs_unmount_(lfs_t *lfs);
613
614
615 /// Block allocator ///
616
617 // allocations should call this when all allocated blocks are committed to
618 // the filesystem
619 //
620 // after a checkpoint, the block allocator may realloc any untracked blocks
lfs_alloc_ckpoint(lfs_t * lfs)621 static void lfs_alloc_ckpoint(lfs_t *lfs) {
622 lfs->lookahead.ckpoint = lfs->block_count;
623 }
624
625 // drop the lookahead buffer, this is done during mounting and failed
626 // traversals in order to avoid invalid lookahead state
lfs_alloc_drop(lfs_t * lfs)627 static void lfs_alloc_drop(lfs_t *lfs) {
628 lfs->lookahead.size = 0;
629 lfs->lookahead.next = 0;
630 lfs_alloc_ckpoint(lfs);
631 }
632
633 #ifndef LFS_READONLY
lfs_alloc_lookahead(void * p,lfs_block_t block)634 static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
635 lfs_t *lfs = (lfs_t*)p;
636 lfs_block_t off = ((block - lfs->lookahead.start)
637 + lfs->block_count) % lfs->block_count;
638
639 if (off < lfs->lookahead.size) {
640 lfs->lookahead.buffer[off / 8] |= 1U << (off % 8);
641 }
642
643 return 0;
644 }
645 #endif
646
#ifndef LFS_READONLY
// Advance the lookahead window and rebuild its free-block bitmap by
// traversing the filesystem. On traversal failure the lookahead state is
// dropped (it may be invalid) and the error is returned.
static int lfs_alloc_scan(lfs_t *lfs) {
    // move lookahead buffer to the first unused block
    //
    // note we limit the lookahead buffer to at most the amount of blocks
    // checkpointed, this prevents the math in lfs_alloc from underflowing
    lfs->lookahead.start = (lfs->lookahead.start + lfs->lookahead.next)
            % lfs->block_count;
    lfs->lookahead.next = 0;
    lfs->lookahead.size = lfs_min(
            8*lfs->cfg->lookahead_size,
            lfs->lookahead.ckpoint);

    // find mask of free blocks from tree
    memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
    int err = lfs_fs_traverse_(lfs, lfs_alloc_lookahead, lfs, true);
    if (err) {
        lfs_alloc_drop(lfs);
        return err;
    }

    return 0;
}
#endif
671
#ifndef LFS_READONLY
// Allocate one free block, returning it in *block.
//
// Free blocks come from the lookahead bitmap; when the window is
// exhausted, lfs_alloc_scan rebuilds it over the next region. Once every
// block since the last lfs_alloc_ckpoint has been considered, the
// filesystem is out of space and LFS_ERR_NOSPC is returned.
static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
    while (true) {
        // scan our lookahead buffer for free blocks
        while (lfs->lookahead.next < lfs->lookahead.size) {
            if (!(lfs->lookahead.buffer[lfs->lookahead.next / 8]
                    & (1U << (lfs->lookahead.next % 8)))) {
                // found a free block
                *block = (lfs->lookahead.start + lfs->lookahead.next)
                        % lfs->block_count;

                // eagerly find next free block to maximize how many blocks
                // lfs_alloc_ckpoint makes available for scanning
                while (true) {
                    lfs->lookahead.next += 1;
                    lfs->lookahead.ckpoint -= 1;

                    if (lfs->lookahead.next >= lfs->lookahead.size
                            || !(lfs->lookahead.buffer[lfs->lookahead.next / 8]
                                & (1U << (lfs->lookahead.next % 8)))) {
                        return 0;
                    }
                }
            }

            lfs->lookahead.next += 1;
            lfs->lookahead.ckpoint -= 1;
        }

        // In order to keep our block allocator from spinning forever when our
        // filesystem is full, we mark points where there are no in-flight
        // allocations with a checkpoint before starting a set of allocations.
        //
        // If we've looked at all blocks since the last checkpoint, we report
        // the filesystem as out of storage.
        //
        if (lfs->lookahead.ckpoint <= 0) {
            LFS_ERROR("No more free space 0x%"PRIx32,
                    (lfs->lookahead.start + lfs->lookahead.next)
                        % lfs->block_count);
            return LFS_ERR_NOSPC;
        }

        // No blocks in our lookahead buffer, we need to scan the filesystem for
        // unused blocks in the next lookahead window.
        int err = lfs_alloc_scan(lfs);
        if(err) {
            return err;
        }
    }
}
#endif
724
/// Metadata pair and directory operations ///

// Find the most recent tag matching gmask/gtag in a metadata pair and copy
// up to gsize bytes of its data, starting at goff, into gbuffer.
//
// The commit log is walked backwards so the newest matching tag wins.
// Create/delete splices and a pending synthetic move (in lfs->gdisk) shift
// the searched id via gdiff. Returns the found tag (adjusted by gdiff),
// LFS_ERR_NOENT if the entry was deleted or never created, or a negative
// error code.
static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
    lfs_off_t off = dir->off;
    lfs_tag_t ntag = dir->etag;
    lfs_stag_t gdiff = 0;

    // synthetic moves
    if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
            lfs_tag_id(gmask) != 0) {
        if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(gtag)) {
            return LFS_ERR_NOENT;
        } else if (lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(gtag)) {
            gdiff -= LFS_MKTAG(0, 1, 0);
        }
    }

    // iterate over dir block backwards (for faster lookups)
    while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
        off -= lfs_tag_dsize(ntag);
        lfs_tag_t tag = ntag;
        int err = lfs_bd_read(lfs,
                NULL, &lfs->rcache, sizeof(ntag),
                dir->pair[0], off, &ntag, sizeof(ntag));
        if (err) {
            return err;
        }

        // on disk each tag is xored with the previous tag
        ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;

        if (lfs_tag_id(gmask) != 0 &&
                lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
                lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
            if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
                // found where we were created
                return LFS_ERR_NOENT;
            }

            // move around splices
            gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
        }

        if ((gmask & tag) == (gmask & (gtag - gdiff))) {
            if (lfs_tag_isdelete(tag)) {
                return LFS_ERR_NOENT;
            }

            // found a match; copy out the requested slice, zero-padding
            // when the on-disk attribute is smaller than gsize
            lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
            err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, diff,
                    dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
            if (err) {
                return err;
            }

            memset((uint8_t*)gbuffer + diff, 0, gsize - diff);

            return tag + gdiff;
        }
    }

    return LFS_ERR_NOENT;
}
790
lfs_dir_get(lfs_t * lfs,const lfs_mdir_t * dir,lfs_tag_t gmask,lfs_tag_t gtag,void * buffer)791 static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
792 lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
793 return lfs_dir_getslice(lfs, dir,
794 gmask, gtag,
795 0, buffer, lfs_tag_size(gtag));
796 }
797
// Like lfs_bd_read, but for inline data stored in a metadata pair rather
// than on a raw block.
//
// Cache entries holding inline data are tagged with LFS_BLOCK_INLINE;
// hits are served from pcache/rcache, misses are filled via
// lfs_dir_getslice. Returns 0, LFS_ERR_CORRUPT for out-of-range reads, or
// a negative error.
static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
        const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
        lfs_tag_t gmask, lfs_tag_t gtag,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    if (off+size > lfs->cfg->block_size) {
        return LFS_ERR_CORRUPT;
    }

    while (size > 0) {
        lfs_size_t diff = size;

        if (pcache && pcache->block == LFS_BLOCK_INLINE &&
                off < pcache->off + pcache->size) {
            if (off >= pcache->off) {
                // is already in pcache?
                diff = lfs_min(diff, pcache->size - (off-pcache->off));
                memcpy(data, &pcache->buffer[off-pcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // pcache takes priority
            diff = lfs_min(diff, pcache->off-off);
        }

        if (rcache->block == LFS_BLOCK_INLINE &&
                off < rcache->off + rcache->size) {
            if (off >= rcache->off) {
                // is already in rcache?
                diff = lfs_min(diff, rcache->size - (off-rcache->off));
                memcpy(data, &rcache->buffer[off-rcache->off], diff);

                data += diff;
                off += diff;
                size -= diff;
                continue;
            }

            // rcache takes priority
            diff = lfs_min(diff, rcache->off-off);
        }

        // load to cache, first condition can no longer fail
        rcache->block = LFS_BLOCK_INLINE;
        rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
        rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
                lfs->cfg->cache_size);
        int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
                rcache->off, rcache->buffer, rcache->size);
        if (err < 0) {
            return err;
        }
    }

    return 0;
}
858
#ifndef LFS_READONLY
// Traversal callback used to filter out superseded tags.
//
// p points to the candidate tag being filtered. Returns true (and replaces
// the candidate with a no-op) when a newer tag makes it redundant: an
// exact match on the unique fields, the candidate itself being a delete,
// or an explicit delete of the candidate's id. Also adjusts the
// candidate's id for create/delete splices seen along the way.
static int lfs_dir_traverse_filter(void *p,
        lfs_tag_t tag, const void *buffer) {
    lfs_tag_t *filtertag = p;
    (void)buffer;

    // which mask depends on unique bit in tag structure
    uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
            ? LFS_MKTAG(0x7ff, 0x3ff, 0)
            : LFS_MKTAG(0x700, 0x3ff, 0);

    // check for redundancy
    if ((mask & tag) == (mask & *filtertag) ||
            lfs_tag_isdelete(*filtertag) ||
            (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
                LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
                    (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
        *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
        return true;
    }

    // check if we need to adjust for created/deleted tags
    if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
            lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
        *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
    }

    return false;
}
#endif
889
#ifndef LFS_READONLY
// maximum recursive depth of lfs_dir_traverse, the deepest call:
//
// traverse with commit
// '-> traverse with move
//     '-> traverse with filter
//
#define LFS_DIR_TRAVERSE_DEPTH 3

// saved state for one suspended lfs_dir_traverse invocation; used to turn
// the bounded recursion into an explicit stack
struct lfs_dir_traverse {
    // position in the on-disk log and pending attribute list
    const lfs_mdir_t *dir;
    lfs_off_t off;
    lfs_tag_t ptag;
    const struct lfs_mattr *attrs;
    int attrcount;

    // filter mask/tag and id range/offset for this invocation
    lfs_tag_t tmask;
    lfs_tag_t ttag;
    uint16_t begin;
    uint16_t end;
    int16_t diff;

    // callback invoked per tag
    int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
    void *data;

    // tag being processed when this frame was suspended
    lfs_tag_t tag;
    const void *buffer;
    struct lfs_diskoff disk;
};
919
lfs_dir_traverse(lfs_t * lfs,const lfs_mdir_t * dir,lfs_off_t off,lfs_tag_t ptag,const struct lfs_mattr * attrs,int attrcount,lfs_tag_t tmask,lfs_tag_t ttag,uint16_t begin,uint16_t end,int16_t diff,int (* cb)(void * data,lfs_tag_t tag,const void * buffer),void * data)920 static int lfs_dir_traverse(lfs_t *lfs,
921 const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
922 const struct lfs_mattr *attrs, int attrcount,
923 lfs_tag_t tmask, lfs_tag_t ttag,
924 uint16_t begin, uint16_t end, int16_t diff,
925 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
926 // This function in inherently recursive, but bounded. To allow tool-based
927 // analysis without unnecessary code-cost we use an explicit stack
928 struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
929 unsigned sp = 0;
930 int res;
931
932 // iterate over directory and attrs
933 lfs_tag_t tag;
934 const void *buffer;
935 struct lfs_diskoff disk = {0};
936 while (true) {
937 {
938 if (off+lfs_tag_dsize(ptag) < dir->off) {
939 off += lfs_tag_dsize(ptag);
940 int err = lfs_bd_read(lfs,
941 NULL, &lfs->rcache, sizeof(tag),
942 dir->pair[0], off, &tag, sizeof(tag));
943 if (err) {
944 return err;
945 }
946
947 tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
948 disk.block = dir->pair[0];
949 disk.off = off+sizeof(lfs_tag_t);
950 buffer = &disk;
951 ptag = tag;
952 } else if (attrcount > 0) {
953 tag = attrs[0].tag;
954 buffer = attrs[0].buffer;
955 attrs += 1;
956 attrcount -= 1;
957 } else {
958 // finished traversal, pop from stack?
959 res = 0;
960 break;
961 }
962
963 // do we need to filter?
964 lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
965 if ((mask & tmask & tag) != (mask & tmask & ttag)) {
966 continue;
967 }
968
969 if (lfs_tag_id(tmask) != 0) {
970 LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH);
971 // recurse, scan for duplicates, and update tag based on
972 // creates/deletes
973 stack[sp] = (struct lfs_dir_traverse){
974 .dir = dir,
975 .off = off,
976 .ptag = ptag,
977 .attrs = attrs,
978 .attrcount = attrcount,
979 .tmask = tmask,
980 .ttag = ttag,
981 .begin = begin,
982 .end = end,
983 .diff = diff,
984 .cb = cb,
985 .data = data,
986 .tag = tag,
987 .buffer = buffer,
988 .disk = disk,
989 };
990 sp += 1;
991
992 tmask = 0;
993 ttag = 0;
994 begin = 0;
995 end = 0;
996 diff = 0;
997 cb = lfs_dir_traverse_filter;
998 data = &stack[sp-1].tag;
999 continue;
1000 }
1001 }
1002
1003 popped:
1004 // in filter range?
1005 if (lfs_tag_id(tmask) != 0 &&
1006 !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
1007 continue;
1008 }
1009
1010 // handle special cases for mcu-side operations
1011 if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
1012 // do nothing
1013 } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
1014 // Without this condition, lfs_dir_traverse can exhibit an
1015 // extremely expensive O(n^3) of nested loops when renaming.
1016 // This happens because lfs_dir_traverse tries to filter tags by
1017 // the tags in the source directory, triggering a second
1018 // lfs_dir_traverse with its own filter operation.
1019 //
1020 // traverse with commit
1021 // '-> traverse with filter
1022 // '-> traverse with move
1023 // '-> traverse with filter
1024 //
1025 // However we don't actually care about filtering the second set of
1026 // tags, since duplicate tags have no effect when filtering.
1027 //
1028 // This check skips this unnecessary recursive filtering explicitly,
1029 // reducing this runtime from O(n^3) to O(n^2).
1030 if (cb == lfs_dir_traverse_filter) {
1031 continue;
1032 }
1033
1034 // recurse into move
1035 stack[sp] = (struct lfs_dir_traverse){
1036 .dir = dir,
1037 .off = off,
1038 .ptag = ptag,
1039 .attrs = attrs,
1040 .attrcount = attrcount,
1041 .tmask = tmask,
1042 .ttag = ttag,
1043 .begin = begin,
1044 .end = end,
1045 .diff = diff,
1046 .cb = cb,
1047 .data = data,
1048 .tag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0),
1049 };
1050 sp += 1;
1051
1052 uint16_t fromid = lfs_tag_size(tag);
1053 uint16_t toid = lfs_tag_id(tag);
1054 dir = buffer;
1055 off = 0;
1056 ptag = 0xffffffff;
1057 attrs = NULL;
1058 attrcount = 0;
1059 tmask = LFS_MKTAG(0x600, 0x3ff, 0);
1060 ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0);
1061 begin = fromid;
1062 end = fromid+1;
1063 diff = toid-fromid+diff;
1064 } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
1065 for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
1066 const struct lfs_attr *a = buffer;
1067 res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
1068 lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
1069 if (res < 0) {
1070 return res;
1071 }
1072
1073 if (res) {
1074 break;
1075 }
1076 }
1077 } else {
1078 res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
1079 if (res < 0) {
1080 return res;
1081 }
1082
1083 if (res) {
1084 break;
1085 }
1086 }
1087 }
1088
1089 if (sp > 0) {
1090 // pop from the stack and return, fortunately all pops share
1091 // a destination
1092 dir = stack[sp-1].dir;
1093 off = stack[sp-1].off;
1094 ptag = stack[sp-1].ptag;
1095 attrs = stack[sp-1].attrs;
1096 attrcount = stack[sp-1].attrcount;
1097 tmask = stack[sp-1].tmask;
1098 ttag = stack[sp-1].ttag;
1099 begin = stack[sp-1].begin;
1100 end = stack[sp-1].end;
1101 diff = stack[sp-1].diff;
1102 cb = stack[sp-1].cb;
1103 data = stack[sp-1].data;
1104 tag = stack[sp-1].tag;
1105 buffer = stack[sp-1].buffer;
1106 disk = stack[sp-1].disk;
1107 sp -= 1;
1108 goto popped;
1109 } else {
1110 return res;
1111 }
1112 }
1113 #endif
1114
lfs_dir_fetchmatch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2],lfs_tag_t fmask,lfs_tag_t ftag,uint16_t * id,int (* cb)(void * data,lfs_tag_t tag,const void * buffer),void * data)1115 static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
1116 lfs_mdir_t *dir, const lfs_block_t pair[2],
1117 lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
1118 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
1119 // we can find tag very efficiently during a fetch, since we're already
1120 // scanning the entire directory
1121 lfs_stag_t besttag = -1;
1122
1123 // if either block address is invalid we return LFS_ERR_CORRUPT here,
1124 // otherwise later writes to the pair could fail
1125 if (lfs->block_count
1126 && (pair[0] >= lfs->block_count || pair[1] >= lfs->block_count)) {
1127 return LFS_ERR_CORRUPT;
1128 }
1129
1130 // find the block with the most recent revision
1131 uint32_t revs[2] = {0, 0};
1132 int r = 0;
1133 for (int i = 0; i < 2; i++) {
1134 int err = lfs_bd_read(lfs,
1135 NULL, &lfs->rcache, sizeof(revs[i]),
1136 pair[i], 0, &revs[i], sizeof(revs[i]));
1137 revs[i] = lfs_fromle32(revs[i]);
1138 if (err && err != LFS_ERR_CORRUPT) {
1139 return err;
1140 }
1141
1142 if (err != LFS_ERR_CORRUPT &&
1143 lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
1144 r = i;
1145 }
1146 }
1147
1148 dir->pair[0] = pair[(r+0)%2];
1149 dir->pair[1] = pair[(r+1)%2];
1150 dir->rev = revs[(r+0)%2];
1151 dir->off = 0; // nonzero = found some commits
1152
1153 // now scan tags to fetch the actual dir and find possible match
1154 for (int i = 0; i < 2; i++) {
1155 lfs_off_t off = 0;
1156 lfs_tag_t ptag = 0xffffffff;
1157
1158 uint16_t tempcount = 0;
1159 lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
1160 bool tempsplit = false;
1161 lfs_stag_t tempbesttag = besttag;
1162
1163 // assume not erased until proven otherwise
1164 bool maybeerased = false;
1165 bool hasfcrc = false;
1166 struct lfs_fcrc fcrc;
1167
1168 dir->rev = lfs_tole32(dir->rev);
1169 uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
1170 dir->rev = lfs_fromle32(dir->rev);
1171
1172 while (true) {
1173 // extract next tag
1174 lfs_tag_t tag;
1175 off += lfs_tag_dsize(ptag);
1176 int err = lfs_bd_read(lfs,
1177 NULL, &lfs->rcache, lfs->cfg->block_size,
1178 dir->pair[0], off, &tag, sizeof(tag));
1179 if (err) {
1180 if (err == LFS_ERR_CORRUPT) {
1181 // can't continue?
1182 break;
1183 }
1184 return err;
1185 }
1186
1187 crc = lfs_crc(crc, &tag, sizeof(tag));
1188 tag = lfs_frombe32(tag) ^ ptag;
1189
1190 // next commit not yet programmed?
1191 if (!lfs_tag_isvalid(tag)) {
1192 // we only might be erased if the last tag was a crc
1193 maybeerased = (lfs_tag_type2(ptag) == LFS_TYPE_CCRC);
1194 break;
1195 // out of range?
1196 } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
1197 break;
1198 }
1199
1200 ptag = tag;
1201
1202 if (lfs_tag_type2(tag) == LFS_TYPE_CCRC) {
1203 // check the crc attr
1204 uint32_t dcrc;
1205 err = lfs_bd_read(lfs,
1206 NULL, &lfs->rcache, lfs->cfg->block_size,
1207 dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
1208 if (err) {
1209 if (err == LFS_ERR_CORRUPT) {
1210 break;
1211 }
1212 return err;
1213 }
1214 dcrc = lfs_fromle32(dcrc);
1215
1216 if (crc != dcrc) {
1217 break;
1218 }
1219
1220 // reset the next bit if we need to
1221 ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
1222
1223 // toss our crc into the filesystem seed for
1224 // pseudorandom numbers, note we use another crc here
1225 // as a collection function because it is sufficiently
1226 // random and convenient
1227 lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc));
1228
1229 // update with what's found so far
1230 besttag = tempbesttag;
1231 dir->off = off + lfs_tag_dsize(tag);
1232 dir->etag = ptag;
1233 dir->count = tempcount;
1234 dir->tail[0] = temptail[0];
1235 dir->tail[1] = temptail[1];
1236 dir->split = tempsplit;
1237
1238 // reset crc, hasfcrc
1239 crc = 0xffffffff;
1240 continue;
1241 }
1242
1243 // crc the entry first, hopefully leaving it in the cache
1244 err = lfs_bd_crc(lfs,
1245 NULL, &lfs->rcache, lfs->cfg->block_size,
1246 dir->pair[0], off+sizeof(tag),
1247 lfs_tag_dsize(tag)-sizeof(tag), &crc);
1248 if (err) {
1249 if (err == LFS_ERR_CORRUPT) {
1250 break;
1251 }
1252 return err;
1253 }
1254
1255 // directory modification tags?
1256 if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
1257 // increase count of files if necessary
1258 if (lfs_tag_id(tag) >= tempcount) {
1259 tempcount = lfs_tag_id(tag) + 1;
1260 }
1261 } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
1262 tempcount += lfs_tag_splice(tag);
1263
1264 if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
1265 (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
1266 tempbesttag |= 0x80000000;
1267 } else if (tempbesttag != -1 &&
1268 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1269 tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
1270 }
1271 } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
1272 tempsplit = (lfs_tag_chunk(tag) & 1);
1273
1274 err = lfs_bd_read(lfs,
1275 NULL, &lfs->rcache, lfs->cfg->block_size,
1276 dir->pair[0], off+sizeof(tag), &temptail, 8);
1277 if (err) {
1278 if (err == LFS_ERR_CORRUPT) {
1279 break;
1280 }
1281 return err;
1282 }
1283 lfs_pair_fromle32(temptail);
1284 } else if (lfs_tag_type3(tag) == LFS_TYPE_FCRC) {
1285 err = lfs_bd_read(lfs,
1286 NULL, &lfs->rcache, lfs->cfg->block_size,
1287 dir->pair[0], off+sizeof(tag),
1288 &fcrc, sizeof(fcrc));
1289 if (err) {
1290 if (err == LFS_ERR_CORRUPT) {
1291 break;
1292 }
1293 }
1294
1295 lfs_fcrc_fromle32(&fcrc);
1296 hasfcrc = true;
1297 }
1298
1299 // found a match for our fetcher?
1300 if ((fmask & tag) == (fmask & ftag)) {
1301 int res = cb(data, tag, &(struct lfs_diskoff){
1302 dir->pair[0], off+sizeof(tag)});
1303 if (res < 0) {
1304 if (res == LFS_ERR_CORRUPT) {
1305 break;
1306 }
1307 return res;
1308 }
1309
1310 if (res == LFS_CMP_EQ) {
1311 // found a match
1312 tempbesttag = tag;
1313 } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
1314 (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
1315 // found an identical tag, but contents didn't match
1316 // this must mean that our besttag has been overwritten
1317 tempbesttag = -1;
1318 } else if (res == LFS_CMP_GT &&
1319 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1320 // found a greater match, keep track to keep things sorted
1321 tempbesttag = tag | 0x80000000;
1322 }
1323 }
1324 }
1325
1326 // found no valid commits?
1327 if (dir->off == 0) {
1328 // try the other block?
1329 lfs_pair_swap(dir->pair);
1330 dir->rev = revs[(r+1)%2];
1331 continue;
1332 }
1333
1334 // did we end on a valid commit? we may have an erased block
1335 dir->erased = false;
1336 if (maybeerased && dir->off % lfs->cfg->prog_size == 0) {
1337 #ifdef LFS_MULTIVERSION
1338 // note versions < lfs2.1 did not have fcrc tags, if
1339 // we're < lfs2.1 treat missing fcrc as erased data
1340 //
1341 // we don't strictly need to do this, but otherwise writing
1342 // to lfs2.0 disks becomes very inefficient
1343 if (lfs_fs_disk_version(lfs) < 0x00020001) {
1344 dir->erased = true;
1345
1346 } else
1347 #endif
1348 if (hasfcrc) {
1349 // check for an fcrc matching the next prog's erased state, if
1350 // this failed most likely a previous prog was interrupted, we
1351 // need a new erase
1352 uint32_t fcrc_ = 0xffffffff;
1353 int err = lfs_bd_crc(lfs,
1354 NULL, &lfs->rcache, lfs->cfg->block_size,
1355 dir->pair[0], dir->off, fcrc.size, &fcrc_);
1356 if (err && err != LFS_ERR_CORRUPT) {
1357 return err;
1358 }
1359
1360 // found beginning of erased part?
1361 dir->erased = (fcrc_ == fcrc.crc);
1362 }
1363 }
1364
1365 // synthetic move
1366 if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
1367 if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
1368 besttag |= 0x80000000;
1369 } else if (besttag != -1 &&
1370 lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
1371 besttag -= LFS_MKTAG(0, 1, 0);
1372 }
1373 }
1374
1375 // found tag? or found best id?
1376 if (id) {
1377 *id = lfs_min(lfs_tag_id(besttag), dir->count);
1378 }
1379
1380 if (lfs_tag_isvalid(besttag)) {
1381 return besttag;
1382 } else if (lfs_tag_id(besttag) < dir->count) {
1383 return LFS_ERR_NOENT;
1384 } else {
1385 return 0;
1386 }
1387 }
1388
1389 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
1390 dir->pair[0], dir->pair[1]);
1391 return LFS_ERR_CORRUPT;
1392 }
1393
// Fetch a metadata pair without searching for any particular tag.
static int lfs_dir_fetch(lfs_t *lfs,
        lfs_mdir_t *dir, const lfs_block_t pair[2]) {
    // a mask/tag of all ones can never match any tag, since the pattern
    // has the invalid bit set, so this degenerates into a plain fetch
    lfs_stag_t res = lfs_dir_fetchmatch(lfs, dir, pair,
            (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL);
    return (int)res;
}
1401
// XOR any move-state stored in dir into gstate.
//
// Missing movestate is not an error; gstate is simply left untouched.
static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
        lfs_gstate_t *gstate) {
    lfs_gstate_t delta;
    lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
            LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(delta)), &delta);
    if (res == LFS_ERR_NOENT) {
        // no movestate tag in this mdir, nothing to fold in
        return 0;
    }
    if (res < 0) {
        return res;
    }

    // fold the on-disk movestate into the accumulated gstate
    lfs_gstate_fromle32(&delta);
    lfs_gstate_xor(gstate, &delta);
    return 0;
}
1419
// Fill in an lfs_info (name, type, size) for the entry id in dir.
static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
        uint16_t id, struct lfs_info *info) {
    // the root has no name tag, fake one
    if (id == 0x3ff) {
        strcpy(info->name, "/");
        info->type = LFS_TYPE_DIR;
        return 0;
    }

    // grab the name tag, which also tells us the entry's type
    lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
    if (res < 0) {
        return (int)res;
    }
    info->type = lfs_tag_type3(res);

    // grab the struct tag to learn the entry's size
    struct lfs_ctz ctz;
    res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
            LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
    if (res < 0) {
        return (int)res;
    }
    lfs_ctz_fromle32(&ctz);

    switch (lfs_tag_type3(res)) {
        case LFS_TYPE_CTZSTRUCT:
            // ctz-list file, size lives in the ctz struct
            info->size = ctz.size;
            break;
        case LFS_TYPE_INLINESTRUCT:
            // inline file, size is the tag's size field
            info->size = lfs_tag_size(res);
            break;
        default:
            // other struct types (e.g. dirs) carry no size
            break;
    }

    return 0;
}
1453
// Context threaded through lfs_dir_fetchmatch's callback by lfs_dir_find.
struct lfs_dir_find_match {
    lfs_t *lfs;         // filesystem, needed for on-disk comparisons
    const void *name;   // name being searched for
    lfs_size_t size;    // length of name in bytes
};
1459
// fetchmatch callback: compare an on-disk name tag against the name we're
// searching for, returning LFS_CMP_EQ/LT/GT or a negative error
static int lfs_dir_find_match(void *data,
        lfs_tag_t tag, const void *buffer) {
    struct lfs_dir_find_match *find = data;
    lfs_t *lfs = find->lfs;
    const struct lfs_diskoff *disk = buffer;

    // compare the common prefix with what's on disk
    lfs_size_t prefix = lfs_min(find->size, lfs_tag_size(tag));
    int res = lfs_bd_cmp(lfs,
            NULL, &lfs->rcache, prefix,
            disk->block, disk->off, find->name, prefix);
    if (res != LFS_CMP_EQ) {
        return res;
    }

    // prefixes match, so ordering falls back to the lengths
    if (find->size == lfs_tag_size(tag)) {
        // found a match!
        return LFS_CMP_EQ;
    }
    return (find->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT;
}
1483
// lfs_dir_find tries to set path and id even if file is not found
//
// Walks path component by component, normalizing '.' and '..' on the fly,
// fetching each parent directory and searching it (and its tail chain) for
// the next component's name.
//
// returns:
// - the entry's name tag if file is found
// - LFS_ERR_NOENT if file or parent is not found
// - LFS_ERR_NOTDIR if parent is not a dir
// - LFS_ERR_INVAL on empty paths or '..' escaping the root
static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
        const char **path, uint16_t *id) {
    // we reduce path to a single name if we can find it
    const char *name = *path;

    // default to root dir
    lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
    dir->tail[0] = lfs->root[0];
    dir->tail[1] = lfs->root[1];

    // empty paths are not allowed
    if (*name == '\0') {
        return LFS_ERR_INVAL;
    }

    while (true) {
nextname:
        // skip slashes if we're a directory
        if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
            name += strspn(name, "/");
        }
        // current component runs until the next slash
        lfs_size_t namelen = strcspn(name, "/");

        // skip '.'
        if (namelen == 1 && memcmp(name, ".", 1) == 0) {
            name += namelen;
            goto nextname;
        }

        // error on unmatched '..', trying to go above root?
        if (namelen == 2 && memcmp(name, "..", 2) == 0) {
            return LFS_ERR_INVAL;
        }

        // skip if matched by '..' in name
        //
        // scan ahead through the rest of the path: if a later '..' cancels
        // out the current component, we can skip fetching it entirely
        const char *suffix = name + namelen;
        lfs_size_t sufflen;
        int depth = 1;  // how many unmatched components precede us
        while (true) {
            suffix += strspn(suffix, "/");
            sufflen = strcspn(suffix, "/");
            if (sufflen == 0) {
                break;
            }

            if (sufflen == 1 && memcmp(suffix, ".", 1) == 0) {
                // noop
            } else if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
                depth -= 1;
                if (depth == 0) {
                    // current component cancelled by this '..', resume after it
                    name = suffix + sufflen;
                    goto nextname;
                }
            } else {
                depth += 1;
            }

            suffix += sufflen;
        }

        // found path
        if (*name == '\0') {
            return tag;
        }

        // update what we've found so far
        *path = name;

        // only continue if we're a directory
        if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
            return LFS_ERR_NOTDIR;
        }

        // grab the entry data
        // (the root id 0x3ff has no struct tag; dir->tail is already set)
        if (lfs_tag_id(tag) != 0x3ff) {
            lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
            if (res < 0) {
                return res;
            }
            lfs_pair_fromle32(dir->tail);
        }

        // find entry matching name
        // (follow the metadata tail chain until a match or the chain ends)
        while (true) {
            tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
                    LFS_MKTAG(0x780, 0, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
                    id,
                    lfs_dir_find_match, &(struct lfs_dir_find_match){
                        lfs, name, namelen});
            if (tag < 0) {
                return tag;
            }

            if (tag) {
                break;
            }

            // not in this mdir and no more tails to search
            if (!dir->split) {
                return LFS_ERR_NOENT;
            }
        }

        // to next name
        name += namelen;
    }
}
1598
// commit logic
//
// In-progress commit to a metadata block; tracks the running crc and the
// region of the block the commit is allowed to write into.
struct lfs_commit {
    lfs_block_t block;  // block being committed to
    lfs_off_t off;      // current program offset within the block
    lfs_tag_t ptag;     // previous tag, for xor-chaining tag encodings
    uint32_t crc;       // running crc of the current commit

    lfs_off_t begin;    // start of the committable region
    lfs_off_t end;      // end of the committable region (exclusive)
};
1609
1610 #ifndef LFS_READONLY
lfs_dir_commitprog(lfs_t * lfs,struct lfs_commit * commit,const void * buffer,lfs_size_t size)1611 static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
1612 const void *buffer, lfs_size_t size) {
1613 int err = lfs_bd_prog(lfs,
1614 &lfs->pcache, &lfs->rcache, false,
1615 commit->block, commit->off ,
1616 (const uint8_t*)buffer, size);
1617 if (err) {
1618 return err;
1619 }
1620
1621 commit->crc = lfs_crc(commit->crc, buffer, size);
1622 commit->off += size;
1623 return 0;
1624 }
1625 #endif
1626
1627 #ifndef LFS_READONLY
lfs_dir_commitattr(lfs_t * lfs,struct lfs_commit * commit,lfs_tag_t tag,const void * buffer)1628 static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
1629 lfs_tag_t tag, const void *buffer) {
1630 // check if we fit
1631 lfs_size_t dsize = lfs_tag_dsize(tag);
1632 if (commit->off + dsize > commit->end) {
1633 return LFS_ERR_NOSPC;
1634 }
1635
1636 // write out tag
1637 lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
1638 int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
1639 if (err) {
1640 return err;
1641 }
1642
1643 if (!(tag & 0x80000000)) {
1644 // from memory
1645 err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
1646 if (err) {
1647 return err;
1648 }
1649 } else {
1650 // from disk
1651 const struct lfs_diskoff *disk = buffer;
1652 for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
1653 // rely on caching to make this efficient
1654 uint8_t dat;
1655 err = lfs_bd_read(lfs,
1656 NULL, &lfs->rcache, dsize-sizeof(tag)-i,
1657 disk->block, disk->off+i, &dat, 1);
1658 if (err) {
1659 return err;
1660 }
1661
1662 err = lfs_dir_commitprog(lfs, commit, &dat, 1);
1663 if (err) {
1664 return err;
1665 }
1666 }
1667 }
1668
1669 commit->ptag = tag & 0x7fffffff;
1670 return 0;
1671 }
1672 #endif
1673
#ifndef LFS_READONLY

// Finish a commit: pad it out to the next prog boundary with crc (and,
// when possible, fcrc) tags, flush, then read back and verify what
// actually landed on disk.
static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
    // align to program units
    //
    // this gets a bit complex as we have two types of crcs:
    // - 5-word crc with fcrc to check following prog (middle of block)
    // - 2-word crc with no following prog (end of block)
    const lfs_off_t end = lfs_alignup(
            lfs_min(commit->off + 5*sizeof(uint32_t), lfs->cfg->block_size),
            lfs->cfg->prog_size);

    // off1/crc1 remember the first, non-padding commit crc for the
    // verification pass at the bottom
    lfs_off_t off1 = 0;
    uint32_t crc1 = 0;

    // create crc tags to fill up remainder of commit, note that
    // padding is not crced, which lets fetches skip padding but
    // makes committing a bit more complicated
    while (commit->off < end) {
        // how far can a single crc tag reach? (the tag size field is
        // limited to 0x3fe bytes)
        lfs_off_t noff = (
                lfs_min(end - (commit->off+sizeof(lfs_tag_t)), 0x3fe)
                + (commit->off+sizeof(lfs_tag_t)));
        // too large for crc tag? need padding commits
        if (noff < end) {
            noff = lfs_min(noff, end - 5*sizeof(uint32_t));
        }

        // space for fcrc?
        uint8_t eperturb = (uint8_t)-1;
        if (noff >= end && noff <= lfs->cfg->block_size - lfs->cfg->prog_size) {
            // first read the leading byte, this always contains a bit
            // we can perturb to avoid writes that don't change the fcrc
            int err = lfs_bd_read(lfs,
                    NULL, &lfs->rcache, lfs->cfg->prog_size,
                    commit->block, noff, &eperturb, 1);
            if (err && err != LFS_ERR_CORRUPT) {
                return err;
            }

#ifdef LFS_MULTIVERSION
            // unfortunately fcrcs break mdir fetching < lfs2.1, so only write
            // these if we're a >= lfs2.1 filesystem
            if (lfs_fs_disk_version(lfs) <= 0x00020000) {
                // don't write fcrc
            } else
#endif
            {
                // find the expected fcrc, don't bother avoiding a reread
                // of the eperturb, it should still be in our cache
                struct lfs_fcrc fcrc = {
                    .size = lfs->cfg->prog_size,
                    .crc = 0xffffffff
                };
                err = lfs_bd_crc(lfs,
                        NULL, &lfs->rcache, lfs->cfg->prog_size,
                        commit->block, noff, fcrc.size, &fcrc.crc);
                if (err && err != LFS_ERR_CORRUPT) {
                    return err;
                }

                lfs_fcrc_tole32(&fcrc);
                err = lfs_dir_commitattr(lfs, commit,
                        LFS_MKTAG(LFS_TYPE_FCRC, 0x3ff, sizeof(struct lfs_fcrc)),
                        &fcrc);
                if (err) {
                    return err;
                }
            }
        }

        // build commit crc
        struct {
            lfs_tag_t tag;
            uint32_t crc;
        } ccrc;
        // the low bit of the ccrc type encodes the expected valid bit of
        // the next tag (inverse of eperturb's msb)
        lfs_tag_t ntag = LFS_MKTAG(
                LFS_TYPE_CCRC + (((uint8_t)~eperturb) >> 7), 0x3ff,
                noff - (commit->off+sizeof(lfs_tag_t)));
        ccrc.tag = lfs_tobe32(ntag ^ commit->ptag);
        commit->crc = lfs_crc(commit->crc, &ccrc.tag, sizeof(lfs_tag_t));
        ccrc.crc = lfs_tole32(commit->crc);

        int err = lfs_bd_prog(lfs,
                &lfs->pcache, &lfs->rcache, false,
                commit->block, commit->off, &ccrc, sizeof(ccrc));
        if (err) {
            return err;
        }

        // keep track of non-padding checksum to verify
        if (off1 == 0) {
            off1 = commit->off + sizeof(lfs_tag_t);
            crc1 = commit->crc;
        }

        commit->off = noff;
        // perturb valid bit?
        commit->ptag = ntag ^ ((0x80UL & ~eperturb) << 24);
        // reset crc for next commit
        commit->crc = 0xffffffff;

        // manually flush here since we don't prog the padding, this confuses
        // the caching layer
        if (noff >= end || noff >= lfs->pcache.off + lfs->cfg->cache_size) {
            // flush buffers
            int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
            if (err) {
                return err;
            }
        }
    }

    // successful commit, check checksums to make sure
    //
    // note that we don't need to check padding commits, worst
    // case if they are corrupted we would have had to compact anyways
    lfs_off_t off = commit->begin;
    uint32_t crc = 0xffffffff;
    int err = lfs_bd_crc(lfs,
            NULL, &lfs->rcache, off1+sizeof(uint32_t),
            commit->block, off, off1-off, &crc);
    if (err) {
        return err;
    }

    // check non-padding commits against known crc
    if (crc != crc1) {
        return LFS_ERR_CORRUPT;
    }

    // make sure to check crc in case we happen to pick
    // up an unrelated crc (frozen block?)
    err = lfs_bd_crc(lfs,
            NULL, &lfs->rcache, sizeof(uint32_t),
            commit->block, off1, sizeof(uint32_t), &crc);
    if (err) {
        return err;
    }

    // crc over data+stored-crc must come out to zero if the stored crc
    // matches the data
    if (crc != 0) {
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
#endif
1820
1821 #ifndef LFS_READONLY
lfs_dir_alloc(lfs_t * lfs,lfs_mdir_t * dir)1822 static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
1823 // allocate pair of dir blocks (backwards, so we write block 1 first)
1824 for (int i = 0; i < 2; i++) {
1825 int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
1826 if (err) {
1827 return err;
1828 }
1829 }
1830
1831 // zero for reproducibility in case initial block is unreadable
1832 dir->rev = 0;
1833
1834 // rather than clobbering one of the blocks we just pretend
1835 // the revision may be valid
1836 int err = lfs_bd_read(lfs,
1837 NULL, &lfs->rcache, sizeof(dir->rev),
1838 dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
1839 dir->rev = lfs_fromle32(dir->rev);
1840 if (err && err != LFS_ERR_CORRUPT) {
1841 return err;
1842 }
1843
1844 // to make sure we don't immediately evict, align the new revision count
1845 // to our block_cycles modulus, see lfs_dir_compact for why our modulus
1846 // is tweaked this way
1847 if (lfs->cfg->block_cycles > 0) {
1848 dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1));
1849 }
1850
1851 // set defaults
1852 dir->off = sizeof(dir->rev);
1853 dir->etag = 0xffffffff;
1854 dir->count = 0;
1855 dir->tail[0] = LFS_BLOCK_NULL;
1856 dir->tail[1] = LFS_BLOCK_NULL;
1857 dir->erased = false;
1858 dir->split = false;
1859
1860 // don't write out yet, let caller take care of that
1861 return 0;
1862 }
1863 #endif
1864
1865 #ifndef LFS_READONLY
lfs_dir_drop(lfs_t * lfs,lfs_mdir_t * dir,lfs_mdir_t * tail)1866 static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
1867 // steal state
1868 int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
1869 if (err) {
1870 return err;
1871 }
1872
1873 // steal tail
1874 lfs_pair_tole32(tail->tail);
1875 err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
1876 {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
1877 lfs_pair_fromle32(tail->tail);
1878 if (err) {
1879 return err;
1880 }
1881
1882 return 0;
1883 }
1884 #endif
1885
1886 #ifndef LFS_READONLY
lfs_dir_split(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount,lfs_mdir_t * source,uint16_t split,uint16_t end)1887 static int lfs_dir_split(lfs_t *lfs,
1888 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1889 lfs_mdir_t *source, uint16_t split, uint16_t end) {
1890 // create tail metadata pair
1891 lfs_mdir_t tail;
1892 int err = lfs_dir_alloc(lfs, &tail);
1893 if (err) {
1894 return err;
1895 }
1896
1897 tail.split = dir->split;
1898 tail.tail[0] = dir->tail[0];
1899 tail.tail[1] = dir->tail[1];
1900
1901 // note we don't care about LFS_OK_RELOCATED
1902 int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
1903 if (res < 0) {
1904 return res;
1905 }
1906
1907 dir->tail[0] = tail.pair[0];
1908 dir->tail[1] = tail.pair[1];
1909 dir->split = true;
1910
1911 // update root if needed
1912 if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) {
1913 lfs->root[0] = tail.pair[0];
1914 lfs->root[1] = tail.pair[1];
1915 }
1916
1917 return 0;
1918 }
1919 #endif
1920
1921 #ifndef LFS_READONLY
lfs_dir_commit_size(void * p,lfs_tag_t tag,const void * buffer)1922 static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
1923 lfs_size_t *size = p;
1924 (void)buffer;
1925
1926 *size += lfs_tag_dsize(tag);
1927 return 0;
1928 }
1929 #endif
1930
#ifndef LFS_READONLY
// context for lfs_dir_commit_commit, bundling the filesystem and the
// in-progress commit so both fit through a single callback data pointer
struct lfs_dir_commit_commit {
    lfs_t *lfs;
    struct lfs_commit *commit;
};
#endif
1937
1938 #ifndef LFS_READONLY
lfs_dir_commit_commit(void * p,lfs_tag_t tag,const void * buffer)1939 static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
1940 struct lfs_dir_commit_commit *commit = p;
1941 return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer);
1942 }
1943 #endif
1944
1945 #ifndef LFS_READONLY
lfs_dir_needsrelocation(lfs_t * lfs,lfs_mdir_t * dir)1946 static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
1947 // If our revision count == n * block_cycles, we should force a relocation,
1948 // this is how littlefs wear-levels at the metadata-pair level. Note that we
1949 // actually use (block_cycles+1)|1, this is to avoid two corner cases:
1950 // 1. block_cycles = 1, which would prevent relocations from terminating
1951 // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
1952 // one metadata block in the pair, effectively making this useless
1953 return (lfs->cfg->block_cycles > 0
1954 && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
1955 }
1956 #endif
1957
#ifndef LFS_READONLY
// Compact entries [begin, end) of source (plus pending attrs) into dir,
// erasing and rewriting dir's blocks, relocating the pair if a block turns
// out bad or worn ("tired").
//
// Returns 0 on success, LFS_OK_RELOCATED if the pair moved, or a negative
// error code.
static int lfs_dir_compact(lfs_t *lfs,
        lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    // save some state in case block is bad
    bool relocated = false;
    bool tired = lfs_dir_needsrelocation(lfs, dir);

    // increment revision count
    dir->rev += 1;

    // do not proactively relocate blocks during migrations, this
    // can cause a number of failure states such: clobbering the
    // v1 superblock if we relocate root, and invalidating directory
    // pointers if we relocate the head of a directory. On top of
    // this, relocations increase the overall complexity of
    // lfs_migration, which is already a delicate operation.
#ifdef LFS_MIGRATE
    if (lfs->lfs1) {
        tired = false;
    }
#endif

    if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
        // we're writing too much, time to relocate
        goto relocate;
    }

    // begin loop to commit compaction to blocks until a compact sticks
    while (true) {
        {
            // setup commit state
            struct lfs_commit commit = {
                .block = dir->pair[1],
                .off = 0,
                .ptag = 0xffffffff,
                .crc = 0xffffffff,

                // leave room for the terminating tail tag
                .begin = 0,
                .end = (lfs->cfg->metadata_max ?
                    lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
            };

            // erase block to write to
            int err = lfs_bd_erase(lfs, dir->pair[1]);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // write out header (revision count, little-endian)
            dir->rev = lfs_tole32(dir->rev);
            err = lfs_dir_commitprog(lfs, &commit,
                    &dir->rev, sizeof(dir->rev));
            dir->rev = lfs_fromle32(dir->rev);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // traverse the directory, this time writing out all unique tags
            err = lfs_dir_traverse(lfs,
                    source, 0, 0xffffffff, attrs, attrcount,
                    LFS_MKTAG(0x400, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                    begin, end, -begin,
                    lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                        lfs, &commit});
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // commit tail, which may be new after last size check
            if (!lfs_pair_isnull(dir->tail)) {
                lfs_pair_tole32(dir->tail);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail);
                lfs_pair_fromle32(dir->tail);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // bring over gstate?
            // (xor of gdisk/gstate/gdelta yields the pending gstate change;
            // on a relocation retry only gdelta is carried over)
            lfs_gstate_t delta = {0};
            if (!relocated) {
                lfs_gstate_xor(&delta, &lfs->gdisk);
                lfs_gstate_xor(&delta, &lfs->gstate);
            }
            lfs_gstate_xor(&delta, &lfs->gdelta);
            delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);

            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            if (!lfs_gstate_iszero(&delta)) {
                lfs_gstate_tole32(&delta);
                err = lfs_dir_commitattr(lfs, &commit,
                        LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                            sizeof(delta)), &delta);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }
            }

            // complete commit with crc
            err = lfs_dir_commitcrc(lfs, &commit);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            // successful compaction, swap dir pair to indicate most recent
            LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
            lfs_pair_swap(dir->pair);
            dir->count = end - begin;
            dir->off = commit.off;
            dir->etag = commit.ptag;
            // update gstate
            lfs->gdelta = (lfs_gstate_t){0};
            if (!relocated) {
                lfs->gdisk = lfs->gstate;
            }
        }
        break;

relocate:
        // commit was corrupted, drop caches and prepare to relocate block
        relocated = true;
        lfs_cache_drop(lfs, &lfs->pcache);
        if (!tired) {
            LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
        }

        // can't relocate superblock, filesystem is now frozen
        if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
            LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
                    dir->pair[1]);
            return LFS_ERR_NOSPC;
        }

        // relocate half of pair
        // (a wear-driven relocation tolerates NOSPC by staying in place)
        int err = lfs_alloc(lfs, &dir->pair[1]);
        if (err && (err != LFS_ERR_NOSPC || !tired)) {
            return err;
        }

        tired = false;
        continue;
    }

    return relocated ? LFS_OK_RELOCATED : 0;
}
#endif
2130
#ifndef LFS_READONLY
// Compact a metadata pair, splitting into multiple pairs if the entries
// don't fit in a single pair.
//
// Writes the entries [begin, end) of source, plus the pending attrs, into
// dir. Repeatedly halves the range and calls lfs_dir_split until the
// remaining entries fit, and may also split ("expand") the superblock to
// reduce wear on blocks {0, 1}.
//
// Returns 0 or LFS_OK_RELOCATED on success (propagated from
// lfs_dir_compact), or a negative error code.
static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *source, uint16_t begin, uint16_t end) {
    while (true) {
        // find size of first split, we do this by halving the split until
        // the metadata is guaranteed to fit
        //
        // Note that this isn't a true binary search, we never increase the
        // split size. This may result in poorly distributed metadata but isn't
        // worth the extra code size or performance hit to fix.
        lfs_size_t split = begin;
        while (end - split > 1) {
            // measure how many bytes the entries [split, end) would take
            lfs_size_t size = 0;
            int err = lfs_dir_traverse(lfs,
                    source, 0, 0xffffffff, attrs, attrcount,
                    LFS_MKTAG(0x400, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
                    split, end, -split,
                    lfs_dir_commit_size, &size);
            if (err) {
                return err;
            }

            // space is complicated, we need room for:
            //
            // - tail: 4+2*4 = 12 bytes
            // - gstate: 4+3*4 = 16 bytes
            // - move delete: 4 = 4 bytes
            // - crc: 4+4 = 8 bytes
            // total = 40 bytes
            //
            // And we cap at half a block to avoid degenerate cases with
            // nearly-full metadata blocks.
            //
            lfs_size_t metadata_max = (lfs->cfg->metadata_max)
                    ? lfs->cfg->metadata_max
                    : lfs->cfg->block_size;
            if (end - split < 0xff
                    && size <= lfs_min(
                        metadata_max - 40,
                        lfs_alignup(
                            metadata_max/2,
                            lfs->cfg->prog_size))) {
                break;
            }

            // didn't fit, halve the range and measure again
            split = split + ((end - split) / 2);
        }

        if (split == begin) {
            // no split needed
            break;
        }

        // split into two metadata pairs and continue
        int err = lfs_dir_split(lfs, dir, attrs, attrcount,
                source, split, end);
        if (err && err != LFS_ERR_NOSPC) {
            return err;
        }

        if (err) {
            // we can't allocate a new block, try to compact with degraded
            // performance
            LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
                    dir->pair[0], dir->pair[1]);
            break;
        } else {
            // the new tail now owns [split, end), keep working on the rest
            end = split;
        }
    }

    if (lfs_dir_needsrelocation(lfs, dir)
            && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
        // oh no! we're writing too much to the superblock,
        // should we expand?
        lfs_ssize_t size = lfs_fs_size_(lfs);
        if (size < 0) {
            return size;
        }

        // littlefs cannot reclaim expanded superblocks, so expand cautiously
        //
        // if our filesystem is more than ~88% full, don't expand, this is
        // somewhat arbitrary
        if (lfs->block_count - size > lfs->block_count/8) {
            LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
            int err = lfs_dir_split(lfs, dir, attrs, attrcount,
                    source, begin, end);
            if (err && err != LFS_ERR_NOSPC) {
                return err;
            }

            if (err) {
                // welp, we tried, if we ran out of space there's not much
                // we can do, we'll error later if we've become frozen
                LFS_WARN("Unable to expand superblock");
            } else {
                // duplicate the superblock entry into the new superblock
                end = 1;
            }
        }
    }

    return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
}
#endif
2239
#ifndef LFS_READONLY
// Commit attrs to the metadata pair, falling back to compaction (and
// possibly relocation to a new pair) if the append-commit doesn't fit.
//
// dir must be a fetched copy of pair and is updated in place. If every
// entry ends up deleted and a predecessor exists, the dir is dropped and
// pdir receives the predecessor. Any mdirs in lfs->mlist affected by the
// commit are patched up before returning.
//
// Returns 0 on a plain commit, LFS_OK_RELOCATED if the pair moved,
// LFS_OK_DROPPED if the dir should be unlinked, or a negative error code.
static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const lfs_block_t pair[2],
        const struct lfs_mattr *attrs, int attrcount,
        lfs_mdir_t *pdir) {
    int state = 0;

    // calculate changes to the directory
    bool hasdelete = false;
    for (int i = 0; i < attrcount; i++) {
        if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
            dir->count += 1;
        } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
            LFS_ASSERT(dir->count > 0);
            dir->count -= 1;
            hasdelete = true;
        } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
            dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
            dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
            dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
            lfs_pair_fromle32(dir->tail);
        }
    }

    // should we actually drop the directory block?
    if (hasdelete && dir->count == 0) {
        LFS_ASSERT(pdir);
        int err = lfs_fs_pred(lfs, dir->pair, pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        // only drop if a predecessor exists to absorb our tail
        if (err != LFS_ERR_NOENT && pdir->split) {
            state = LFS_OK_DROPPED;
            goto fixmlist;
        }
    }

    if (dir->erased) {
        // try to commit
        struct lfs_commit commit = {
            .block = dir->pair[0],
            .off = dir->off,
            .ptag = dir->etag,
            .crc = 0xffffffff,

            .begin = dir->off,
            .end = (lfs->cfg->metadata_max ?
                lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
        };

        // traverse attrs that need to be written out
        lfs_pair_tole32(dir->tail);
        int err = lfs_dir_traverse(lfs,
                dir, dir->off, dir->etag, attrs, attrcount,
                0, 0, 0, 0, 0,
                lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
                    lfs, &commit});
        lfs_pair_fromle32(dir->tail);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // commit any global diffs if we have any
        lfs_gstate_t delta = {0};
        lfs_gstate_xor(&delta, &lfs->gstate);
        lfs_gstate_xor(&delta, &lfs->gdisk);
        lfs_gstate_xor(&delta, &lfs->gdelta);
        delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
        if (!lfs_gstate_iszero(&delta)) {
            err = lfs_dir_getgstate(lfs, dir, &delta);
            if (err) {
                return err;
            }

            lfs_gstate_tole32(&delta);
            err = lfs_dir_commitattr(lfs, &commit,
                    LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
                        sizeof(delta)), &delta);
            if (err) {
                if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                    goto compact;
                }
                return err;
            }
        }

        // finalize commit with the crc
        err = lfs_dir_commitcrc(lfs, &commit);
        if (err) {
            if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
                goto compact;
            }
            return err;
        }

        // successful commit, update dir
        LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
        dir->off = commit.off;
        dir->etag = commit.ptag;
        // and update gstate
        lfs->gdisk = lfs->gstate;
        lfs->gdelta = (lfs_gstate_t){0};

        goto fixmlist;
    }

compact:
    // fall back to compaction
    lfs_cache_drop(lfs, &lfs->pcache);

    state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
            dir, 0, dir->count);
    if (state < 0) {
        return state;
    }

    goto fixmlist;

fixmlist:;
    // this complicated bit of logic is for fixing up any active
    // metadata-pairs that we may have affected
    //
    // note we have to make two passes since the mdir passed to
    // lfs_dir_commit could also be in this list, and even then
    // we need to copy the pair so they don't get clobbered if we refetch
    // our mdir.
    lfs_block_t oldpair[2] = {pair[0], pair[1]};
    for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
        if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
            d->m = *dir;
            if (d->m.pair != pair) {
                // shift open handles to account for creates/deletes
                for (int i = 0; i < attrcount; i++) {
                    if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id == lfs_tag_id(attrs[i].tag)) {
                        // the entry this handle points at was deleted
                        d->m.pair[0] = LFS_BLOCK_NULL;
                        d->m.pair[1] = LFS_BLOCK_NULL;
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
                            d->id > lfs_tag_id(attrs[i].tag)) {
                        d->id -= 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos -= 1;
                        }
                    } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
                            d->id >= lfs_tag_id(attrs[i].tag)) {
                        d->id += 1;
                        if (d->type == LFS_TYPE_DIR) {
                            ((lfs_dir_t*)d)->pos += 1;
                        }
                    }
                }
            }

            // if we split, the handle's id may now live on a tail block
            while (d->id >= d->m.count && d->m.split) {
                // we split and id is on tail now
                if (lfs_pair_cmp(d->m.tail, lfs->root) != 0) {
                    d->id -= d->m.count;
                }
                int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
                if (err) {
                    return err;
                }
            }
        }
    }

    return state;
}
#endif
2412
#ifndef LFS_READONLY
// Commit attrs to dir, handling the fallout of relocation and drops:
// evicts inline files that aren't RAM backed, steals the tail of a dropped
// dir, and walks relocations up through parents/predecessors so that no
// metadata pair is left pointing at a bad block.
//
// Returns 0 on success, LFS_OK_ORPHANED if orphans may have been created
// (caller must run a deorphan pass), or a negative error code.
static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
        const struct lfs_mattr *attrs, int attrcount) {
    // check for any inline files that aren't RAM backed and
    // forcefully evict them, needed for filesystem consistency
    for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
        if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
                f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
                f->ctz.size > lfs->cfg->cache_size) {
            int err = lfs_file_outline(lfs, f);
            if (err) {
                return err;
            }

            err = lfs_file_flush(lfs, f);
            if (err) {
                return err;
            }
        }
    }

    // commit on a local copy so dir isn't clobbered if it's also in mlist
    lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
    lfs_mdir_t ldir = *dir;
    lfs_mdir_t pdir;
    int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
            attrs, attrcount, &pdir);
    if (state < 0) {
        return state;
    }

    // update if we're not in mlist, note we may have already been
    // updated if we are in mlist
    if (lfs_pair_cmp(dir->pair, lpair) == 0) {
        *dir = ldir;
    }

    // commit was successful, but may require other changes in the
    // filesystem, these would normally be tail recursive, but we have
    // flattened them here avoid unbounded stack usage

    // need to drop?
    if (state == LFS_OK_DROPPED) {
        // steal state
        int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
        if (err) {
            return err;
        }

        // steal tail, note that this can't create a recursive drop
        lpair[0] = pdir.pair[0];
        lpair[1] = pdir.pair[1];
        lfs_pair_tole32(dir->tail);
        state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                    {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
                        dir->tail}),
                NULL);
        lfs_pair_fromle32(dir->tail);
        if (state < 0) {
            return state;
        }

        ldir = pdir;
    }

    // need to relocate?
    bool orphans = false;
    while (state == LFS_OK_RELOCATED) {
        LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
                    "-> {0x%"PRIx32", 0x%"PRIx32"}",
                lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
        state = 0;

        // update internal root
        if (lfs_pair_cmp(lpair, lfs->root) == 0) {
            lfs->root[0] = ldir.pair[0];
            lfs->root[1] = ldir.pair[1];
        }

        // update internally tracked dirs
        for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
            if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
                d->m.pair[0] = ldir.pair[0];
                d->m.pair[1] = ldir.pair[1];
            }

            if (d->type == LFS_TYPE_DIR &&
                    lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
                ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
                ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
            }
        }

        // find parent
        lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
        if (tag < 0 && tag != LFS_ERR_NOENT) {
            return tag;
        }

        bool hasparent = (tag != LFS_ERR_NOENT);
        if (tag != LFS_ERR_NOENT) {
            // note that if we have a parent, we must have a pred, so this will
            // always create an orphan
            int err = lfs_fs_preporphans(lfs, +1);
            if (err) {
                return err;
            }

            // fix pending move in this pair? this looks like an optimization but
            // is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
                if (moveid < lfs_tag_id(tag)) {
                    tag -= LFS_MKTAG(0, 1, 0);
                }
            }

            // point the parent's entry at our new pair
            lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {tag, ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            if (state == LFS_OK_RELOCATED) {
                // the parent relocated too, keep walking up the tree
                lpair[0] = ppair[0];
                lpair[1] = ppair[1];
                ldir = pdir;
                orphans = true;
                continue;
            }
        }

        // find pred
        int err = lfs_fs_pred(lfs, lpair, &pdir);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }
        LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));

        // if we can't find dir, it must be new
        if (err != LFS_ERR_NOENT) {
            if (lfs_gstate_hasorphans(&lfs->gstate)) {
                // next step, clean up orphans
                err = lfs_fs_preporphans(lfs, -hasparent);
                if (err) {
                    return err;
                }
            }

            // fix pending move in this pair? this looks like an optimization
            // but is in fact _required_ since relocating may outdate the move.
            uint16_t moveid = 0x3ff;
            if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                moveid = lfs_tag_id(lfs->gstate.tag);
                LFS_DEBUG("Fixing move while relocating "
                        "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                        pdir.pair[0], pdir.pair[1], moveid);
                lfs_fs_prepmove(lfs, 0x3ff, NULL);
            }

            // replace bad pair, either we clean up desync, or no desync occured
            lpair[0] = pdir.pair[0];
            lpair[1] = pdir.pair[1];
            lfs_pair_tole32(ldir.pair);
            state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
                        {LFS_MKTAG_IF(moveid != 0x3ff,
                            LFS_TYPE_DELETE, moveid, 0), NULL},
                        {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
                            ldir.pair}),
                    NULL);
            lfs_pair_fromle32(ldir.pair);
            if (state < 0) {
                return state;
            }

            ldir = pdir;
        }
    }

    return orphans ? LFS_OK_ORPHANED : 0;
}
#endif
2605
2606 #ifndef LFS_READONLY
lfs_dir_commit(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount)2607 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
2608 const struct lfs_mattr *attrs, int attrcount) {
2609 int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
2610 if (orphans < 0) {
2611 return orphans;
2612 }
2613
2614 if (orphans) {
2615 // make sure we've removed all orphans, this is a noop if there
2616 // are none, but if we had nested blocks failures we may have
2617 // created some
2618 int err = lfs_fs_deorphan(lfs, false);
2619 if (err) {
2620 return err;
2621 }
2622 }
2623
2624 return 0;
2625 }
2626 #endif
2627
2628
2629 /// Top level directory operations ///
#ifndef LFS_READONLY
// Create a new directory at path.
//
// Allocates a fresh metadata pair for the directory, links it into the
// global tail-list, then adds the named entry to the parent. The ordering
// here is deliberate: the new dir is committed before anything references
// it, and the orphan counter guards the window where the tail-list is
// temporarily desynced.
static int lfs_mkdir_(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // look up the parent, we expect NOENT on the final name component
    struct lfs_mlist cwd;
    cwd.next = lfs->mlist;
    uint16_t id;
    err = lfs_dir_find(lfs, &cwd.m, &path, &id);
    if (!(err == LFS_ERR_NOENT && lfs_path_islast(path))) {
        return (err < 0) ? err : LFS_ERR_EXIST;
    }

    // check that name fits
    lfs_size_t nlen = lfs_path_namelen(path);
    if (nlen > lfs->name_max) {
        return LFS_ERR_NAMETOOLONG;
    }

    // build up new directory
    lfs_alloc_ckpoint(lfs);
    lfs_mdir_t dir;
    err = lfs_dir_alloc(lfs, &dir);
    if (err) {
        return err;
    }

    // find end of list
    lfs_mdir_t pred = cwd.m;
    while (pred.split) {
        err = lfs_dir_fetch(lfs, &pred, pred.tail);
        if (err) {
            return err;
        }
    }

    // setup dir
    lfs_pair_tole32(pred.tail);
    err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
    lfs_pair_fromle32(pred.tail);
    if (err) {
        return err;
    }

    // current block not end of list?
    if (cwd.m.split) {
        // update tails, this creates a desync
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // it's possible our predecessor has to be relocated, and if
        // our parent is our predecessor's predecessor, this could have
        // caused our parent to go out of date, fortunately we can hook
        // ourselves into littlefs to catch this
        cwd.type = 0;
        cwd.id = 0;
        lfs->mlist = &cwd;

        lfs_pair_tole32(dir.pair);
        err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
        lfs_pair_fromle32(dir.pair);
        if (err) {
            lfs->mlist = cwd.next;
            return err;
        }

        lfs->mlist = cwd.next;
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }
    }

    // now insert into our parent block
    lfs_pair_tole32(dir.pair);
    err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
            {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
            {LFS_MKTAG_IF(!cwd.m.split,
                LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
    lfs_pair_fromle32(dir.pair);
    if (err) {
        return err;
    }

    return 0;
}
#endif
2726
// Open a directory handle at path and register it for metadata updates.
static int lfs_dir_open_(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
    // look up the path in the metadata tree
    lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
    if (tag < 0) {
        return tag;
    }

    if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
        return LFS_ERR_NOTDIR;
    }

    lfs_block_t pair[2];
    if (lfs_tag_id(tag) != 0x3ff) {
        // read the dir's pair out of our parent's entry
        lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return res;
        }
        lfs_pair_fromle32(pair);
    } else {
        // the root dir is a special case with no parent entry
        pair[0] = lfs->root[0];
        pair[1] = lfs->root[1];
    }

    // fetch the first metadata block of the directory
    int err = lfs_dir_fetch(lfs, &dir->m, pair);
    if (err) {
        return err;
    }

    // initialize the handle at position zero
    dir->head[0] = dir->m.pair[0];
    dir->head[1] = dir->m.pair[1];
    dir->id = 0;
    dir->pos = 0;

    // track the handle so commits can patch it up
    dir->type = LFS_TYPE_DIR;
    lfs_mlist_append(lfs, (struct lfs_mlist *)dir);

    return 0;
}
2770
// Close a directory handle by detaching it from the tracked mdir list.
static int lfs_dir_close_(lfs_t *lfs, lfs_dir_t *dir) {
    lfs_mlist_remove(lfs, (struct lfs_mlist *)dir);
    return 0;
}
2777
// Read the next entry in the directory into info.
//
// Returns true if an entry was read, false at end of directory, or a
// negative error code.
static int lfs_dir_read_(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
    memset(info, 0, sizeof(*info));

    // positions 0 and 1 are reserved for the synthetic '.' and '..'
    if (dir->pos < 2) {
        info->type = LFS_TYPE_DIR;
        strcpy(info->name, (dir->pos == 0) ? "." : "..");
        dir->pos += 1;
        return true;
    }

    // scan forward until a live entry turns up
    while (true) {
        // ran off the end of this metadata block?
        if (dir->id == dir->m.count) {
            if (!dir->m.split) {
                // no tail, end of directory
                return false;
            }

            // hop to the next block in the tail-list
            int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }

        int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
        if (err && err != LFS_ERR_NOENT) {
            return err;
        }

        dir->id += 1;
        // NOENT ids belong to deleted entries, keep scanning past them
        if (err != LFS_ERR_NOENT) {
            break;
        }
    }

    dir->pos += 1;
    return true;
}
2822
// Seek a directory handle to an absolute position previously returned by
// lfs_dir_tell_, walking forward from the head block.
static int lfs_dir_seek_(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
    // restart from the head and walk forward
    int err = lfs_dir_rewind_(lfs, dir);
    if (err) {
        return err;
    }

    // consume the synthetic '.'/'..' positions first
    dir->pos = lfs_min(2, off);
    lfs_off_t remaining = off - dir->pos;

    // skip the superblock entry when this is the root dir
    dir->id = (remaining > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);

    while (remaining > 0) {
        if (dir->id == dir->m.count) {
            // seeking past the end of the directory?
            if (!dir->m.split) {
                return LFS_ERR_INVAL;
            }

            err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
            if (err) {
                return err;
            }

            dir->id = 0;
        }

        // advance as far as this block allows in one step
        int step = lfs_min(dir->m.count - dir->id, remaining);
        dir->id += step;
        dir->pos += step;
        remaining -= step;
    }

    return 0;
}
2859
lfs_dir_tell_(lfs_t * lfs,lfs_dir_t * dir)2860 static lfs_soff_t lfs_dir_tell_(lfs_t *lfs, lfs_dir_t *dir) {
2861 (void)lfs;
2862 return dir->pos;
2863 }
2864
// Reset a directory handle to the beginning by refetching its head block.
static int lfs_dir_rewind_(lfs_t *lfs, lfs_dir_t *dir) {
    int res = lfs_dir_fetch(lfs, &dir->m, dir->head);
    if (res) {
        return res;
    }

    // back to position zero
    dir->id = 0;
    dir->pos = 0;
    return 0;
}
2876
2877
2878 /// File index list operations ///
// Convert a file position into a CTZ skip-list block index.
//
// On entry *off is an absolute position in the file; on exit *off is the
// offset of that position within the returned block. The popcount terms
// account for the 4-byte skip-list pointers stored at the start of each
// block (block i stores ctz(i)+1 pointers — see lfs_ctz_extend).
static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
    lfs_off_t size = *off;
    // usable bytes per block after the minimum 2 pointer words
    lfs_off_t b = lfs->cfg->block_size - 2*4;
    lfs_off_t i = size / b;
    if (i == 0) {
        // position lands in the first block
        return 0;
    }

    // refine the estimate to account for pointers in earlier blocks
    i = (size - 4*(lfs_popc(i-1)+2)) / b;
    *off = size - b*i - 4*lfs_popc(i);
    return i;
}
2891
// Find the block and in-block offset backing position pos of a CTZ
// skip-list with the given head and total size.
static int lfs_ctz_find(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
    // an empty list has nothing to find
    if (size == 0) {
        *block = LFS_BLOCK_NULL;
        *off = 0;
        return 0;
    }

    // which block index are we starting from, and which do we want?
    lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
    lfs_off_t want = lfs_ctz_index(lfs, &pos);

    while (index > want) {
        // take the largest backwards jump this block stores a pointer
        // for, bounded so we don't hop past the target
        lfs_size_t jump = lfs_min(
                lfs_npw2(index-want+1) - 1,
                lfs_ctz(index));

        int err = lfs_bd_read(lfs,
                pcache, rcache, sizeof(head),
                head, 4*jump, &head, sizeof(head));
        head = lfs_fromle32(head);
        if (err) {
            return err;
        }

        index -= 1 << jump;
    }

    *block = head;
    *off = pos;
    return 0;
}
2925
#ifndef LFS_READONLY
// Extend a CTZ skip-list by one block, returning the new block and the
// offset where appending should continue.
//
// If the last block is only partially full, its contents are copied into
// the new block instead (blocks are never updated in place). Otherwise the
// new block is linked in with ctz(index)+1 backwards pointers. Blocks that
// fail to program (LFS_ERR_CORRUPT) are abandoned and a new one is tried.
static int lfs_ctz_extend(lfs_t *lfs,
        lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        lfs_block_t *block, lfs_off_t *off) {
    while (true) {
        // go ahead and grab a block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        {
            err = lfs_bd_erase(lfs, nblock);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }

            if (size == 0) {
                // first block of the file, nothing to copy or link
                *block = nblock;
                *off = 0;
                return 0;
            }

            // index and in-block offset of the last byte written so far
            lfs_size_t noff = size - 1;
            lfs_off_t index = lfs_ctz_index(lfs, &noff);
            noff = noff + 1;

            // just copy out the last block if it is incomplete
            if (noff != lfs->cfg->block_size) {
                for (lfs_off_t i = 0; i < noff; i++) {
                    uint8_t data;
                    err = lfs_bd_read(lfs,
                            NULL, rcache, noff-i,
                            head, i, &data, 1);
                    if (err) {
                        return err;
                    }

                    err = lfs_bd_prog(lfs,
                            pcache, rcache, true,
                            nblock, i, &data, 1);
                    if (err) {
                        if (err == LFS_ERR_CORRUPT) {
                            goto relocate;
                        }
                        return err;
                    }
                }

                *block = nblock;
                *off = noff;
                return 0;
            }

            // append block
            index += 1;
            // the new block stores ctz(index)+1 backwards pointers
            lfs_size_t skips = lfs_ctz(index) + 1;
            lfs_block_t nhead = head;
            for (lfs_off_t i = 0; i < skips; i++) {
                nhead = lfs_tole32(nhead);
                err = lfs_bd_prog(lfs, pcache, rcache, true,
                        nblock, 4*i, &nhead, 4);
                nhead = lfs_fromle32(nhead);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                if (i != skips-1) {
                    // follow the previous block's pointer to find the
                    // next, larger, backwards jump target
                    err = lfs_bd_read(lfs,
                            NULL, rcache, sizeof(nhead),
                            nhead, 4*i, &nhead, sizeof(nhead));
                    nhead = lfs_fromle32(nhead);
                    if (err) {
                        return err;
                    }
                }
            }

            *block = nblock;
            *off = 4*skips;
            return 0;
        }

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, pcache);
    }
}
#endif
3025
// Call cb for every block in a CTZ skip-list, walking backwards from head.
//
// Reads up to two backwards pointers per bd read so intermediate blocks
// can be reported without refetching; stops once block index 0 is visited.
static int lfs_ctz_traverse(lfs_t *lfs,
        const lfs_cache_t *pcache, lfs_cache_t *rcache,
        lfs_block_t head, lfs_size_t size,
        int (*cb)(void*, lfs_block_t), void *data) {
    if (size == 0) {
        // empty list, nothing to traverse
        return 0;
    }

    // index of the last (head) block in the list
    lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});

    while (true) {
        int err = cb(data, head);
        if (err) {
            return err;
        }

        if (index == 0) {
            return 0;
        }

        // read one or two pointers at once, odd indices only let us
        // consume a single pointer here
        lfs_block_t heads[2];
        int count = 2 - (index & 1);
        err = lfs_bd_read(lfs,
                pcache, rcache, count*sizeof(head),
                head, 0, &heads, count*sizeof(head));
        heads[0] = lfs_fromle32(heads[0]);
        heads[1] = lfs_fromle32(heads[1]);
        if (err) {
            return err;
        }

        // report any intermediate block we hop over
        for (int i = 0; i < count-1; i++) {
            err = cb(data, heads[i]);
            if (err) {
                return err;
            }
        }

        head = heads[count-1];
        index -= count;
    }
}
3068
3069
3070 /// Top level file operations ///
// Open (and possibly create) the file at path with the given flags and
// per-file config.
//
// Registers the file in lfs->mlist so metadata commits can patch it up,
// creates the entry when LFS_O_CREAT is set and the file doesn't exist,
// fetches user attrs, allocates or adopts the cache buffer, and preloads
// inline file contents. On any failure, cleanup releases everything that
// was acquired.
static int lfs_file_opencfg_(lfs_t *lfs, lfs_file_t *file,
        const char *path, int flags,
        const struct lfs_file_config *cfg) {
#ifndef LFS_READONLY
    // deorphan if we haven't yet, needed at most once after poweron
    if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
        int err = lfs_fs_forceconsistency(lfs);
        if (err) {
            return err;
        }
    }
#else
    LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY);
#endif

    // setup simple file details
    int err;
    file->cfg = cfg;
    file->flags = flags;
    file->pos = 0;
    file->off = 0;
    file->cache.buffer = NULL;

    // allocate entry for file if it doesn't exist
    lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
    if (tag < 0 && !(tag == LFS_ERR_NOENT && lfs_path_islast(path))) {
        err = tag;
        goto cleanup;
    }

    // get id, add to list of mdirs to catch update changes
    file->type = LFS_TYPE_REG;
    lfs_mlist_append(lfs, (struct lfs_mlist *)file);

#ifdef LFS_READONLY
    if (tag == LFS_ERR_NOENT) {
        err = LFS_ERR_NOENT;
        goto cleanup;
#else
    if (tag == LFS_ERR_NOENT) {
        if (!(flags & LFS_O_CREAT)) {
            err = LFS_ERR_NOENT;
            goto cleanup;
        }

        // don't allow trailing slashes
        if (lfs_path_isdir(path)) {
            err = LFS_ERR_NOTDIR;
            goto cleanup;
        }

        // check that name fits
        lfs_size_t nlen = lfs_path_namelen(path);
        if (nlen > lfs->name_max) {
            err = LFS_ERR_NAMETOOLONG;
            goto cleanup;
        }

        // get next slot and create entry to remember name
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));

        // it may happen that the file name doesn't fit in the metadata
        // blocks, e.g., a 256 byte file name will not fit in a 128 byte
        // block.
        err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
        if (err) {
            goto cleanup;
        }

        // new files start out inline with zero size
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
    } else if (flags & LFS_O_EXCL) {
        err = LFS_ERR_EXIST;
        goto cleanup;
#endif
    } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
        err = LFS_ERR_ISDIR;
        goto cleanup;
#ifndef LFS_READONLY
    } else if (flags & LFS_O_TRUNC) {
        // truncate if requested
        tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
        file->flags |= LFS_F_DIRTY;
#endif
    } else {
        // try to load what's on disk, if it's inlined we'll fix it later
        tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }
        lfs_ctz_fromle32(&file->ctz);
    }

    // fetch attrs
    for (unsigned i = 0; i < file->cfg->attr_count; i++) {
        // if opened for read / read-write operations
        if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
                        file->id, file->cfg->attrs[i].size),
                        file->cfg->attrs[i].buffer);
            if (res < 0 && res != LFS_ERR_NOENT) {
                err = res;
                goto cleanup;
            }
        }

#ifndef LFS_READONLY
        // if opened for write / read-write operations
        if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
            if (file->cfg->attrs[i].size > lfs->attr_max) {
                err = LFS_ERR_NOSPC;
                goto cleanup;
            }

            // attrs are committed lazily on sync
            file->flags |= LFS_F_DIRTY;
        }
#endif
    }

    // allocate buffer if needed
    if (file->cfg->buffer) {
        file->cache.buffer = file->cfg->buffer;
    } else {
        file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!file->cache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leak
    lfs_cache_zero(lfs, &file->cache);

    if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
        // load inline files
        file->ctz.head = LFS_BLOCK_INLINE;
        file->ctz.size = lfs_tag_size(tag);
        file->flags |= LFS_F_INLINE;
        file->cache.block = file->ctz.head;
        file->cache.off = 0;
        file->cache.size = lfs->cfg->cache_size;

        // don't always read (may be new/trunc file)
        if (file->ctz.size > 0) {
            lfs_stag_t res = lfs_dir_get(lfs, &file->m,
                    LFS_MKTAG(0x700, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
                        lfs_min(file->cache.size, 0x3fe)),
                    file->cache.buffer);
            if (res < 0) {
                err = res;
                goto cleanup;
            }
        }
    }

    return 0;

cleanup:
    // clean up lingering resources
#ifndef LFS_READONLY
    file->flags |= LFS_F_ERRED;
#endif
    lfs_file_close_(lfs, file);
    return err;
}
3242
3243 #ifndef LFS_NO_MALLOC
3244 static int lfs_file_open_(lfs_t *lfs, lfs_file_t *file,
3245 const char *path, int flags) {
3246 static const struct lfs_file_config defaults = {0};
3247 int err = lfs_file_opencfg_(lfs, file, path, flags, &defaults);
3248 return err;
3249 }
3250 #endif
3251
// Close a file, syncing any pending state to disk and releasing the
// resources owned by the file handle.
//
// Returns 0 on success, or a negative error code from the sync. Note
// the handle is torn down even if the sync fails.
static int lfs_file_close_(lfs_t *lfs, lfs_file_t *file) {
#ifndef LFS_READONLY
    // flush/commit before tearing anything down
    int err = lfs_file_sync_(lfs, file);
#else
    int err = 0;
#endif

    // remove from list of mdirs
    lfs_mlist_remove(lfs, (struct lfs_mlist*)file);

    // clean up memory
    if (!file->cfg->buffer) {
        // cache buffer was malloced on our behalf in opencfg, free it here
        lfs_free(file->cache.buffer);
    }

    return err;
}
3269
3270
3271 #ifndef LFS_READONLY
// Move the block currently being written (file->block) to a freshly
// allocated block, copying over the file->off bytes written so far.
// Used when a prog/erase reports a bad block, and by lfs_file_outline
// to evict an inline file into a real block.
//
// Loops until a good block is found; returns 0 on success or a
// negative error code.
static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
    while (true) {
        // just relocate what exists into new block
        lfs_block_t nblock;
        int err = lfs_alloc(lfs, &nblock);
        if (err) {
            return err;
        }

        err = lfs_bd_erase(lfs, nblock);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                goto relocate;
            }
            return err;
        }

        // either read from dirty cache or disk
        for (lfs_off_t i = 0; i < file->off; i++) {
            uint8_t data;
            if (file->flags & LFS_F_INLINE) {
                err = lfs_dir_getread(lfs, &file->m,
                        // note we evict inline files before they can be dirty
                        NULL, &file->cache, file->off-i,
                        LFS_MKTAG(0xfff, 0x1ff, 0),
                        LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                        i, &data, 1);
                if (err) {
                    return err;
                }
            } else {
                err = lfs_bd_read(lfs,
                        &file->cache, &lfs->rcache, file->off-i,
                        file->block, i, &data, 1);
                if (err) {
                    return err;
                }
            }

            // byte-at-a-time copy into the new block via the pcache
            err = lfs_bd_prog(lfs,
                    &lfs->pcache, &lfs->rcache, true,
                    nblock, i, &data, 1);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                return err;
            }
        }

        // copy over new state of file, adopting the pcache contents as
        // the file's own cache
        memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
        file->cache.block = lfs->pcache.block;
        file->cache.off = lfs->pcache.off;
        file->cache.size = lfs->pcache.size;
        lfs_cache_zero(lfs, &lfs->pcache);

        file->block = nblock;
        file->flags |= LFS_F_WRITING;
        return 0;

relocate:
        LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);

        // just clear cache and try a new block
        lfs_cache_drop(lfs, &lfs->pcache);
    }
}
3340 #endif
3341
3342 #ifndef LFS_READONLY
3343 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
3344 file->off = file->pos;
3345 lfs_alloc_ckpoint(lfs);
3346 int err = lfs_file_relocate(lfs, file);
3347 if (err) {
3348 return err;
3349 }
3350
3351 file->flags &= ~LFS_F_INLINE;
3352 return 0;
3353 }
3354 #endif
3355
// Flush the file's in-flight read/write state so ctz.head/ctz.size
// reflect everything written so far.
//
// Read state is simply dropped. Pending writes require copying any
// remaining tail of the original file past the write position, then
// flushing the file cache to disk. Marks the file LFS_F_DIRTY so the
// next sync commits updated metadata.
//
// Returns 0 on success, or a negative error code.
static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_READING) {
        if (!(file->flags & LFS_F_INLINE)) {
            lfs_cache_drop(lfs, &file->cache);
        }
        file->flags &= ~LFS_F_READING;
    }

#ifndef LFS_READONLY
    if (file->flags & LFS_F_WRITING) {
        // pos is restored after the tail copy below
        lfs_off_t pos = file->pos;

        if (!(file->flags & LFS_F_INLINE)) {
            // copy over anything after current branch
            //
            // a shadow read-only file handle that reads the old contents
            // through a borrowed rcache
            lfs_file_t orig = {
                .ctz.head = file->ctz.head,
                .ctz.size = file->ctz.size,
                .flags = LFS_O_RDONLY,
                .pos = file->pos,
                .cache = lfs->rcache,
            };
            lfs_cache_drop(lfs, &lfs->rcache);

            while (file->pos < file->ctz.size) {
                // copy over a byte at a time, leave it up to caching
                // to make this efficient
                uint8_t data;
                lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1);
                if (res < 0) {
                    return res;
                }

                res = lfs_file_flushedwrite(lfs, file, &data, 1);
                if (res < 0) {
                    return res;
                }

                // keep our reference to the rcache in sync
                if (lfs->rcache.block != LFS_BLOCK_NULL) {
                    lfs_cache_drop(lfs, &orig.cache);
                    lfs_cache_drop(lfs, &lfs->rcache);
                }
            }

            // write out what we have
            while (true) {
                int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
                if (err) {
                    if (err == LFS_ERR_CORRUPT) {
                        goto relocate;
                    }
                    return err;
                }

                break;

relocate:
                LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
                err = lfs_file_relocate(lfs, file);
                if (err) {
                    return err;
                }
            }
        } else {
            // inline files have no tail to copy, just extend the size
            file->pos = lfs_max(file->pos, file->ctz.size);
        }

        // actual file updates
        file->ctz.head = file->block;
        file->ctz.size = file->pos;
        file->flags &= ~LFS_F_WRITING;
        file->flags |= LFS_F_DIRTY;

        file->pos = pos;
    }
#endif

    return 0;
}
3435
3436 #ifndef LFS_READONLY
// Sync a file to disk: flush any in-flight writes, then commit the
// file's struct (inline data or ctz reference) and user attributes to
// its metadata pair.
//
// No-op if the file previously errored (LFS_F_ERRED) or is not dirty.
// Returns 0 on success, or a negative error code.
static int lfs_file_sync_(lfs_t *lfs, lfs_file_t *file) {
    if (file->flags & LFS_F_ERRED) {
        // it's not safe to do anything if our file errored
        return 0;
    }

    int err = lfs_file_flush(lfs, file);
    if (err) {
        file->flags |= LFS_F_ERRED;
        return err;
    }


    if ((file->flags & LFS_F_DIRTY) &&
            !lfs_pair_isnull(file->m.pair)) {
        // before we commit metadata, we need sync the disk to make sure
        // data writes don't complete after metadata writes
        if (!(file->flags & LFS_F_INLINE)) {
            err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
            if (err) {
                return err;
            }
        }

        // update dir entry
        uint16_t type;
        const void *buffer;
        lfs_size_t size;
        struct lfs_ctz ctz;
        if (file->flags & LFS_F_INLINE) {
            // inline the whole file
            type = LFS_TYPE_INLINESTRUCT;
            buffer = file->cache.buffer;
            size = file->ctz.size;
        } else {
            // update the ctz reference
            type = LFS_TYPE_CTZSTRUCT;
            // copy ctz so alloc will work during a relocate
            ctz = file->ctz;
            lfs_ctz_tole32(&ctz);
            buffer = &ctz;
            size = sizeof(ctz);
        }

        // commit file data and attributes
        err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
                {LFS_MKTAG(type, file->id, size), buffer},
                {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
                    file->cfg->attr_count), file->cfg->attrs}));
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }

        file->flags &= ~LFS_F_DIRTY;
    }

    return 0;
}
3496 #endif
3497
// Read up to size bytes at the file's current position, assuming any
// pending writes have already been flushed (see lfs_file_read_).
//
// Maintains the file's read cursor (block/off + LFS_F_READING) across
// calls. Returns the number of bytes read (0 at EOF), or a negative
// error code.
static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if (file->pos >= file->ctz.size) {
        // eof if past end
        return 0;
    }

    // clamp read to the end of the file
    size = lfs_min(size, file->ctz.size - file->pos);
    nsize = size;

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_READING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                // walk the ctz skip list to the block containing pos
                int err = lfs_ctz_find(lfs, NULL, &file->cache,
                        file->ctz.head, file->ctz.size,
                        file->pos, &file->block, &file->off);
                if (err) {
                    return err;
                }
            } else {
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_READING;
        }

        // read as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        if (file->flags & LFS_F_INLINE) {
            // inline files live in the metadata pair, read through dir get
            int err = lfs_dir_getread(lfs, &file->m,
                    NULL, &file->cache, lfs->cfg->block_size,
                    LFS_MKTAG(0xfff, 0x1ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
                    file->off, data, diff);
            if (err) {
                return err;
            }
        } else {
            int err = lfs_bd_read(lfs,
                    NULL, &file->cache, lfs->cfg->block_size,
                    file->block, file->off, data, diff);
            if (err) {
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;
    }

    return size;
}
3558
3559 static lfs_ssize_t lfs_file_read_(lfs_t *lfs, lfs_file_t *file,
3560 void *buffer, lfs_size_t size) {
3561 LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
3562
3563 #ifndef LFS_READONLY
3564 if (file->flags & LFS_F_WRITING) {
3565 // flush out any writes
3566 int err = lfs_file_flush(lfs, file);
3567 if (err) {
3568 return err;
3569 }
3570 }
3571 #endif
3572
3573 return lfs_file_flushedread(lfs, file, buffer, size);
3574 }
3575
3576
3577 #ifndef LFS_READONLY
// Write size bytes at the file's current position, assuming any read
// state has already been dropped (see lfs_file_write_).
//
// Evicts inline files that grow past inline_max, extends the ctz skip
// list with new blocks as needed, and relocates on bad blocks. Sets
// LFS_F_ERRED on failure so a later sync won't commit bad state.
//
// Returns the number of bytes written, or a negative error code.
static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    const uint8_t *data = buffer;
    lfs_size_t nsize = size;

    if ((file->flags & LFS_F_INLINE) &&
            lfs_max(file->pos+nsize, file->ctz.size) > lfs->inline_max) {
        // inline file doesn't fit anymore
        int err = lfs_file_outline(lfs, file);
        if (err) {
            file->flags |= LFS_F_ERRED;
            return err;
        }
    }

    while (nsize > 0) {
        // check if we need a new block
        if (!(file->flags & LFS_F_WRITING) ||
                file->off == lfs->cfg->block_size) {
            if (!(file->flags & LFS_F_INLINE)) {
                if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
                    // find out which block we're extending from
                    int err = lfs_ctz_find(lfs, NULL, &file->cache,
                            file->ctz.head, file->ctz.size,
                            file->pos-1, &file->block, &(lfs_off_t){0});
                    if (err) {
                        file->flags |= LFS_F_ERRED;
                        return err;
                    }

                    // mark cache as dirty since we may have read data into it
                    lfs_cache_zero(lfs, &file->cache);
                }

                // extend file with new blocks
                lfs_alloc_ckpoint(lfs);
                int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
                        file->block, file->pos,
                        &file->block, &file->off);
                if (err) {
                    file->flags |= LFS_F_ERRED;
                    return err;
                }
            } else {
                file->block = LFS_BLOCK_INLINE;
                file->off = file->pos;
            }

            file->flags |= LFS_F_WRITING;
        }

        // program as much as we can in current block
        lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
        while (true) {
            int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
                    file->block, file->off, data, diff);
            if (err) {
                if (err == LFS_ERR_CORRUPT) {
                    goto relocate;
                }
                file->flags |= LFS_F_ERRED;
                return err;
            }

            break;
relocate:
            // bad block, move what we've written so far elsewhere and retry
            err = lfs_file_relocate(lfs, file);
            if (err) {
                file->flags |= LFS_F_ERRED;
                return err;
            }
        }

        file->pos += diff;
        file->off += diff;
        data += diff;
        nsize -= diff;

        lfs_alloc_ckpoint(lfs);
    }

    return size;
}
3661
3662 static lfs_ssize_t lfs_file_write_(lfs_t *lfs, lfs_file_t *file,
3663 const void *buffer, lfs_size_t size) {
3664 LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
3665
3666 if (file->flags & LFS_F_READING) {
3667 // drop any reads
3668 int err = lfs_file_flush(lfs, file);
3669 if (err) {
3670 return err;
3671 }
3672 }
3673
3674 if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
3675 file->pos = file->ctz.size;
3676 }
3677
3678 if (file->pos + size > lfs->file_max) {
3679 // Larger than file limit?
3680 return LFS_ERR_FBIG;
3681 }
3682
3683 if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
3684 // fill with zeros
3685 lfs_off_t pos = file->pos;
3686 file->pos = file->ctz.size;
3687
3688 while (file->pos < pos) {
3689 lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1);
3690 if (res < 0) {
3691 return res;
3692 }
3693 }
3694 }
3695
3696 lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size);
3697 if (nsize < 0) {
3698 return nsize;
3699 }
3700
3701 file->flags &= ~LFS_F_ERRED;
3702 return nsize;
3703 }
3704 #endif
3705
// Seek to a new position in the file.
//
// whence is LFS_SEEK_SET/CUR/END. If the new position stays within the
// currently cached read window the seek is cache-only; otherwise any
// in-flight state is flushed first.
//
// Returns the new position, or a negative error code.
static lfs_soff_t lfs_file_seek_(lfs_t *lfs, lfs_file_t *file,
        lfs_soff_t off, int whence) {
    // find new pos
    //
    // fortunately for us, littlefs is limited to 31-bit file sizes, so we
    // don't have to worry too much about integer overflow
    lfs_off_t npos = file->pos;
    if (whence == LFS_SEEK_SET) {
        npos = off;
    } else if (whence == LFS_SEEK_CUR) {
        npos = file->pos + (lfs_off_t)off;
    } else if (whence == LFS_SEEK_END) {
        npos = (lfs_off_t)lfs_file_size_(lfs, file) + (lfs_off_t)off;
    }

    if (npos > lfs->file_max) {
        // file position out of range
        return LFS_ERR_INVAL;
    }

    if (file->pos == npos) {
        // noop - position has not changed
        return npos;
    }

    // if we're only reading and our new offset is still in the file's cache
    // we can avoid flushing and needing to reread the data
    if ((file->flags & LFS_F_READING)
            && file->off != lfs->cfg->block_size) {
        // compare ctz indices and in-block offsets of the old/new positions
        int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos});
        lfs_off_t noff = npos;
        int nindex = lfs_ctz_index(lfs, &noff);
        if (oindex == nindex
                && noff >= file->cache.off
                && noff < file->cache.off + file->cache.size) {
            file->pos = npos;
            file->off = noff;
            return npos;
        }
    }

    // write out everything beforehand, may be noop if rdonly
    int err = lfs_file_flush(lfs, file);
    if (err) {
        return err;
    }

    // update pos
    file->pos = npos;
    return npos;
}
3757
3758 #ifndef LFS_READONLY
// Truncate or extend a file to exactly size bytes.
//
// Shrinking to <= inline_max converts the file back to an inline file
// (using the rcache as scratch space); shrinking otherwise re-heads the
// ctz skip list; growing zero-fills. The file position is preserved.
//
// Returns 0 on success, or a negative error code.
static int lfs_file_truncate_(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
    LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);

    if (size > LFS_FILE_MAX) {
        return LFS_ERR_INVAL;
    }

    lfs_off_t pos = file->pos;
    lfs_off_t oldsize = lfs_file_size_(lfs, file);
    if (size < oldsize) {
        // revert to inline file?
        if (size <= lfs->inline_max) {
            // flush+seek to head
            lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_SET);
            if (res < 0) {
                return (int)res;
            }

            // read our data into rcache temporarily
            lfs_cache_drop(lfs, &lfs->rcache);
            res = lfs_file_flushedread(lfs, file,
                    lfs->rcache.buffer, size);
            if (res < 0) {
                return (int)res;
            }

            // switch the file over to an inline file backed by its cache
            file->ctz.head = LFS_BLOCK_INLINE;
            file->ctz.size = size;
            file->flags |= LFS_F_DIRTY | LFS_F_READING | LFS_F_INLINE;
            file->cache.block = file->ctz.head;
            file->cache.off = 0;
            file->cache.size = lfs->cfg->cache_size;
            memcpy(file->cache.buffer, lfs->rcache.buffer, size);

        } else {
            // need to flush since directly changing metadata
            int err = lfs_file_flush(lfs, file);
            if (err) {
                return err;
            }

            // lookup new head in ctz skip list
            err = lfs_ctz_find(lfs, NULL, &file->cache,
                    file->ctz.head, file->ctz.size,
                    size-1, &file->block, &(lfs_off_t){0});
            if (err) {
                return err;
            }

            // need to set pos/block/off consistently so seeking back to
            // the old position does not get confused
            file->pos = size;
            file->ctz.head = file->block;
            file->ctz.size = size;
            file->flags |= LFS_F_DIRTY | LFS_F_READING;
        }
    } else if (size > oldsize) {
        // flush+seek if not already at end
        lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_END);
        if (res < 0) {
            return (int)res;
        }

        // fill with zeros
        while (file->pos < size) {
            res = lfs_file_write_(lfs, file, &(uint8_t){0}, 1);
            if (res < 0) {
                return (int)res;
            }
        }
    }

    // restore pos
    lfs_soff_t res = lfs_file_seek_(lfs, file, pos, LFS_SEEK_SET);
    if (res < 0) {
        return (int)res;
    }

    return 0;
}
3839 #endif
3840
3841 static lfs_soff_t lfs_file_tell_(lfs_t *lfs, lfs_file_t *file) {
3842 (void)lfs;
3843 return file->pos;
3844 }
3845
3846 static int lfs_file_rewind_(lfs_t *lfs, lfs_file_t *file) {
3847 lfs_soff_t res = lfs_file_seek_(lfs, file, 0, LFS_SEEK_SET);
3848 if (res < 0) {
3849 return (int)res;
3850 }
3851
3852 return 0;
3853 }
3854
3855 static lfs_soff_t lfs_file_size_(lfs_t *lfs, lfs_file_t *file) {
3856 (void)lfs;
3857
3858 #ifndef LFS_READONLY
3859 if (file->flags & LFS_F_WRITING) {
3860 return lfs_max(file->pos, file->ctz.size);
3861 }
3862 #endif
3863
3864 return file->ctz.size;
3865 }
3866
3867
3868 /// General fs operations ///
3869 static int lfs_stat_(lfs_t *lfs, const char *path, struct lfs_info *info) {
3870 lfs_mdir_t cwd;
3871 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3872 if (tag < 0) {
3873 return (int)tag;
3874 }
3875
3876 // only allow trailing slashes on dirs
3877 if (strchr(path, '/') != NULL
3878 && lfs_tag_type3(tag) != LFS_TYPE_DIR) {
3879 return LFS_ERR_NOTDIR;
3880 }
3881
3882 return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
3883 }
3884
3885 #ifndef LFS_READONLY
// Remove a file or (empty) directory at path.
//
// Directories are removed under orphan protection: the global orphan
// count is bumped before the delete commit and restored after the
// directory is dropped from the tail list, so a power loss in between
// leaves a detectable orphan rather than corruption.
//
// Returns 0 on success, or a negative error code.
static int lfs_remove_(lfs_t *lfs, const char *path) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    lfs_mdir_t cwd;
    lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
    if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
        // not found, or refusing to remove the root (id 0x3ff)
        return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
    }

    struct lfs_mlist dir;
    dir.next = lfs->mlist;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t pair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(pair);

        err = lfs_dir_fetch(lfs, &dir.m, pair);
        if (err) {
            return err;
        }

        if (dir.m.count > 0 || dir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        dir.type = 0;
        dir.id = 0;
        lfs->mlist = &dir;
    }

    // delete the entry
    err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
    if (err) {
        lfs->mlist = dir.next;
        return err;
    }

    lfs->mlist = dir.next;
    if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        // unlink the directory's metadata pair from the tail list
        err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &cwd, &dir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
3962 #endif
3963
3964 #ifndef LFS_READONLY
// Rename oldpath to newpath, atomically replacing any compatible entry
// already at newpath.
//
// Replacing a directory requires it to be empty and uses the same
// orphan-count protection as lfs_remove_. Cross-pair moves use the
// global move state (prepmove) so a power loss mid-rename can be
// recovered. Renaming within the same metadata pair needs special id
// bookkeeping since the create can shift the old entry's id.
//
// Returns 0 on success, or a negative error code.
static int lfs_rename_(lfs_t *lfs, const char *oldpath, const char *newpath) {
    // deorphan if we haven't yet, needed at most once after poweron
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // find old entry
    lfs_mdir_t oldcwd;
    lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
    if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
        // missing, or refusing to move the root (id 0x3ff)
        return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
    }

    // find new entry
    lfs_mdir_t newcwd;
    uint16_t newid;
    lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
    if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
            !(prevtag == LFS_ERR_NOENT && lfs_path_islast(newpath))) {
        return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
    }

    // if we're in the same pair there's a few special cases...
    bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
    uint16_t newoldid = lfs_tag_id(oldtag);

    struct lfs_mlist prevdir;
    prevdir.next = lfs->mlist;
    if (prevtag == LFS_ERR_NOENT) {
        // if we're a file, don't allow trailing slashes
        if (lfs_path_isdir(newpath)
                && lfs_tag_type3(oldtag) != LFS_TYPE_DIR) {
            return LFS_ERR_NOTDIR;
        }

        // check that name fits
        lfs_size_t nlen = lfs_path_namelen(newpath);
        if (nlen > lfs->name_max) {
            return LFS_ERR_NAMETOOLONG;
        }

        // there is a small chance we are being renamed in the same
        // directory/ to an id less than our old id, the global update
        // to handle this is a bit messy
        if (samepair && newid <= newoldid) {
            newoldid += 1;
        }
    } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
        // can't replace a dir with a file or vice versa
        return (lfs_tag_type3(prevtag) == LFS_TYPE_DIR)
                ? LFS_ERR_ISDIR
                : LFS_ERR_NOTDIR;
    } else if (samepair && newid == newoldid) {
        // we're renaming to ourselves??
        return 0;
    } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // must be empty before removal
        lfs_block_t prevpair[2];
        lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
        if (res < 0) {
            return (int)res;
        }
        lfs_pair_fromle32(prevpair);

        // must be empty before removal
        err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
        if (err) {
            return err;
        }

        if (prevdir.m.count > 0 || prevdir.m.split) {
            return LFS_ERR_NOTEMPTY;
        }

        // mark fs as orphaned
        err = lfs_fs_preporphans(lfs, +1);
        if (err) {
            return err;
        }

        // I know it's crazy but yes, dir can be changed by our parent's
        // commit (if predecessor is child)
        prevdir.type = 0;
        prevdir.id = 0;
        lfs->mlist = &prevdir;
    }

    if (!samepair) {
        // record the pending move in gstate for power-loss recovery
        lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
    }

    // move over all attributes
    err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
            {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
                LFS_TYPE_DELETE, newid, 0), NULL},
            {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL},
            {LFS_MKTAG(lfs_tag_type3(oldtag),
                newid, lfs_path_namelen(newpath)), newpath},
            {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
            {LFS_MKTAG_IF(samepair,
                LFS_TYPE_DELETE, newoldid, 0), NULL}));
    if (err) {
        lfs->mlist = prevdir.next;
        return err;
    }

    // let commit clean up after move (if we're different! otherwise move
    // logic already fixed it for us)
    if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
        // prep gstate and delete move id
        lfs_fs_prepmove(lfs, 0x3ff, NULL);
        err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL}));
        if (err) {
            lfs->mlist = prevdir.next;
            return err;
        }
    }

    lfs->mlist = prevdir.next;
    if (prevtag != LFS_ERR_NOENT
            && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
        // fix orphan
        err = lfs_fs_preporphans(lfs, -1);
        if (err) {
            return err;
        }

        // unlink the replaced directory from the tail list
        err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
        if (err) {
            return err;
        }

        err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
        if (err) {
            return err;
        }
    }

    return 0;
}
4107 #endif
4108
4109 static lfs_ssize_t lfs_getattr_(lfs_t *lfs, const char *path,
4110 uint8_t type, void *buffer, lfs_size_t size) {
4111 lfs_mdir_t cwd;
4112 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
4113 if (tag < 0) {
4114 return tag;
4115 }
4116
4117 uint16_t id = lfs_tag_id(tag);
4118 if (id == 0x3ff) {
4119 // special case for root
4120 id = 0;
4121 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
4122 if (err) {
4123 return err;
4124 }
4125 }
4126
4127 tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
4128 LFS_MKTAG(LFS_TYPE_USERATTR + type,
4129 id, lfs_min(size, lfs->attr_max)),
4130 buffer);
4131 if (tag < 0) {
4132 if (tag == LFS_ERR_NOENT) {
4133 return LFS_ERR_NOATTR;
4134 }
4135
4136 return tag;
4137 }
4138
4139 return lfs_tag_size(tag);
4140 }
4141
4142 #ifndef LFS_READONLY
4143 static int lfs_commitattr(lfs_t *lfs, const char *path,
4144 uint8_t type, const void *buffer, lfs_size_t size) {
4145 lfs_mdir_t cwd;
4146 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
4147 if (tag < 0) {
4148 return tag;
4149 }
4150
4151 uint16_t id = lfs_tag_id(tag);
4152 if (id == 0x3ff) {
4153 // special case for root
4154 id = 0;
4155 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
4156 if (err) {
4157 return err;
4158 }
4159 }
4160
4161 return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
4162 {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
4163 }
4164 #endif
4165
4166 #ifndef LFS_READONLY
4167 static int lfs_setattr_(lfs_t *lfs, const char *path,
4168 uint8_t type, const void *buffer, lfs_size_t size) {
4169 if (size > lfs->attr_max) {
4170 return LFS_ERR_NOSPC;
4171 }
4172
4173 return lfs_commitattr(lfs, path, type, buffer, size);
4174 }
4175 #endif
4176
4177 #ifndef LFS_READONLY
// Remove a user attribute from the entry at path.
//
// Commits with a NULL buffer and the special size 0x3ff, which
// presumably acts as the deletion marker in the tag encoding — see
// lfs_dir_commit's USERATTR handling to confirm.
static int lfs_removeattr_(lfs_t *lfs, const char *path, uint8_t type) {
    return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
}
4181 #endif
4182
4183
4184 /// Filesystem operations ///
4185
4186 // compile time checks, see lfs.h for why these limits exist
4187 #if LFS_NAME_MAX > 1022
4188 #error "Invalid LFS_NAME_MAX, must be <= 1022"
4189 #endif
4190
4191 #if LFS_FILE_MAX > 2147483647
4192 #error "Invalid LFS_FILE_MAX, must be <= 2147483647"
4193 #endif
4194
4195 #if LFS_ATTR_MAX > 1022
4196 #error "Invalid LFS_ATTR_MAX, must be <= 1022"
4197 #endif
4198
4199 // common filesystem initialization
// Common initialization shared by mount and format: validate the
// configuration, allocate (or adopt) the read/prog/lookahead buffers,
// resolve the size limits, and reset in-memory state.
//
// On failure everything allocated so far is freed via lfs_deinit.
// Returns 0 on success, LFS_ERR_NOMEM on allocation failure.
static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
    lfs->cfg = cfg;
    lfs->block_count = cfg->block_count;  // May be 0
    int err = 0;

#ifdef LFS_MULTIVERSION
    // this driver only supports minor version < current minor version
    LFS_ASSERT(!lfs->cfg->disk_version || (
            (0xffff & (lfs->cfg->disk_version >> 16))
                    == LFS_DISK_VERSION_MAJOR
                && (0xffff & (lfs->cfg->disk_version >> 0))
                    <= LFS_DISK_VERSION_MINOR));
#endif

    // check that bool is a truthy-preserving type
    //
    // note the most common reason for this failure is a before-c99 compiler,
    // which littlefs currently does not support
    LFS_ASSERT((bool)0x80000000);

    // check that the required io functions are provided
    LFS_ASSERT(lfs->cfg->read != NULL);
#ifndef LFS_READONLY
    LFS_ASSERT(lfs->cfg->prog != NULL);
    LFS_ASSERT(lfs->cfg->erase != NULL);
    LFS_ASSERT(lfs->cfg->sync != NULL);
#endif

    // validate that the lfs-cfg sizes were initiated properly before
    // performing any arithmetic logics with them
    LFS_ASSERT(lfs->cfg->read_size != 0);
    LFS_ASSERT(lfs->cfg->prog_size != 0);
    LFS_ASSERT(lfs->cfg->cache_size != 0);

    // check that block size is a multiple of cache size is a multiple
    // of prog and read sizes
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
    LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
    LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);

    // check that the block size is large enough to fit all ctz pointers
    LFS_ASSERT(lfs->cfg->block_size >= 128);
    // this is the exact calculation for all ctz pointers, if this fails
    // and the simpler assert above does not, math must be broken
    LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
            <= lfs->cfg->block_size);

    // block_cycles = 0 is no longer supported.
    //
    // block_cycles is the number of erase cycles before littlefs evicts
    // metadata logs as a part of wear leveling. Suggested values are in the
    // range of 100-1000, or set block_cycles to -1 to disable block-level
    // wear-leveling.
    LFS_ASSERT(lfs->cfg->block_cycles != 0);

    // check that compact_thresh makes sense
    //
    // metadata can't be compacted below block_size/2, and metadata can't
    // exceed a block_size
    LFS_ASSERT(lfs->cfg->compact_thresh == 0
            || lfs->cfg->compact_thresh >= lfs->cfg->block_size/2);
    LFS_ASSERT(lfs->cfg->compact_thresh == (lfs_size_t)-1
            || lfs->cfg->compact_thresh <= lfs->cfg->block_size);

    // check that metadata_max is a multiple of read_size and prog_size,
    // and a factor of the block_size
    LFS_ASSERT(!lfs->cfg->metadata_max
            || lfs->cfg->metadata_max % lfs->cfg->read_size == 0);
    LFS_ASSERT(!lfs->cfg->metadata_max
            || lfs->cfg->metadata_max % lfs->cfg->prog_size == 0);
    LFS_ASSERT(!lfs->cfg->metadata_max
            || lfs->cfg->block_size % lfs->cfg->metadata_max == 0);

    // setup read cache
    if (lfs->cfg->read_buffer) {
        lfs->rcache.buffer = lfs->cfg->read_buffer;
    } else {
        lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->rcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // setup program cache
    if (lfs->cfg->prog_buffer) {
        lfs->pcache.buffer = lfs->cfg->prog_buffer;
    } else {
        lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
        if (!lfs->pcache.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // zero to avoid information leaks
    lfs_cache_zero(lfs, &lfs->rcache);
    lfs_cache_zero(lfs, &lfs->pcache);

    // setup lookahead buffer, note mount finishes initializing this after
    // we establish a decent pseudo-random seed
    LFS_ASSERT(lfs->cfg->lookahead_size > 0);
    if (lfs->cfg->lookahead_buffer) {
        lfs->lookahead.buffer = lfs->cfg->lookahead_buffer;
    } else {
        lfs->lookahead.buffer = lfs_malloc(lfs->cfg->lookahead_size);
        if (!lfs->lookahead.buffer) {
            err = LFS_ERR_NOMEM;
            goto cleanup;
        }
    }

    // check that the size limits are sane
    // a limit of 0 in the config selects the compile-time default
    LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
    lfs->name_max = lfs->cfg->name_max;
    if (!lfs->name_max) {
        lfs->name_max = LFS_NAME_MAX;
    }

    LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
    lfs->file_max = lfs->cfg->file_max;
    if (!lfs->file_max) {
        lfs->file_max = LFS_FILE_MAX;
    }

    LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
    lfs->attr_max = lfs->cfg->attr_max;
    if (!lfs->attr_max) {
        lfs->attr_max = LFS_ATTR_MAX;
    }

    LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);

    // inline_max: -1 disables inline files, 0 selects the default
    LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1
            || lfs->cfg->inline_max <= lfs->cfg->cache_size);
    LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1
            || lfs->cfg->inline_max <= lfs->attr_max);
    LFS_ASSERT(lfs->cfg->inline_max == (lfs_size_t)-1
            || lfs->cfg->inline_max <= ((lfs->cfg->metadata_max)
                ? lfs->cfg->metadata_max
                : lfs->cfg->block_size)/8);
    lfs->inline_max = lfs->cfg->inline_max;
    if (lfs->inline_max == (lfs_size_t)-1) {
        lfs->inline_max = 0;
    } else if (lfs->inline_max == 0) {
        lfs->inline_max = lfs_min(
                lfs->cfg->cache_size,
                lfs_min(
                    lfs->attr_max,
                    ((lfs->cfg->metadata_max)
                        ? lfs->cfg->metadata_max
                        : lfs->cfg->block_size)/8));
    }

    // setup default state
    lfs->root[0] = LFS_BLOCK_NULL;
    lfs->root[1] = LFS_BLOCK_NULL;
    lfs->mlist = NULL;
    lfs->seed = 0;
    lfs->gdisk = (lfs_gstate_t){0};
    lfs->gstate = (lfs_gstate_t){0};
    lfs->gdelta = (lfs_gstate_t){0};
#ifdef LFS_MIGRATE
    lfs->lfs1 = NULL;
#endif

    return 0;

cleanup:
    lfs_deinit(lfs);
    return err;
}
4372
4373 static int lfs_deinit(lfs_t *lfs) {
4374 // free allocated memory
4375 if (!lfs->cfg->read_buffer) {
4376 lfs_free(lfs->rcache.buffer);
4377 }
4378
4379 if (!lfs->cfg->prog_buffer) {
4380 lfs_free(lfs->pcache.buffer);
4381 }
4382
4383 if (!lfs->cfg->lookahead_buffer) {
4384 lfs_free(lfs->lookahead.buffer);
4385 }
4386
4387 return 0;
4388 }
4389
4390
4391
4392 #ifndef LFS_READONLY
// Format a block device with a fresh littlefs filesystem.
//
// Initializes state, allocates the root directory at pair {0, 1},
// writes a superblock entry, forces a compaction, and verifies the
// result with a fetch. State is always deinitialized before returning,
// so a mount is required afterwards.
//
// Returns 0 on success, or a negative error code.
static int lfs_format_(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        // formatting requires an explicit block count
        LFS_ASSERT(cfg->block_count != 0);

        // create free lookahead
        memset(lfs->lookahead.buffer, 0, lfs->cfg->lookahead_size);
        lfs->lookahead.start = 0;
        lfs->lookahead.size = lfs_min(8*lfs->cfg->lookahead_size,
                lfs->block_count);
        lfs->lookahead.next = 0;
        lfs_alloc_ckpoint(lfs);

        // create root dir
        lfs_mdir_t root;
        err = lfs_dir_alloc(lfs, &root);
        if (err) {
            goto cleanup;
        }

        // write one superblock
        lfs_superblock_t superblock = {
            .version     = lfs_fs_disk_version(lfs),
            .block_size  = lfs->cfg->block_size,
            .block_count = lfs->block_count,
            .name_max    = lfs->name_max,
            .file_max    = lfs->file_max,
            .attr_max    = lfs->attr_max,
        };

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock}));
        if (err) {
            goto cleanup;
        }

        // force compaction to prevent accidentally mounting any
        // older version of littlefs that may live on disk
        root.erased = false;
        err = lfs_dir_commit(lfs, &root, NULL, 0);
        if (err) {
            goto cleanup;
        }

        // sanity check that fetch works
        err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }
    }

cleanup:
    lfs_deinit(lfs);
    return err;

}
4458 #endif
4459
// State for Brent's cycle-detection algorithm, used to detect corrupted
// (cyclic) metadata-pair tail lists while walking them.
struct lfs_tortoise_t {
    lfs_block_t pair[2];    // last saved ("teleported") tortoise position
    lfs_size_t i;           // steps taken since the last teleport
    lfs_size_t period;      // current power-of-two step period
};
4465
// Advance Brent's cycle-detection state by one step along the tail list.
// Returns LFS_ERR_CORRUPT if dir's tail matches the saved tortoise
// position (a cycle exists), otherwise LFS_ERR_OK.
static int lfs_tortoise_detectcycles(
        const lfs_mdir_t *dir, struct lfs_tortoise_t *tortoise) {
    // detect cycles with Brent's algorithm
    if (lfs_pair_issync(dir->tail, tortoise->pair)) {
        LFS_WARN("Cycle detected in tail list");
        return LFS_ERR_CORRUPT;
    }
    if (tortoise->i == tortoise->period) {
        // teleport the tortoise to the current position and double the
        // period, per Brent's algorithm
        tortoise->pair[0] = dir->tail[0];
        tortoise->pair[1] = dir->tail[1];
        tortoise->i = 0;
        tortoise->period *= 2;
    }
    tortoise->i += 1;

    return LFS_ERR_OK;
}
4483
// Mount an existing littlefs image: walk the metadata-pair tail list
// looking for the superblock, validate version and geometry against the
// config, adopt the on-disk limits (name_max/file_max/attr_max), and
// collect any pending gstate. On failure the instance is unmounted and
// a negative error code is returned.
static int lfs_mount_(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = lfs_init(lfs, cfg);
    if (err) {
        return err;
    }

    // scan directory blocks for superblock and any global updates
    lfs_mdir_t dir = {.tail = {0, 1}};
    struct lfs_tortoise_t tortoise = {
        .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
        .i = 1,
        .period = 1,
    };
    while (!lfs_pair_isnull(dir.tail)) {
        err = lfs_tortoise_detectcycles(&dir, &tortoise);
        if (err < 0) {
            goto cleanup;
        }

        // fetch next block in tail list
        lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
                LFS_MKTAG(0x7ff, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
                NULL,
                lfs_dir_find_match, &(struct lfs_dir_find_match){
                    lfs, "littlefs", 8});
        if (tag < 0) {
            err = tag;
            goto cleanup;
        }

        // has superblock?
        if (tag && !lfs_tag_isdelete(tag)) {
            // update root
            lfs->root[0] = dir.pair[0];
            lfs->root[1] = dir.pair[1];

            // grab superblock
            lfs_superblock_t superblock;
            tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
                    LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock);
            if (tag < 0) {
                err = tag;
                goto cleanup;
            }
            lfs_superblock_fromle32(&superblock);

            // check version
            uint16_t major_version = (0xffff & (superblock.version >> 16));
            uint16_t minor_version = (0xffff & (superblock.version >> 0));
            // a different major, or a NEWER minor, is unmountable
            if (major_version != lfs_fs_disk_version_major(lfs)
                    || minor_version > lfs_fs_disk_version_minor(lfs)) {
                LFS_ERROR("Invalid version "
                        "v%"PRIu16".%"PRIu16" != v%"PRIu16".%"PRIu16,
                        major_version,
                        minor_version,
                        lfs_fs_disk_version_major(lfs),
                        lfs_fs_disk_version_minor(lfs));
                err = LFS_ERR_INVAL;
                goto cleanup;
            }

            // found older minor version? set an in-device only bit in the
            // gstate so we know we need to rewrite the superblock before
            // the first write
            bool needssuperblock = false;
            if (minor_version < lfs_fs_disk_version_minor(lfs)) {
                LFS_DEBUG("Found older minor version "
                        "v%"PRIu16".%"PRIu16" < v%"PRIu16".%"PRIu16,
                        major_version,
                        minor_version,
                        lfs_fs_disk_version_major(lfs),
                        lfs_fs_disk_version_minor(lfs));
                needssuperblock = true;
            }
            // note this bit is reserved on disk, so fetching more gstate
            // will not interfere here
            lfs_fs_prepsuperblock(lfs, needssuperblock);

            // check superblock configuration
            if (superblock.name_max) {
                if (superblock.name_max > lfs->name_max) {
                    LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
                            superblock.name_max, lfs->name_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->name_max = superblock.name_max;
            }

            if (superblock.file_max) {
                if (superblock.file_max > lfs->file_max) {
                    LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
                            superblock.file_max, lfs->file_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->file_max = superblock.file_max;
            }

            if (superblock.attr_max) {
                if (superblock.attr_max > lfs->attr_max) {
                    LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
                            superblock.attr_max, lfs->attr_max);
                    err = LFS_ERR_INVAL;
                    goto cleanup;
                }

                lfs->attr_max = superblock.attr_max;

                // we also need to update inline_max in case attr_max changed
                lfs->inline_max = lfs_min(lfs->inline_max, lfs->attr_max);
            }

            // this is where we get the block_count from disk if block_count=0
            if (lfs->cfg->block_count
                    && superblock.block_count != lfs->cfg->block_count) {
                LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")",
                        superblock.block_count, lfs->cfg->block_count);
                err = LFS_ERR_INVAL;
                goto cleanup;
            }

            lfs->block_count = superblock.block_count;

            if (superblock.block_size != lfs->cfg->block_size) {
                LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")",
                        superblock.block_size, lfs->cfg->block_size);
                err = LFS_ERR_INVAL;
                goto cleanup;
            }
        }

        // has gstate?
        err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
        if (err) {
            goto cleanup;
        }
    }

    // update littlefs with gstate
    if (!lfs_gstate_iszero(&lfs->gstate)) {
        LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
                lfs->gstate.tag,
                lfs->gstate.pair[0],
                lfs->gstate.pair[1]);
    }
    // force the tag valid so later xors against gdisk behave
    lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
    lfs->gdisk = lfs->gstate;

    // setup free lookahead, to distribute allocations uniformly across
    // boots, we start the allocator at a random location
    lfs->lookahead.start = lfs->seed % lfs->block_count;
    lfs_alloc_drop(lfs);

    return 0;

cleanup:
    lfs_unmount_(lfs);
    return err;
}
4648
// Unmount the filesystem. Only frees buffers allocated by lfs_init (see
// lfs_deinit); nothing is written to disk here.
static int lfs_unmount_(lfs_t *lfs) {
    return lfs_deinit(lfs);
}
4652
4653
/// Top-level filesystem operations ///
// Fill fsinfo with the filesystem's geometry and cached limits. The
// reported disk version is the code's own version unless the gstate
// "needs superblock" bit is set, in which case the (older) version is
// read back from the on-disk superblock.
static int lfs_fs_stat_(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
    // if the superblock is up-to-date, we must be on the most recent
    // minor version of littlefs
    if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
        fsinfo->disk_version = lfs_fs_disk_version(lfs);

    // otherwise we need to read the minor version on disk
    } else {
        // fetch the superblock
        lfs_mdir_t dir;
        int err = lfs_dir_fetch(lfs, &dir, lfs->root);
        if (err) {
            return err;
        }

        lfs_superblock_t superblock;
        lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                &superblock);
        if (tag < 0) {
            return tag;
        }
        lfs_superblock_fromle32(&superblock);

        // read the on-disk version
        fsinfo->disk_version = superblock.version;
    }

    // filesystem geometry
    fsinfo->block_size = lfs->cfg->block_size;
    fsinfo->block_count = lfs->block_count;

    // other on-disk configuration, we cache all of these for internal use
    fsinfo->name_max = lfs->name_max;
    fsinfo->file_max = lfs->file_max;
    fsinfo->attr_max = lfs->attr_max;

    return 0;
}
4694
4695 int lfs_fs_traverse_(lfs_t *lfs,
4696 int (*cb)(void *data, lfs_block_t block), void *data,
4697 bool includeorphans) {
4698 // iterate over metadata pairs
4699 lfs_mdir_t dir = {.tail = {0, 1}};
4700
4701 #ifdef LFS_MIGRATE
4702 // also consider v1 blocks during migration
4703 if (lfs->lfs1) {
4704 int err = lfs1_traverse(lfs, cb, data);
4705 if (err) {
4706 return err;
4707 }
4708
4709 dir.tail[0] = lfs->root[0];
4710 dir.tail[1] = lfs->root[1];
4711 }
4712 #endif
4713
4714 struct lfs_tortoise_t tortoise = {
4715 .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
4716 .i = 1,
4717 .period = 1,
4718 };
4719 int err = LFS_ERR_OK;
4720 while (!lfs_pair_isnull(dir.tail)) {
4721 err = lfs_tortoise_detectcycles(&dir, &tortoise);
4722 if (err < 0) {
4723 return LFS_ERR_CORRUPT;
4724 }
4725
4726 for (int i = 0; i < 2; i++) {
4727 int err = cb(data, dir.tail[i]);
4728 if (err) {
4729 return err;
4730 }
4731 }
4732
4733 // iterate through ids in directory
4734 int err = lfs_dir_fetch(lfs, &dir, dir.tail);
4735 if (err) {
4736 return err;
4737 }
4738
4739 for (uint16_t id = 0; id < dir.count; id++) {
4740 struct lfs_ctz ctz;
4741 lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
4742 LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
4743 if (tag < 0) {
4744 if (tag == LFS_ERR_NOENT) {
4745 continue;
4746 }
4747 return tag;
4748 }
4749 lfs_ctz_fromle32(&ctz);
4750
4751 if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
4752 err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
4753 ctz.head, ctz.size, cb, data);
4754 if (err) {
4755 return err;
4756 }
4757 } else if (includeorphans &&
4758 lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
4759 for (int i = 0; i < 2; i++) {
4760 err = cb(data, (&ctz.head)[i]);
4761 if (err) {
4762 return err;
4763 }
4764 }
4765 }
4766 }
4767 }
4768
4769 #ifndef LFS_READONLY
4770 // iterate over any open files
4771 for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
4772 if (f->type != LFS_TYPE_REG) {
4773 continue;
4774 }
4775
4776 if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
4777 int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
4778 f->ctz.head, f->ctz.size, cb, data);
4779 if (err) {
4780 return err;
4781 }
4782 }
4783
4784 if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
4785 int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
4786 f->block, f->pos, cb, data);
4787 if (err) {
4788 return err;
4789 }
4790 }
4791 }
4792 #endif
4793
4794 return 0;
4795 }
4796
4797 #ifndef LFS_READONLY
4798 static int lfs_fs_pred(lfs_t *lfs,
4799 const lfs_block_t pair[2], lfs_mdir_t *pdir) {
4800 // iterate over all directory directory entries
4801 pdir->tail[0] = 0;
4802 pdir->tail[1] = 1;
4803 struct lfs_tortoise_t tortoise = {
4804 .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
4805 .i = 1,
4806 .period = 1,
4807 };
4808 int err = LFS_ERR_OK;
4809 while (!lfs_pair_isnull(pdir->tail)) {
4810 err = lfs_tortoise_detectcycles(pdir, &tortoise);
4811 if (err < 0) {
4812 return LFS_ERR_CORRUPT;
4813 }
4814
4815 if (lfs_pair_cmp(pdir->tail, pair) == 0) {
4816 return 0;
4817 }
4818
4819 int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
4820 if (err) {
4821 return err;
4822 }
4823 }
4824
4825 return LFS_ERR_NOENT;
4826 }
4827 #endif
4828
4829 #ifndef LFS_READONLY
// Context for lfs_fs_parent_match: the block pair whose parent entry we
// are searching for.
struct lfs_fs_parent_match {
    lfs_t *lfs;
    const lfs_block_t pair[2];
};
4834 #endif
4835
4836 #ifndef LFS_READONLY
// Fetchmatch callback used by lfs_fs_parent. Reads the candidate
// entry's dir pair from disk and compares it against the pair being
// searched for. Returns LFS_CMP_EQ on match, LFS_CMP_LT otherwise so
// the search keeps going, or a negative error code on read failure.
static int lfs_fs_parent_match(void *data,
        lfs_tag_t tag, const void *buffer) {
    struct lfs_fs_parent_match *find = data;
    lfs_t *lfs = find->lfs;
    const struct lfs_diskoff *disk = buffer;
    (void)tag;

    lfs_block_t child[2];
    int err = lfs_bd_read(lfs,
            &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
            disk->block, disk->off, &child, sizeof(child));
    if (err) {
        return err;
    }

    lfs_pair_fromle32(child);
    return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
}
4855 #endif
4856
4857 #ifndef LFS_READONLY
// Search all metadata pairs for the directory entry whose dirstruct
// points at `pair`, i.e. pair's parent. On success returns the matching
// entry's tag with `parent` fetched; LFS_ERR_NOENT if no parent exists;
// another negative value on error.
static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
        lfs_mdir_t *parent) {
    // use fetchmatch with callback to find pairs
    parent->tail[0] = 0;
    parent->tail[1] = 1;
    struct lfs_tortoise_t tortoise = {
        .pair = {LFS_BLOCK_NULL, LFS_BLOCK_NULL},
        .i = 1,
        .period = 1,
    };
    int err = LFS_ERR_OK;
    while (!lfs_pair_isnull(parent->tail)) {
        err = lfs_tortoise_detectcycles(parent, &tortoise);
        if (err < 0) {
            return err;
        }

        lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
                LFS_MKTAG(0x7ff, 0, 0x3ff),
                LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
                NULL,
                lfs_fs_parent_match, &(struct lfs_fs_parent_match){
                    lfs, {pair[0], pair[1]}});
        // nonzero tag is either a real match or a real error;
        // zero means keep walking the tail list
        if (tag && tag != LFS_ERR_NOENT) {
            return tag;
        }
    }

    return LFS_ERR_NOENT;
}
4888 #endif
4889
// Set or clear the device-only "needs superblock" bit (bit 9) in the
// gstate tag; when set, the superblock entry is rewritten before the
// first write (see lfs_fs_desuperblock).
static void lfs_fs_prepsuperblock(lfs_t *lfs, bool needssuperblock) {
    lfs->gstate.tag = (lfs->gstate.tag & ~LFS_MKTAG(0, 0, 0x200))
            | (uint32_t)needssuperblock << 9;
}
4894
4895 #ifndef LFS_READONLY
// Adjust the orphan count tracked in the gstate tag's size field by
// `orphans` (may be negative), keeping the tag's top bit in sync with
// whether any orphans are outstanding. Always returns 0.
static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
    // assert we don't underflow below zero or overflow the 0x1ff field
    LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0x000 || orphans >= 0);
    LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) < 0x1ff || orphans <= 0);
    lfs->gstate.tag += orphans;
    lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
            ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));

    return 0;
}
4905 #endif
4906
4907 #ifndef LFS_READONLY
// Record a pending move of entry `id` in metadata pair `pair` in the
// gstate, or clear any pending move when id == 0x3ff (pair may then be
// NULL).
static void lfs_fs_prepmove(lfs_t *lfs,
        uint16_t id, const lfs_block_t pair[2]) {
    lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
            ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
    lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
    lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
}
4915 #endif
4916
4917 #ifndef LFS_READONLY
// If the "needs superblock" gstate bit is set (mounted an older minor
// version), rewrite the superblock entry with the current version and
// limits, then clear the bit. No-op otherwise. Returns 0 or a negative
// error code.
static int lfs_fs_desuperblock(lfs_t *lfs) {
    if (!lfs_gstate_needssuperblock(&lfs->gstate)) {
        return 0;
    }

    LFS_DEBUG("Rewriting superblock {0x%"PRIx32", 0x%"PRIx32"}",
            lfs->root[0],
            lfs->root[1]);

    lfs_mdir_t root;
    int err = lfs_dir_fetch(lfs, &root, lfs->root);
    if (err) {
        return err;
    }

    // write a new superblock
    lfs_superblock_t superblock = {
        .version     = lfs_fs_disk_version(lfs),
        .block_size  = lfs->cfg->block_size,
        .block_count = lfs->block_count,
        .name_max    = lfs->name_max,
        .file_max    = lfs->file_max,
        .attr_max    = lfs->attr_max,
    };

    lfs_superblock_tole32(&superblock);
    err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                &superblock}));
    if (err) {
        return err;
    }

    lfs_fs_prepsuperblock(lfs, false);
    return 0;
}
4954 #endif
4955
4956 #ifndef LFS_READONLY
// If the on-disk gstate records an interrupted move, complete it by
// deleting the moved-from entry and clearing the move from gstate in
// one commit. No-op if no move is pending. Returns 0 or a negative
// error code.
static int lfs_fs_demove(lfs_t *lfs) {
    if (!lfs_gstate_hasmove(&lfs->gdisk)) {
        return 0;
    }

    // Fix bad moves
    LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
            lfs->gdisk.pair[0],
            lfs->gdisk.pair[1],
            lfs_tag_id(lfs->gdisk.tag));

    // no other gstate is supported at this time, so if we found something else
    // something most likely went wrong in gstate calculation
    LFS_ASSERT(lfs_tag_type3(lfs->gdisk.tag) == LFS_TYPE_DELETE);

    // fetch and delete the moved entry
    lfs_mdir_t movedir;
    int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
    if (err) {
        return err;
    }

    // prep gstate and delete move id
    uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
    lfs_fs_prepmove(lfs, 0x3ff, NULL);
    err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
            {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
    if (err) {
        return err;
    }

    return 0;
}
4990 #endif
4991
4992 #ifndef LFS_READONLY
// Repair orphaned metadata pairs recorded in the gstate orphan count.
// `powerloss` enables fixing full orphans, which are only unexpected
// after a power loss (operations like lfs_mkdir create them on purpose).
// Returns 0 or a negative error code.
static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
    if (!lfs_gstate_hasorphans(&lfs->gstate)) {
        return 0;
    }

    // Check for orphans in two separate passes:
    // - 1 for half-orphans (relocations)
    // - 2 for full-orphans (removes/renames)
    //
    // Two separate passes are needed as half-orphans can contain outdated
    // references to full-orphans, effectively hiding them from the deorphan
    // search.
    //
    int pass = 0;
    while (pass < 2) {
        // Fix any orphans
        lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
        lfs_mdir_t dir;
        bool moreorphans = false;

        // iterate over all directory entries
        while (!lfs_pair_isnull(pdir.tail)) {
            int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
            if (err) {
                return err;
            }

            // check head blocks for orphans
            if (!pdir.split) {
                // check if we have a parent
                lfs_mdir_t parent;
                lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
                if (tag < 0 && tag != LFS_ERR_NOENT) {
                    return tag;
                }

                if (pass == 0 && tag != LFS_ERR_NOENT) {
                    // half-orphan check: does the parent's recorded pair
                    // agree with the pair we actually followed?
                    lfs_block_t pair[2];
                    lfs_stag_t state = lfs_dir_get(lfs, &parent,
                            LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
                    if (state < 0) {
                        return state;
                    }
                    lfs_pair_fromle32(pair);

                    if (!lfs_pair_issync(pair, pdir.tail)) {
                        // we have desynced
                        LFS_DEBUG("Fixing half-orphan "
                                "{0x%"PRIx32", 0x%"PRIx32"} "
                                "-> {0x%"PRIx32", 0x%"PRIx32"}",
                                pdir.tail[0], pdir.tail[1], pair[0], pair[1]);

                        // fix pending move in this pair? this looks like an
                        // optimization but is in fact _required_ since
                        // relocating may outdate the move.
                        uint16_t moveid = 0x3ff;
                        if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
                            moveid = lfs_tag_id(lfs->gstate.tag);
                            LFS_DEBUG("Fixing move while fixing orphans "
                                    "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
                                    pdir.pair[0], pdir.pair[1], moveid);
                            lfs_fs_prepmove(lfs, 0x3ff, NULL);
                        }

                        lfs_pair_tole32(pair);
                        state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
                                {LFS_MKTAG_IF(moveid != 0x3ff,
                                    LFS_TYPE_DELETE, moveid, 0), NULL},
                                {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
                                    pair}));
                        lfs_pair_fromle32(pair);
                        if (state < 0) {
                            return state;
                        }

                        // did our commit create more orphans?
                        if (state == LFS_OK_ORPHANED) {
                            moreorphans = true;
                        }

                        // refetch tail
                        continue;
                    }
                }

                // note we only check for full orphans if we may have had a
                // power-loss, otherwise orphans are created intentionally
                // during operations such as lfs_mkdir
                if (pass == 1 && tag == LFS_ERR_NOENT && powerloss) {
                    // we are an orphan
                    LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
                            pdir.tail[0], pdir.tail[1]);

                    // steal state
                    err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
                    if (err) {
                        return err;
                    }

                    // steal tail
                    lfs_pair_tole32(dir.tail);
                    int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
                            {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
                                dir.tail}));
                    lfs_pair_fromle32(dir.tail);
                    if (state < 0) {
                        return state;
                    }

                    // did our commit create more orphans?
                    if (state == LFS_OK_ORPHANED) {
                        moreorphans = true;
                    }

                    // refetch tail
                    continue;
                }
            }

            pdir = dir;
        }

        // if a commit orphaned more pairs, restart from the first pass
        pass = moreorphans ? 0 : pass+1;
    }

    // mark orphans as fixed
    return lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
}
5121 #endif
5122
5123 #ifndef LFS_READONLY
5124 static int lfs_fs_forceconsistency(lfs_t *lfs) {
5125 int err = lfs_fs_desuperblock(lfs);
5126 if (err) {
5127 return err;
5128 }
5129
5130 err = lfs_fs_demove(lfs);
5131 if (err) {
5132 return err;
5133 }
5134
5135 err = lfs_fs_deorphan(lfs, true);
5136 if (err) {
5137 return err;
5138 }
5139
5140 return 0;
5141 }
5142 #endif
5143
5144 #ifndef LFS_READONLY
// Bring the filesystem to a fully consistent on-disk state: run all
// pending repairs, then flush any gstate that differs from what's on
// disk via an empty commit on the root. Returns 0 or a negative error
// code.
static int lfs_fs_mkconsistent_(lfs_t *lfs) {
    // lfs_fs_forceconsistency does most of the work here
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // do we have any pending gstate?
    lfs_gstate_t delta = {0};
    lfs_gstate_xor(&delta, &lfs->gdisk);
    lfs_gstate_xor(&delta, &lfs->gstate);
    if (!lfs_gstate_iszero(&delta)) {
        // lfs_dir_commit will implicitly write out any pending gstate
        lfs_mdir_t root;
        err = lfs_dir_fetch(lfs, &root, lfs->root);
        if (err) {
            return err;
        }

        err = lfs_dir_commit(lfs, &root, NULL, 0);
        if (err) {
            return err;
        }
    }

    return 0;
}
5172 #endif
5173
5174 static int lfs_fs_size_count(void *p, lfs_block_t block) {
5175 (void)block;
5176 lfs_size_t *size = p;
5177 *size += 1;
5178 return 0;
5179 }
5180
5181 static lfs_ssize_t lfs_fs_size_(lfs_t *lfs) {
5182 lfs_size_t size = 0;
5183 int err = lfs_fs_traverse_(lfs, lfs_fs_size_count, &size, false);
5184 if (err) {
5185 return err;
5186 }
5187
5188 return size;
5189 }
5190
5191 // explicit garbage collection
5192 #ifndef LFS_READONLY
// Explicit garbage collection: run pending repairs, proactively compact
// metadata pairs that exceed compact_thresh, and repopulate the block
// allocator's lookahead buffer. Returns 0 or a negative error code.
static int lfs_fs_gc_(lfs_t *lfs) {
    // force consistency, even if we're not necessarily going to write,
    // because this function is supposed to take care of janitorial work
    // isn't it?
    int err = lfs_fs_forceconsistency(lfs);
    if (err) {
        return err;
    }

    // try to compact metadata pairs, note we can't really accomplish
    // anything if compact_thresh doesn't at least leave a prog_size
    // available
    if (lfs->cfg->compact_thresh
            < lfs->cfg->block_size - lfs->cfg->prog_size) {
        // iterate over all mdirs
        lfs_mdir_t mdir = {.tail = {0, 1}};
        while (!lfs_pair_isnull(mdir.tail)) {
            err = lfs_dir_fetch(lfs, &mdir, mdir.tail);
            if (err) {
                return err;
            }

            // not erased? exceeds our compaction threshold?
            // (compact_thresh == 0 means the default: 7/8 of a block)
            if (!mdir.erased || ((lfs->cfg->compact_thresh == 0)
                    ? mdir.off > lfs->cfg->block_size - lfs->cfg->block_size/8
                    : mdir.off > lfs->cfg->compact_thresh)) {
                // the easiest way to trigger a compaction is to mark
                // the mdir as unerased and add an empty commit
                mdir.erased = false;
                err = lfs_dir_commit(lfs, &mdir, NULL, 0);
                if (err) {
                    return err;
                }
            }
        }
    }

    // try to populate the lookahead buffer, unless it's already full
    if (lfs->lookahead.size < 8*lfs->cfg->lookahead_size) {
        err = lfs_alloc_scan(lfs);
        if (err) {
            return err;
        }
    }

    return 0;
}
5240 #endif
5241
5242 #ifndef LFS_READONLY
// Grow the filesystem to span block_count blocks, updating both the
// in-RAM count and the on-disk superblock entry. Shrinking is not
// supported (asserted). Returns 0 or a negative error code.
static int lfs_fs_grow_(lfs_t *lfs, lfs_size_t block_count) {
    // shrinking is not supported
    LFS_ASSERT(block_count >= lfs->block_count);

    if (block_count > lfs->block_count) {
        lfs->block_count = block_count;

        // fetch the root
        lfs_mdir_t root;
        int err = lfs_dir_fetch(lfs, &root, lfs->root);
        if (err) {
            return err;
        }

        // update the superblock
        lfs_superblock_t superblock;
        lfs_stag_t tag = lfs_dir_get(lfs, &root, LFS_MKTAG(0x7ff, 0x3ff, 0),
                LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                &superblock);
        if (tag < 0) {
            return tag;
        }
        lfs_superblock_fromle32(&superblock);

        superblock.block_count = lfs->block_count;

        // recommit with the tag returned by lfs_dir_get so the existing
        // inline-struct entry is replaced in place
        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
                {tag, &superblock}));
        if (err) {
            return err;
        }
    }

    return 0;
}
5279 #endif
5280
5281 #ifdef LFS_MIGRATE
////// Migration from littlefs v1 below this //////
5283
5284 /// Version info ///
5285
// Software library version
// Major (top 16-bits), incremented on backwards incompatible changes
// Minor (bottom 16-bits), incremented on feature additions
#define LFS1_VERSION 0x00010007
#define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
#define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))

// Version of On-disk data structures
// Major (top 16-bits), incremented on backwards incompatible changes
// Minor (bottom 16-bits), incremented on feature additions
#define LFS1_DISK_VERSION 0x00010001
#define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
#define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
5299
5300
5301 /// v1 Definitions ///
5302
5303 // File types
// File types (littlefs v1 on-disk entry type values)
enum lfs1_type {
    LFS1_TYPE_REG        = 0x11,
    LFS1_TYPE_DIR        = 0x22,
    LFS1_TYPE_SUPERBLOCK = 0x2e,
};
5309
// Minimal v1 filesystem state tracked during migration: just the v1
// root dir pair.
typedef struct lfs1 {
    lfs_block_t root[2];
} lfs1_t;
5313
// A v1 directory entry: its offset within the dir block plus the raw
// on-disk representation.
typedef struct lfs1_entry {
    lfs_off_t off;          // offset of this entry in its dir block

    struct lfs1_disk_entry {
        uint8_t type;       // one of enum lfs1_type
        uint8_t elen;       // length of the entry-struct region
        uint8_t alen;       // length of the attribute region
        uint8_t nlen;       // length of the name region
        union {
            struct {
                lfs_block_t head;   // head of the file's ctz list
                lfs_size_t size;    // file size in bytes
            } file;
            lfs_block_t dir[2];     // child dir's block pair
        } u;
    } d;
} lfs1_entry_t;
5331
// A fetched v1 directory: in-RAM iteration state plus the raw on-disk
// header.
typedef struct lfs1_dir {
    struct lfs1_dir *next;
    lfs_block_t pair[2];    // the dir's metadata pair
    lfs_off_t off;          // current read offset within the block

    lfs_block_t head[2];    // first pair of the dir chain
    lfs_off_t pos;          // logical position across chained blocks

    struct lfs1_disk_dir {
        uint32_t rev;           // revision count, higher wins
        lfs_size_t size;        // block usage; top bit flags a continuation
        lfs_block_t tail[2];    // next dir pair in the chain
    } d;
} lfs1_dir_t;
5346
// The v1 superblock entry as stored on disk, plus its offset.
typedef struct lfs1_superblock {
    lfs_off_t off;

    struct lfs1_disk_superblock {
        uint8_t type;
        uint8_t elen;
        uint8_t alen;
        uint8_t nlen;
        lfs_block_t root[2];    // the v1 root dir pair
        uint32_t block_size;
        uint32_t block_count;
        uint32_t version;       // LFS1_DISK_VERSION-style version word
        char magic[8];          // "littlefs"
    } d;
} lfs1_superblock_t;
5362
5363
5364 /// Low-level wrappers v1->v2 ///
// Fold `size` bytes of buffer into the running crc (v1-style wrapper
// around lfs_crc).
static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
    *crc = lfs_crc(*crc, buffer, size);
}
5368
// Read raw bytes from the block device on behalf of v1 code, going
// through the v2 cache layer.
static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
        lfs_off_t off, void *buffer, lfs_size_t size) {
    // if we ever do more than writes to alternating pairs,
    // this may need to consider pcache
    return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
            block, off, buffer, size);
}
5376
5377 static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
5378 lfs_off_t off, lfs_size_t size, uint32_t *crc) {
5379 for (lfs_off_t i = 0; i < size; i++) {
5380 uint8_t c;
5381 int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
5382 if (err) {
5383 return err;
5384 }
5385
5386 lfs1_crc(crc, &c, 1);
5387 }
5388
5389 return 0;
5390 }
5391
5392
5393 /// Endian swapping functions ///
// Convert a v1 dir header from little-endian (disk order) to host order.
static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
    d->rev     = lfs_fromle32(d->rev);
    d->size    = lfs_fromle32(d->size);
    d->tail[0] = lfs_fromle32(d->tail[0]);
    d->tail[1] = lfs_fromle32(d->tail[1]);
}

// Convert a v1 dir header from host order to little-endian (disk order).
static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
    d->rev     = lfs_tole32(d->rev);
    d->size    = lfs_tole32(d->size);
    d->tail[0] = lfs_tole32(d->tail[0]);
    d->tail[1] = lfs_tole32(d->tail[1]);
}

// Convert a v1 entry's union from disk order to host order; only the
// 32-bit words of the union need swapping, the leading bytes don't.
static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
    d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
    d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
}

// Convert a v1 entry's union from host order to disk order.
static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
    d->u.dir[0] = lfs_tole32(d->u.dir[0]);
    d->u.dir[1] = lfs_tole32(d->u.dir[1]);
}

// Convert a v1 superblock from disk order to host order.
static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
    d->root[0]     = lfs_fromle32(d->root[0]);
    d->root[1]     = lfs_fromle32(d->root[1]);
    d->block_size  = lfs_fromle32(d->block_size);
    d->block_count = lfs_fromle32(d->block_count);
    d->version     = lfs_fromle32(d->version);
}
5425
5426
/// Metadata pair and directory operations ///
5428 static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
5429 return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
5430 }
5431
// Fetch a v1 directory from a metadata pair: read both blocks, crc-check
// them, and keep whichever valid copy has the newer revision. Returns 0
// on success, LFS_ERR_CORRUPT if neither block checks out.
static int lfs1_dir_fetch(lfs_t *lfs,
        lfs1_dir_t *dir, const lfs_block_t pair[2]) {
    // copy out pair, otherwise may be aliasing dir
    const lfs_block_t tpair[2] = {pair[0], pair[1]};
    bool valid = false;

    // check both blocks for the most recent revision
    for (int i = 0; i < 2; i++) {
        struct lfs1_disk_dir test;
        int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
        // note: converting before the err check is harmless, the swap of
        // garbage bytes is never used
        lfs1_dir_fromle32(&test);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                continue;
            }
            return err;
        }

        // skip if we already hold a newer revision (signed compare
        // handles revision wraparound)
        if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
            continue;
        }

        // sanity-check the size field (top bit is the continuation flag)
        if ((0x7fffffff & test.size) < sizeof(test)+4 ||
                (0x7fffffff & test.size) > lfs->cfg->block_size) {
            continue;
        }

        // crc covers the header (in disk order) plus the entry region
        uint32_t crc = 0xffffffff;
        lfs1_dir_tole32(&test);
        lfs1_crc(&crc, &test, sizeof(test));
        lfs1_dir_fromle32(&test);
        err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
                (0x7fffffff & test.size) - sizeof(test), &crc);
        if (err) {
            if (err == LFS_ERR_CORRUPT) {
                continue;
            }
            return err;
        }

        if (crc != 0) {
            continue;
        }

        valid = true;

        // setup dir in case it's valid
        dir->pair[0] = tpair[(i+0) % 2];
        dir->pair[1] = tpair[(i+1) % 2];
        dir->off = sizeof(dir->d);
        dir->d = test;
    }

    if (!valid) {
        LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
                tpair[0], tpair[1]);
        return LFS_ERR_CORRUPT;
    }

    return 0;
}
5493
// Read the next entry from a v1 directory, following chained dir blocks
// as needed. Returns 0 with *entry filled, LFS_ERR_NOENT at the end of
// the directory, or a negative error code.
static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
    // past the last entry in this block? (size's top bit flags a
    // continuation, the trailing 4 bytes are the crc)
    while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
        if (!(0x80000000 & dir->d.size)) {
            entry->off = dir->off;
            return LFS_ERR_NOENT;
        }

        int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
        if (err) {
            return err;
        }

        dir->off = sizeof(dir->d);
        dir->pos += sizeof(dir->d) + 4;
    }

    int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
            &entry->d, sizeof(entry->d));
    // note: conversion before the err check is harmless, garbage bytes
    // are never used
    lfs1_entry_fromle32(&entry->d);
    if (err) {
        return err;
    }

    entry->off = dir->off;
    dir->off += lfs1_entry_size(entry);
    dir->pos += lfs1_entry_size(entry);
    return 0;
}
5522
5523 /// littlefs v1 specific operations ///
// Walk every block referenced by the v1 filesystem during migration,
// invoking cb(data, block) per reference: each dir pair, each file's
// ctz list, and any v2 directories already threaded into the v1 chain.
// Returns 0, a negative error code, or the first nonzero result from cb.
int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
    if (lfs_pair_isnull(lfs->lfs1->root)) {
        return 0;
    }

    // iterate over metadata pairs
    lfs1_dir_t dir;
    lfs1_entry_t entry;
    lfs_block_t cwd[2] = {0, 1};

    while (true) {
        for (int i = 0; i < 2; i++) {
            int err = cb(data, cwd[i]);
            if (err) {
                return err;
            }
        }

        int err = lfs1_dir_fetch(lfs, &dir, cwd);
        if (err) {
            return err;
        }

        // iterate over contents
        while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
            err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
                    &entry.d, sizeof(entry.d));
            lfs1_entry_fromle32(&entry.d);
            if (err) {
                return err;
            }

            dir.off += lfs1_entry_size(&entry);
            // regular files (by type class) carry a ctz list to walk
            if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
                err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
                        entry.d.u.file.head, entry.d.u.file.size, cb, data);
                if (err) {
                    return err;
                }
            }
        }

        // we also need to check if we contain a threaded v2 directory
        lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
        while (dir2.split) {
            err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
            if (err) {
                // a fetch failure here just means no v2 dir is threaded
                break;
            }

            for (int i = 0; i < 2; i++) {
                err = cb(data, dir2.pair[i]);
                if (err) {
                    return err;
                }
            }
        }

        cwd[0] = dir.d.tail[0];
        cwd[1] = dir.d.tail[1];

        if (lfs_pair_isnull(cwd)) {
            break;
        }
    }

    return 0;
}
5592
5593 static int lfs1_moved(lfs_t *lfs, const void *e) {
5594 if (lfs_pair_isnull(lfs->lfs1->root)) {
5595 return 0;
5596 }
5597
5598 // skip superblock
5599 lfs1_dir_t cwd;
5600 int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
5601 if (err) {
5602 return err;
5603 }
5604
5605 // iterate over all directory directory entries
5606 lfs1_entry_t entry;
5607 while (!lfs_pair_isnull(cwd.d.tail)) {
5608 err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
5609 if (err) {
5610 return err;
5611 }
5612
5613 while (true) {
5614 err = lfs1_dir_next(lfs, &cwd, &entry);
5615 if (err && err != LFS_ERR_NOENT) {
5616 return err;
5617 }
5618
5619 if (err == LFS_ERR_NOENT) {
5620 break;
5621 }
5622
5623 if (!(0x80 & entry.d.type) &&
5624 memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
5625 return true;
5626 }
5627 }
5628 }
5629
5630 return false;
5631 }
5632
5633 /// Filesystem operations ///
// Mounts a littlefs v1 filesystem: initializes the lfs object, loads
// the v1 superblock from the {0, 1} metadata pair, and validates its
// magic and on-disk version.
//
// On any failure the lfs object is deinitialized before returning.
static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
        const struct lfs_config *cfg) {
    int err = 0;
    {
        err = lfs_init(lfs, cfg);
        if (err) {
            return err;
        }

        lfs->lfs1 = lfs1;
        lfs->lfs1->root[0] = LFS_BLOCK_NULL;
        lfs->lfs1->root[1] = LFS_BLOCK_NULL;

        // setup free lookahead
        lfs->lookahead.start = 0;
        lfs->lookahead.size = 0;
        lfs->lookahead.next = 0;
        lfs_alloc_ckpoint(lfs);

        // load superblock
        lfs1_dir_t dir;
        lfs1_superblock_t superblock;
        err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
        if (err && err != LFS_ERR_CORRUPT) {
            goto cleanup;
        }

        if (!err) {
            err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
                    &superblock.d, sizeof(superblock.d));
            lfs1_superblock_fromle32(&superblock.d);
            if (err) {
                goto cleanup;
            }

            lfs->lfs1->root[0] = superblock.d.root[0];
            lfs->lfs1->root[1] = superblock.d.root[1];
        }

        // note: short-circuit on err keeps us from reading superblock
        // when the fetch above failed with LFS_ERR_CORRUPT, in which
        // case superblock is uninitialized
        if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
            LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
                    0, 1);
            err = LFS_ERR_CORRUPT;
            goto cleanup;
        }

        uint16_t major_version = (0xffff & (superblock.d.version >> 16));
        uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
        // a newer minor version is readable, a different major is not
        if ((major_version != LFS1_DISK_VERSION_MAJOR ||
             minor_version > LFS1_DISK_VERSION_MINOR)) {
            LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
            err = LFS_ERR_INVAL;
            goto cleanup;
        }

        return 0;
    }

cleanup:
    lfs_deinit(lfs);
    return err;
}
5696
// Releases resources allocated by lfs1_mount; the v1 filesystem on
// disk is left untouched.
static int lfs1_unmount(lfs_t *lfs) {
    return lfs_deinit(lfs);
}
5700
5701 /// v1 migration ///
// Migrates a littlefs v1 filesystem to v2 in place.
//
// Mounts the v1 filesystem, rewrites each v1 directory as a v2
// metadata pair threaded into a new list, then commits a v2 superblock
// over the old {0, 1} pair. Writing the new superblock is the atomic
// point of no return; before that, a power loss leaves a valid v1
// filesystem.
static int lfs_migrate_(lfs_t *lfs, const struct lfs_config *cfg) {
    struct lfs1 lfs1;

    // Indeterminate filesystem size not allowed for migration.
    LFS_ASSERT(cfg->block_count != 0);

    int err = lfs1_mount(lfs, &lfs1, cfg);
    if (err) {
        return err;
    }

    {
        // iterate through each directory, copying over entries
        // into new directory
        lfs1_dir_t dir1;
        lfs_mdir_t dir2;
        dir1.d.tail[0] = lfs->lfs1->root[0];
        dir1.d.tail[1] = lfs->lfs1->root[1];
        while (!lfs_pair_isnull(dir1.d.tail)) {
            // iterate old dir
            err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
            if (err) {
                goto cleanup;
            }

            // create new dir and bind as temporary pretend root
            err = lfs_dir_alloc(lfs, &dir2);
            if (err) {
                goto cleanup;
            }

            dir2.rev = dir1.d.rev;
            dir1.head[0] = dir1.pair[0];
            dir1.head[1] = dir1.pair[1];
            lfs->root[0] = dir2.pair[0];
            lfs->root[1] = dir2.pair[1];

            err = lfs_dir_commit(lfs, &dir2, NULL, 0);
            if (err) {
                goto cleanup;
            }

            while (true) {
                lfs1_entry_t entry1;
                err = lfs1_dir_next(lfs, &dir1, &entry1);
                if (err && err != LFS_ERR_NOENT) {
                    goto cleanup;
                }

                if (err == LFS_ERR_NOENT) {
                    break;
                }

                // check that entry has not been moved
                if (entry1.d.type & 0x80) {
                    int moved = lfs1_moved(lfs, &entry1.d.u);
                    if (moved < 0) {
                        err = moved;
                        goto cleanup;
                    }

                    if (moved) {
                        continue;
                    }

                    // move did not complete; migrate as a live entry
                    entry1.d.type &= ~0x80;
                }

                // also fetch name
                char name[LFS_NAME_MAX+1];
                memset(name, 0, sizeof(name));
                err = lfs1_bd_read(lfs, dir1.pair[0],
                        entry1.off + 4+entry1.d.elen+entry1.d.alen,
                        name, entry1.d.nlen);
                if (err) {
                    goto cleanup;
                }

                bool isdir = (entry1.d.type == LFS1_TYPE_DIR);

                // create entry in new dir
                err = lfs_dir_fetch(lfs, &dir2, lfs->root);
                if (err) {
                    goto cleanup;
                }

                uint16_t id;
                err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
                if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
                    err = (err < 0) ? err : LFS_ERR_EXIST;
                    goto cleanup;
                }

                // note: the v1 entry's union (CTZ head/size or dir
                // pair) is reused verbatim as the v2 struct tag
                lfs1_entry_tole32(&entry1.d);
                err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
                        {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
                        {LFS_MKTAG_IF_ELSE(isdir,
                            LFS_TYPE_DIR, id, entry1.d.nlen,
                            LFS_TYPE_REG, id, entry1.d.nlen),
                                name},
                        {LFS_MKTAG_IF_ELSE(isdir,
                            LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
                            LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
                                &entry1.d.u}));
                lfs1_entry_fromle32(&entry1.d);
                if (err) {
                    goto cleanup;
                }
            }

            if (!lfs_pair_isnull(dir1.d.tail)) {
                // find last block and update tail to thread into fs
                err = lfs_dir_fetch(lfs, &dir2, lfs->root);
                if (err) {
                    goto cleanup;
                }

                while (dir2.split) {
                    err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
                    if (err) {
                        goto cleanup;
                    }
                }

                lfs_pair_tole32(dir2.pair);
                err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
                        {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
                lfs_pair_fromle32(dir2.pair);
                if (err) {
                    goto cleanup;
                }
            }

            // Copy over first block to thread into fs. Unfortunately
            // if this fails there is not much we can do.
            LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
                        "-> {0x%"PRIx32", 0x%"PRIx32"}",
                    lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);

            err = lfs_bd_erase(lfs, dir1.head[1]);
            if (err) {
                goto cleanup;
            }

            err = lfs_dir_fetch(lfs, &dir2, lfs->root);
            if (err) {
                goto cleanup;
            }

            // byte-for-byte copy of the committed v2 metadata into the
            // old v1 pair's second block
            for (lfs_off_t i = 0; i < dir2.off; i++) {
                uint8_t dat;
                err = lfs_bd_read(lfs,
                        NULL, &lfs->rcache, dir2.off,
                        dir2.pair[0], i, &dat, 1);
                if (err) {
                    goto cleanup;
                }

                err = lfs_bd_prog(lfs,
                        &lfs->pcache, &lfs->rcache, true,
                        dir1.head[1], i, &dat, 1);
                if (err) {
                    goto cleanup;
                }
            }

            err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
            if (err) {
                goto cleanup;
            }
        }

        // Create new superblock. This marks a successful migration!
        err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }

        // hand-construct a v2 mdir over the v1 superblock pair
        dir2.pair[0] = dir1.pair[0];
        dir2.pair[1] = dir1.pair[1];
        dir2.rev = dir1.d.rev;
        dir2.off = sizeof(dir2.rev);
        dir2.etag = 0xffffffff;
        dir2.count = 0;
        dir2.tail[0] = lfs->lfs1->root[0];
        dir2.tail[1] = lfs->lfs1->root[1];
        dir2.erased = false;
        dir2.split = true;

        lfs_superblock_t superblock = {
            .version     = LFS_DISK_VERSION,
            .block_size  = lfs->cfg->block_size,
            .block_count = lfs->cfg->block_count,
            .name_max    = lfs->name_max,
            .file_max    = lfs->file_max,
            .attr_max    = lfs->attr_max,
        };

        lfs_superblock_tole32(&superblock);
        err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
                {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
                {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
                {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
                    &superblock}));
        if (err) {
            goto cleanup;
        }

        // sanity check that fetch works
        err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
        if (err) {
            goto cleanup;
        }

        // force compaction to prevent accidentally mounting v1
        dir2.erased = false;
        err = lfs_dir_commit(lfs, &dir2, NULL, 0);
        if (err) {
            goto cleanup;
        }
    }

cleanup:
    lfs1_unmount(lfs);
    return err;
}
5928
5929 #endif
5930
5931
5932 /// Public API wrappers ///
5933
5934 // Here we can add tracing/thread safety easily
5935
5936 // Thread-safe wrappers if enabled
5937 #ifdef LFS_THREADSAFE
5938 #define LFS_LOCK(cfg) cfg->lock(cfg)
5939 #define LFS_UNLOCK(cfg) cfg->unlock(cfg)
5940 #else
5941 #define LFS_LOCK(cfg) ((void)cfg, 0)
5942 #define LFS_UNLOCK(cfg) ((void)cfg)
5943 #endif
5944
5945 // Public API
5946 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_format_: locks via cfg (lfs is not yet
// initialized), traces the full config, delegates, and unlocks.
int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = LFS_LOCK(cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_format(%p, %p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".block_size=%"PRIu32", .block_count=%"PRIu32", "
                ".block_cycles=%"PRId32", .cache_size=%"PRIu32", "
                ".lookahead_size=%"PRIu32", .read_buffer=%p, "
                ".prog_buffer=%p, .lookahead_buffer=%p, "
                ".name_max=%"PRIu32", .file_max=%"PRIu32", "
                ".attr_max=%"PRIu32"})",
            (void*)lfs, (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
            cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
            cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
            cfg->name_max, cfg->file_max, cfg->attr_max);

    err = lfs_format_(lfs, cfg);

    LFS_TRACE("lfs_format -> %d", err);
    LFS_UNLOCK(cfg);
    return err;
}
5975 #endif
5976
// Thread-safe wrapper for lfs_mount_: locks via cfg (lfs is not yet
// initialized), traces the full config, delegates, and unlocks.
int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = LFS_LOCK(cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".block_size=%"PRIu32", .block_count=%"PRIu32", "
                ".block_cycles=%"PRId32", .cache_size=%"PRIu32", "
                ".lookahead_size=%"PRIu32", .read_buffer=%p, "
                ".prog_buffer=%p, .lookahead_buffer=%p, "
                ".name_max=%"PRIu32", .file_max=%"PRIu32", "
                ".attr_max=%"PRIu32"})",
            (void*)lfs, (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
            cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
            cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
            cfg->name_max, cfg->file_max, cfg->attr_max);

    err = lfs_mount_(lfs, cfg);

    LFS_TRACE("lfs_mount -> %d", err);
    LFS_UNLOCK(cfg);
    return err;
}
6005
6006 int lfs_unmount(lfs_t *lfs) {
6007 int err = LFS_LOCK(lfs->cfg);
6008 if (err) {
6009 return err;
6010 }
6011 LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
6012
6013 err = lfs_unmount_(lfs);
6014
6015 LFS_TRACE("lfs_unmount -> %d", err);
6016 LFS_UNLOCK(lfs->cfg);
6017 return err;
6018 }
6019
6020 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_remove_: lock, trace, delegate, unlock.
int lfs_remove(lfs_t *lfs, const char *path) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);

    err = lfs_remove_(lfs, path);

    LFS_TRACE("lfs_remove -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6034 #endif
6035
6036 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_rename_: lock, trace, delegate, unlock.
int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);

    err = lfs_rename_(lfs, oldpath, newpath);

    LFS_TRACE("lfs_rename -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6050 #endif
6051
6052 int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
6053 int err = LFS_LOCK(lfs->cfg);
6054 if (err) {
6055 return err;
6056 }
6057 LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
6058
6059 err = lfs_stat_(lfs, path, info);
6060
6061 LFS_TRACE("lfs_stat -> %d", err);
6062 LFS_UNLOCK(lfs->cfg);
6063 return err;
6064 }
6065
// Thread-safe wrapper for lfs_getattr_: lock, trace, delegate, unlock.
// Returns the attribute size or a negative error code.
lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
        uint8_t type, void *buffer, lfs_size_t size) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
            (void*)lfs, path, type, buffer, size);

    lfs_ssize_t res = lfs_getattr_(lfs, path, type, buffer, size);

    LFS_TRACE("lfs_getattr -> %"PRId32, res);
    LFS_UNLOCK(lfs->cfg);
    return res;
}
6081
6082 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_setattr_: lock, trace, delegate, unlock.
int lfs_setattr(lfs_t *lfs, const char *path,
        uint8_t type, const void *buffer, lfs_size_t size) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
            (void*)lfs, path, type, buffer, size);

    err = lfs_setattr_(lfs, path, type, buffer, size);

    LFS_TRACE("lfs_setattr -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6098 #endif
6099
6100 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_removeattr_: lock, trace, delegate, unlock.
int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);

    err = lfs_removeattr_(lfs, path, type);

    LFS_TRACE("lfs_removeattr -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6114 #endif
6115
6116 #ifndef LFS_NO_MALLOC
// Thread-safe wrapper for lfs_file_open_: lock, trace, delegate,
// unlock. Asserts the file handle is not already open.
int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
            (void*)lfs, (void*)file, path, (unsigned)flags);
    LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    err = lfs_file_open_(lfs, file, path, flags);

    LFS_TRACE("lfs_file_open -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6132 #endif
6133
// Thread-safe wrapper for lfs_file_opencfg_: lock, trace, delegate,
// unlock. Asserts the file handle is not already open.
int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
        const char *path, int flags,
        const struct lfs_file_config *cfg) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
                ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
            (void*)lfs, (void*)file, path, (unsigned)flags,
            (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
    LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    err = lfs_file_opencfg_(lfs, file, path, flags, cfg);

    LFS_TRACE("lfs_file_opencfg -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6153
// Thread-safe wrapper for lfs_file_close_: lock, trace, delegate,
// unlock. Asserts the file is actually open.
int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
    LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    err = lfs_file_close_(lfs, file);

    LFS_TRACE("lfs_file_close -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6168
6169 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_file_sync_: lock, trace, delegate,
// unlock. Asserts the file is actually open.
int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
    LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    err = lfs_file_sync_(lfs, file);

    LFS_TRACE("lfs_file_sync -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6184 #endif
6185
// Thread-safe wrapper for lfs_file_read_: lock, trace, delegate,
// unlock. Returns bytes read or a negative error code.
lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
        void *buffer, lfs_size_t size) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
            (void*)lfs, (void*)file, buffer, size);
    LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    lfs_ssize_t res = lfs_file_read_(lfs, file, buffer, size);

    LFS_TRACE("lfs_file_read -> %"PRId32, res);
    LFS_UNLOCK(lfs->cfg);
    return res;
}
6202
6203 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_file_write_: lock, trace, delegate,
// unlock. Returns bytes written or a negative error code.
lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
        const void *buffer, lfs_size_t size) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
            (void*)lfs, (void*)file, buffer, size);
    LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    lfs_ssize_t res = lfs_file_write_(lfs, file, buffer, size);

    LFS_TRACE("lfs_file_write -> %"PRId32, res);
    LFS_UNLOCK(lfs->cfg);
    return res;
}
6220 #endif
6221
// Thread-safe wrapper for lfs_file_seek_: lock, trace, delegate,
// unlock. Returns the new position or a negative error code.
lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
        lfs_soff_t off, int whence) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
            (void*)lfs, (void*)file, off, whence);
    LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    lfs_soff_t res = lfs_file_seek_(lfs, file, off, whence);

    LFS_TRACE("lfs_file_seek -> %"PRId32, res);
    LFS_UNLOCK(lfs->cfg);
    return res;
}
6238
6239 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_file_truncate_: lock, trace, delegate,
// unlock. Asserts the file is actually open.
int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
            (void*)lfs, (void*)file, size);
    LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    err = lfs_file_truncate_(lfs, file, size);

    LFS_TRACE("lfs_file_truncate -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6255 #endif
6256
// Thread-safe wrapper for lfs_file_tell_: lock, trace, delegate,
// unlock. Returns the current position or a negative error code.
lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
    LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    lfs_soff_t res = lfs_file_tell_(lfs, file);

    LFS_TRACE("lfs_file_tell -> %"PRId32, res);
    LFS_UNLOCK(lfs->cfg);
    return res;
}
6271
6272 int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
6273 int err = LFS_LOCK(lfs->cfg);
6274 if (err) {
6275 return err;
6276 }
6277 LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
6278
6279 err = lfs_file_rewind_(lfs, file);
6280
6281 LFS_TRACE("lfs_file_rewind -> %d", err);
6282 LFS_UNLOCK(lfs->cfg);
6283 return err;
6284 }
6285
// Thread-safe wrapper for lfs_file_size_: lock, trace, delegate,
// unlock. Returns the file size or a negative error code.
lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
    LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));

    lfs_soff_t res = lfs_file_size_(lfs, file);

    LFS_TRACE("lfs_file_size -> %"PRId32, res);
    LFS_UNLOCK(lfs->cfg);
    return res;
}
6300
6301 #ifndef LFS_READONLY
6302 int lfs_mkdir(lfs_t *lfs, const char *path) {
6303 int err = LFS_LOCK(lfs->cfg);
6304 if (err) {
6305 return err;
6306 }
6307 LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);
6308
6309 err = lfs_mkdir_(lfs, path);
6310
6311 LFS_TRACE("lfs_mkdir -> %d", err);
6312 LFS_UNLOCK(lfs->cfg);
6313 return err;
6314 }
6315 #endif
6316
// Thread-safe wrapper for lfs_dir_open_: lock, trace, delegate,
// unlock. Asserts the dir handle is not already open.
int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
    LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir));

    err = lfs_dir_open_(lfs, dir, path);

    LFS_TRACE("lfs_dir_open -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6331
// Thread-safe wrapper for lfs_dir_close_: lock, trace, delegate, unlock.
int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);

    err = lfs_dir_close_(lfs, dir);

    LFS_TRACE("lfs_dir_close -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6345
// Thread-safe wrapper for lfs_dir_read_: lock, trace, delegate, unlock.
int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_read(%p, %p, %p)",
            (void*)lfs, (void*)dir, (void*)info);

    err = lfs_dir_read_(lfs, dir, info);

    LFS_TRACE("lfs_dir_read -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6360
// Thread-safe wrapper for lfs_dir_seek_: lock, trace, delegate, unlock.
int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
            (void*)lfs, (void*)dir, off);

    err = lfs_dir_seek_(lfs, dir, off);

    LFS_TRACE("lfs_dir_seek -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6375
// Thread-safe wrapper for lfs_dir_tell_: lock, trace, delegate,
// unlock. Returns the current position or a negative error code.
lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);

    lfs_soff_t res = lfs_dir_tell_(lfs, dir);

    LFS_TRACE("lfs_dir_tell -> %"PRId32, res);
    LFS_UNLOCK(lfs->cfg);
    return res;
}
6389
// Thread-safe wrapper for lfs_dir_rewind_: lock, trace, delegate, unlock.
int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);

    err = lfs_dir_rewind_(lfs, dir);

    LFS_TRACE("lfs_dir_rewind -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6403
// Thread-safe wrapper for lfs_fs_stat_: lock, trace, delegate, unlock.
int lfs_fs_stat(lfs_t *lfs, struct lfs_fsinfo *fsinfo) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_stat(%p, %p)", (void*)lfs, (void*)fsinfo);

    err = lfs_fs_stat_(lfs, fsinfo);

    LFS_TRACE("lfs_fs_stat -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6417
6418 lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
6419 int err = LFS_LOCK(lfs->cfg);
6420 if (err) {
6421 return err;
6422 }
6423 LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);
6424
6425 lfs_ssize_t res = lfs_fs_size_(lfs);
6426
6427 LFS_TRACE("lfs_fs_size -> %"PRId32, res);
6428 LFS_UNLOCK(lfs->cfg);
6429 return res;
6430 }
6431
// Thread-safe wrapper for lfs_fs_traverse_: lock, trace, delegate,
// unlock. The trailing true includes orphan metadata in the traversal.
int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
            (void*)lfs, (void*)(uintptr_t)cb, data);

    err = lfs_fs_traverse_(lfs, cb, data, true);

    LFS_TRACE("lfs_fs_traverse -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6446
6447 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_fs_mkconsistent_: lock, trace, delegate, unlock.
int lfs_fs_mkconsistent(lfs_t *lfs) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_mkconsistent(%p)", (void*)lfs);

    err = lfs_fs_mkconsistent_(lfs);

    LFS_TRACE("lfs_fs_mkconsistent -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6461 #endif
6462
6463 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_fs_gc_: lock, trace, delegate, unlock.
int lfs_fs_gc(lfs_t *lfs) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_gc(%p)", (void*)lfs);

    err = lfs_fs_gc_(lfs);

    LFS_TRACE("lfs_fs_gc -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6477 #endif
6478
6479 #ifndef LFS_READONLY
// Thread-safe wrapper for lfs_fs_grow_: lock, trace, delegate, unlock.
int lfs_fs_grow(lfs_t *lfs, lfs_size_t block_count) {
    int err = LFS_LOCK(lfs->cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_fs_grow(%p, %"PRIu32")", (void*)lfs, block_count);

    err = lfs_fs_grow_(lfs, block_count);

    LFS_TRACE("lfs_fs_grow -> %d", err);
    LFS_UNLOCK(lfs->cfg);
    return err;
}
6493 #endif
6494
6495 #ifdef LFS_MIGRATE
// Thread-safe wrapper for lfs_migrate_: locks via cfg (lfs is not yet
// initialized), traces the full config, delegates, and unlocks.
int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
    int err = LFS_LOCK(cfg);
    if (err) {
        return err;
    }
    LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
                ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
                ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
                ".block_size=%"PRIu32", .block_count=%"PRIu32", "
                ".block_cycles=%"PRId32", .cache_size=%"PRIu32", "
                ".lookahead_size=%"PRIu32", .read_buffer=%p, "
                ".prog_buffer=%p, .lookahead_buffer=%p, "
                ".name_max=%"PRIu32", .file_max=%"PRIu32", "
                ".attr_max=%"PRIu32"})",
            (void*)lfs, (void*)cfg, cfg->context,
            (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
            (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
            cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
            cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
            cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
            cfg->name_max, cfg->file_max, cfg->attr_max);

    err = lfs_migrate_(lfs, cfg);

    LFS_TRACE("lfs_migrate -> %d", err);
    LFS_UNLOCK(cfg);
    return err;
}
6524 #endif
6525
6526