1 /*
2 * The little filesystem
3 *
4 * Copyright (c) 2022, The littlefs authors.
5 * Copyright (c) 2017, Arm Limited. All rights reserved.
6 * SPDX-License-Identifier: BSD-3-Clause
7 */
8 #include "lfs.h"
9 #include "lfs_util.h"
10
11 #ifdef __ZEPHYR__
12 #include <zephyr/logging/log.h>
13 LOG_MODULE_DECLARE(littlefs, CONFIG_FS_LOG_LEVEL);
14 #endif
15
16 // some constants used throughout the code
17 #define LFS_BLOCK_NULL ((lfs_block_t)-1)
18 #define LFS_BLOCK_INLINE ((lfs_block_t)-2)
19
20 enum {
21 LFS_OK_RELOCATED = 1,
22 LFS_OK_DROPPED = 2,
23 LFS_OK_ORPHANED = 3,
24 };
25
26 enum {
27 LFS_CMP_EQ = 0,
28 LFS_CMP_LT = 1,
29 LFS_CMP_GT = 2,
30 };
31
32
33 /// Caching block device operations ///
34
lfs_cache_drop(lfs_t * lfs,lfs_cache_t * rcache)35 static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
36 // do not zero, cheaper if cache is readonly or only going to be
37 // written with identical data (during relocates)
38 (void)lfs;
39 rcache->block = LFS_BLOCK_NULL;
40 }
41
lfs_cache_zero(lfs_t * lfs,lfs_cache_t * pcache)42 static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
43 // zero to avoid information leak
44 memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
45 pcache->block = LFS_BLOCK_NULL;
46 }
47
lfs_bd_read(lfs_t * lfs,const lfs_cache_t * pcache,lfs_cache_t * rcache,lfs_size_t hint,lfs_block_t block,lfs_off_t off,void * buffer,lfs_size_t size)48 static int lfs_bd_read(lfs_t *lfs,
49 const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
50 lfs_block_t block, lfs_off_t off,
51 void *buffer, lfs_size_t size) {
52 uint8_t *data = buffer;
53 if (block >= lfs->cfg->block_count ||
54 off+size > lfs->cfg->block_size) {
55 return LFS_ERR_CORRUPT;
56 }
57
58 while (size > 0) {
59 lfs_size_t diff = size;
60
61 if (pcache && block == pcache->block &&
62 off < pcache->off + pcache->size) {
63 if (off >= pcache->off) {
64 // is already in pcache?
65 diff = lfs_min(diff, pcache->size - (off-pcache->off));
66 memcpy(data, &pcache->buffer[off-pcache->off], diff);
67
68 data += diff;
69 off += diff;
70 size -= diff;
71 continue;
72 }
73
74 // pcache takes priority
75 diff = lfs_min(diff, pcache->off-off);
76 }
77
78 if (block == rcache->block &&
79 off < rcache->off + rcache->size) {
80 if (off >= rcache->off) {
81 // is already in rcache?
82 diff = lfs_min(diff, rcache->size - (off-rcache->off));
83 memcpy(data, &rcache->buffer[off-rcache->off], diff);
84
85 data += diff;
86 off += diff;
87 size -= diff;
88 continue;
89 }
90
91 // rcache takes priority
92 diff = lfs_min(diff, rcache->off-off);
93 }
94
95 if (size >= hint && off % lfs->cfg->read_size == 0 &&
96 size >= lfs->cfg->read_size) {
97 // bypass cache?
98 diff = lfs_aligndown(diff, lfs->cfg->read_size);
99 int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
100 if (err) {
101 return err;
102 }
103
104 data += diff;
105 off += diff;
106 size -= diff;
107 continue;
108 }
109
110 // load to cache, first condition can no longer fail
111 LFS_ASSERT(block < lfs->cfg->block_count);
112 rcache->block = block;
113 rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
114 rcache->size = lfs_min(
115 lfs_min(
116 lfs_alignup(off+hint, lfs->cfg->read_size),
117 lfs->cfg->block_size)
118 - rcache->off,
119 lfs->cfg->cache_size);
120 int err = lfs->cfg->read(lfs->cfg, rcache->block,
121 rcache->off, rcache->buffer, rcache->size);
122 LFS_ASSERT(err <= 0);
123 if (err) {
124 return err;
125 }
126 }
127
128 return 0;
129 }
130
lfs_bd_cmp(lfs_t * lfs,const lfs_cache_t * pcache,lfs_cache_t * rcache,lfs_size_t hint,lfs_block_t block,lfs_off_t off,const void * buffer,lfs_size_t size)131 static int lfs_bd_cmp(lfs_t *lfs,
132 const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
133 lfs_block_t block, lfs_off_t off,
134 const void *buffer, lfs_size_t size) {
135 const uint8_t *data = buffer;
136 lfs_size_t diff = 0;
137
138 for (lfs_off_t i = 0; i < size; i += diff) {
139 uint8_t dat[8];
140
141 diff = lfs_min(size-i, sizeof(dat));
142 int res = lfs_bd_read(lfs,
143 pcache, rcache, hint-i,
144 block, off+i, &dat, diff);
145 if (res) {
146 return res;
147 }
148
149 res = memcmp(dat, data + i, diff);
150 if (res) {
151 return res < 0 ? LFS_CMP_LT : LFS_CMP_GT;
152 }
153 }
154
155 return LFS_CMP_EQ;
156 }
157
158 #ifndef LFS_READONLY
lfs_bd_flush(lfs_t * lfs,lfs_cache_t * pcache,lfs_cache_t * rcache,bool validate)159 static int lfs_bd_flush(lfs_t *lfs,
160 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
161 if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
162 LFS_ASSERT(pcache->block < lfs->cfg->block_count);
163 lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
164 int err = lfs->cfg->prog(lfs->cfg, pcache->block,
165 pcache->off, pcache->buffer, diff);
166 LFS_ASSERT(err <= 0);
167 if (err) {
168 return err;
169 }
170
171 if (validate) {
172 // check data on disk
173 lfs_cache_drop(lfs, rcache);
174 int res = lfs_bd_cmp(lfs,
175 NULL, rcache, diff,
176 pcache->block, pcache->off, pcache->buffer, diff);
177 if (res < 0) {
178 return res;
179 }
180
181 if (res != LFS_CMP_EQ) {
182 return LFS_ERR_CORRUPT;
183 }
184 }
185
186 lfs_cache_zero(lfs, pcache);
187 }
188
189 return 0;
190 }
191 #endif
192
193 #ifndef LFS_READONLY
lfs_bd_sync(lfs_t * lfs,lfs_cache_t * pcache,lfs_cache_t * rcache,bool validate)194 static int lfs_bd_sync(lfs_t *lfs,
195 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
196 lfs_cache_drop(lfs, rcache);
197
198 int err = lfs_bd_flush(lfs, pcache, rcache, validate);
199 if (err) {
200 return err;
201 }
202
203 err = lfs->cfg->sync(lfs->cfg);
204 LFS_ASSERT(err <= 0);
205 return err;
206 }
207 #endif
208
209 #ifndef LFS_READONLY
lfs_bd_prog(lfs_t * lfs,lfs_cache_t * pcache,lfs_cache_t * rcache,bool validate,lfs_block_t block,lfs_off_t off,const void * buffer,lfs_size_t size)210 static int lfs_bd_prog(lfs_t *lfs,
211 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
212 lfs_block_t block, lfs_off_t off,
213 const void *buffer, lfs_size_t size) {
214 const uint8_t *data = buffer;
215 LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->cfg->block_count);
216 LFS_ASSERT(off + size <= lfs->cfg->block_size);
217
218 while (size > 0) {
219 if (block == pcache->block &&
220 off >= pcache->off &&
221 off < pcache->off + lfs->cfg->cache_size) {
222 // already fits in pcache?
223 lfs_size_t diff = lfs_min(size,
224 lfs->cfg->cache_size - (off-pcache->off));
225 memcpy(&pcache->buffer[off-pcache->off], data, diff);
226
227 data += diff;
228 off += diff;
229 size -= diff;
230
231 pcache->size = lfs_max(pcache->size, off - pcache->off);
232 if (pcache->size == lfs->cfg->cache_size) {
233 // eagerly flush out pcache if we fill up
234 int err = lfs_bd_flush(lfs, pcache, rcache, validate);
235 if (err) {
236 return err;
237 }
238 }
239
240 continue;
241 }
242
243 // pcache must have been flushed, either by programming and
244 // entire block or manually flushing the pcache
245 LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);
246
247 // prepare pcache, first condition can no longer fail
248 pcache->block = block;
249 pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
250 pcache->size = 0;
251 }
252
253 return 0;
254 }
255 #endif
256
257 #ifndef LFS_READONLY
lfs_bd_erase(lfs_t * lfs,lfs_block_t block)258 static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
259 LFS_ASSERT(block < lfs->cfg->block_count);
260 int err = lfs->cfg->erase(lfs->cfg, block);
261 LFS_ASSERT(err <= 0);
262 return err;
263 }
264 #endif
265
266
267 /// Small type-level utilities ///
268 // operations on block pairs
lfs_pair_swap(lfs_block_t pair[2])269 static inline void lfs_pair_swap(lfs_block_t pair[2]) {
270 lfs_block_t t = pair[0];
271 pair[0] = pair[1];
272 pair[1] = t;
273 }
274
lfs_pair_isnull(const lfs_block_t pair[2])275 static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
276 return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
277 }
278
lfs_pair_cmp(const lfs_block_t paira[2],const lfs_block_t pairb[2])279 static inline int lfs_pair_cmp(
280 const lfs_block_t paira[2],
281 const lfs_block_t pairb[2]) {
282 return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
283 paira[0] == pairb[1] || paira[1] == pairb[0]);
284 }
285
286 #ifndef LFS_READONLY
lfs_pair_sync(const lfs_block_t paira[2],const lfs_block_t pairb[2])287 static inline bool lfs_pair_sync(
288 const lfs_block_t paira[2],
289 const lfs_block_t pairb[2]) {
290 return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
291 (paira[0] == pairb[1] && paira[1] == pairb[0]);
292 }
293 #endif
294
lfs_pair_fromle32(lfs_block_t pair[2])295 static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
296 pair[0] = lfs_fromle32(pair[0]);
297 pair[1] = lfs_fromle32(pair[1]);
298 }
299
300 #ifndef LFS_READONLY
lfs_pair_tole32(lfs_block_t pair[2])301 static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
302 pair[0] = lfs_tole32(pair[0]);
303 pair[1] = lfs_tole32(pair[1]);
304 }
305 #endif
306
307 // operations on 32-bit entry tags
308 typedef uint32_t lfs_tag_t;
309 typedef int32_t lfs_stag_t;
310
311 #define LFS_MKTAG(type, id, size) \
312 (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))
313
314 #define LFS_MKTAG_IF(cond, type, id, size) \
315 ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))
316
317 #define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
318 ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
319
lfs_tag_isvalid(lfs_tag_t tag)320 static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
321 return !(tag & 0x80000000);
322 }
323
lfs_tag_isdelete(lfs_tag_t tag)324 static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
325 return ((int32_t)(tag << 22) >> 22) == -1;
326 }
327
lfs_tag_type1(lfs_tag_t tag)328 static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
329 return (tag & 0x70000000) >> 20;
330 }
331
lfs_tag_type3(lfs_tag_t tag)332 static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
333 return (tag & 0x7ff00000) >> 20;
334 }
335
lfs_tag_chunk(lfs_tag_t tag)336 static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
337 return (tag & 0x0ff00000) >> 20;
338 }
339
lfs_tag_splice(lfs_tag_t tag)340 static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
341 return (int8_t)lfs_tag_chunk(tag);
342 }
343
lfs_tag_id(lfs_tag_t tag)344 static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
345 return (tag & 0x000ffc00) >> 10;
346 }
347
lfs_tag_size(lfs_tag_t tag)348 static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
349 return tag & 0x000003ff;
350 }
351
lfs_tag_dsize(lfs_tag_t tag)352 static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
353 return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
354 }
355
356 // operations on attributes in attribute lists
357 struct lfs_mattr {
358 lfs_tag_t tag;
359 const void *buffer;
360 };
361
362 struct lfs_diskoff {
363 lfs_block_t block;
364 lfs_off_t off;
365 };
366
367 #define LFS_MKATTRS(...) \
368 (struct lfs_mattr[]){__VA_ARGS__}, \
369 sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
370
371 // operations on global state
lfs_gstate_xor(lfs_gstate_t * a,const lfs_gstate_t * b)372 static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
373 for (int i = 0; i < 3; i++) {
374 ((uint32_t*)a)[i] ^= ((const uint32_t*)b)[i];
375 }
376 }
377
lfs_gstate_iszero(const lfs_gstate_t * a)378 static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
379 for (int i = 0; i < 3; i++) {
380 if (((uint32_t*)a)[i] != 0) {
381 return false;
382 }
383 }
384 return true;
385 }
386
387 #ifndef LFS_READONLY
lfs_gstate_hasorphans(const lfs_gstate_t * a)388 static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
389 return lfs_tag_size(a->tag);
390 }
391
lfs_gstate_getorphans(const lfs_gstate_t * a)392 static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
393 return lfs_tag_size(a->tag);
394 }
395
lfs_gstate_hasmove(const lfs_gstate_t * a)396 static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
397 return lfs_tag_type1(a->tag);
398 }
399 #endif
400
lfs_gstate_hasmovehere(const lfs_gstate_t * a,const lfs_block_t * pair)401 static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
402 const lfs_block_t *pair) {
403 return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
404 }
405
lfs_gstate_fromle32(lfs_gstate_t * a)406 static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
407 a->tag = lfs_fromle32(a->tag);
408 a->pair[0] = lfs_fromle32(a->pair[0]);
409 a->pair[1] = lfs_fromle32(a->pair[1]);
410 }
411
412 #ifndef LFS_READONLY
lfs_gstate_tole32(lfs_gstate_t * a)413 static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
414 a->tag = lfs_tole32(a->tag);
415 a->pair[0] = lfs_tole32(a->pair[0]);
416 a->pair[1] = lfs_tole32(a->pair[1]);
417 }
418 #endif
419
420 // other endianness operations
lfs_ctz_fromle32(struct lfs_ctz * ctz)421 static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
422 ctz->head = lfs_fromle32(ctz->head);
423 ctz->size = lfs_fromle32(ctz->size);
424 }
425
426 #ifndef LFS_READONLY
lfs_ctz_tole32(struct lfs_ctz * ctz)427 static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
428 ctz->head = lfs_tole32(ctz->head);
429 ctz->size = lfs_tole32(ctz->size);
430 }
431 #endif
432
lfs_superblock_fromle32(lfs_superblock_t * superblock)433 static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
434 superblock->version = lfs_fromle32(superblock->version);
435 superblock->block_size = lfs_fromle32(superblock->block_size);
436 superblock->block_count = lfs_fromle32(superblock->block_count);
437 superblock->name_max = lfs_fromle32(superblock->name_max);
438 superblock->file_max = lfs_fromle32(superblock->file_max);
439 superblock->attr_max = lfs_fromle32(superblock->attr_max);
440 }
441
442 #ifndef LFS_READONLY
lfs_superblock_tole32(lfs_superblock_t * superblock)443 static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
444 superblock->version = lfs_tole32(superblock->version);
445 superblock->block_size = lfs_tole32(superblock->block_size);
446 superblock->block_count = lfs_tole32(superblock->block_count);
447 superblock->name_max = lfs_tole32(superblock->name_max);
448 superblock->file_max = lfs_tole32(superblock->file_max);
449 superblock->attr_max = lfs_tole32(superblock->attr_max);
450 }
451 #endif
452
453 #ifndef LFS_NO_ASSERT
454 #if __ASSERT_ON
lfs_mlist_isopen(struct lfs_mlist * head,struct lfs_mlist * node)455 static bool lfs_mlist_isopen(struct lfs_mlist *head,
456 struct lfs_mlist *node) {
457 for (struct lfs_mlist **p = &head; *p; p = &(*p)->next) {
458 if (*p == (struct lfs_mlist*)node) {
459 return true;
460 }
461 }
462
463 return false;
464 }
465 #endif
466 #endif
467
lfs_mlist_remove(lfs_t * lfs,struct lfs_mlist * mlist)468 static void lfs_mlist_remove(lfs_t *lfs, struct lfs_mlist *mlist) {
469 for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) {
470 if (*p == mlist) {
471 *p = (*p)->next;
472 break;
473 }
474 }
475 }
476
lfs_mlist_append(lfs_t * lfs,struct lfs_mlist * mlist)477 static void lfs_mlist_append(lfs_t *lfs, struct lfs_mlist *mlist) {
478 mlist->next = lfs->mlist;
479 lfs->mlist = mlist;
480 }
481
482
483 /// Internal operations predeclared here ///
484 #ifndef LFS_READONLY
485 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
486 const struct lfs_mattr *attrs, int attrcount);
487 static int lfs_dir_compact(lfs_t *lfs,
488 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
489 lfs_mdir_t *source, uint16_t begin, uint16_t end);
490 static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
491 const void *buffer, lfs_size_t size);
492 static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
493 const void *buffer, lfs_size_t size);
494 static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file);
495 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
496 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
497
498 static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss);
499 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
500 static void lfs_fs_prepmove(lfs_t *lfs,
501 uint16_t id, const lfs_block_t pair[2]);
502 static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
503 lfs_mdir_t *pdir);
504 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
505 lfs_mdir_t *parent);
506 static int lfs_fs_forceconsistency(lfs_t *lfs);
507 #endif
508
509 #ifdef LFS_MIGRATE
510 static int lfs1_traverse(lfs_t *lfs,
511 int (*cb)(void*, lfs_block_t), void *data);
512 #endif
513
514 static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir);
515
516 static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
517 void *buffer, lfs_size_t size);
518 static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
519 void *buffer, lfs_size_t size);
520 static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file);
521 static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file);
522
523 static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs);
524 static int lfs_fs_rawtraverse(lfs_t *lfs,
525 int (*cb)(void *data, lfs_block_t block), void *data,
526 bool includeorphans);
527
528 static int lfs_deinit(lfs_t *lfs);
529 static int lfs_rawunmount(lfs_t *lfs);
530
531
532 /// Block allocator ///
533 #ifndef LFS_READONLY
lfs_alloc_lookahead(void * p,lfs_block_t block)534 static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
535 lfs_t *lfs = (lfs_t*)p;
536 lfs_block_t off = ((block - lfs->free.off)
537 + lfs->cfg->block_count) % lfs->cfg->block_count;
538
539 if (off < lfs->free.size) {
540 lfs->free.buffer[off / 32] |= 1U << (off % 32);
541 }
542
543 return 0;
544 }
545 #endif
546
547 // indicate allocated blocks have been committed into the filesystem, this
548 // is to prevent blocks from being garbage collected in the middle of a
549 // commit operation
lfs_alloc_ack(lfs_t * lfs)550 static void lfs_alloc_ack(lfs_t *lfs) {
551 lfs->free.ack = lfs->cfg->block_count;
552 }
553
554 // drop the lookahead buffer, this is done during mounting and failed
555 // traversals in order to avoid invalid lookahead state
lfs_alloc_drop(lfs_t * lfs)556 static void lfs_alloc_drop(lfs_t *lfs) {
557 lfs->free.size = 0;
558 lfs->free.i = 0;
559 lfs_alloc_ack(lfs);
560 }
561
562 #ifndef LFS_READONLY
lfs_alloc(lfs_t * lfs,lfs_block_t * block)563 static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
564 while (true) {
565 while (lfs->free.i != lfs->free.size) {
566 lfs_block_t off = lfs->free.i;
567 lfs->free.i += 1;
568 lfs->free.ack -= 1;
569
570 if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
571 // found a free block
572 *block = (lfs->free.off + off) % lfs->cfg->block_count;
573
574 // eagerly find next off so an alloc ack can
575 // discredit old lookahead blocks
576 while (lfs->free.i != lfs->free.size &&
577 (lfs->free.buffer[lfs->free.i / 32]
578 & (1U << (lfs->free.i % 32)))) {
579 lfs->free.i += 1;
580 lfs->free.ack -= 1;
581 }
582
583 return 0;
584 }
585 }
586
587 // check if we have looked at all blocks since last ack
588 if (lfs->free.ack == 0) {
589 LFS_ERROR("No more free space %"PRIu32,
590 lfs->free.i + lfs->free.off);
591 return LFS_ERR_NOSPC;
592 }
593
594 lfs->free.off = (lfs->free.off + lfs->free.size)
595 % lfs->cfg->block_count;
596 lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
597 lfs->free.i = 0;
598
599 // find mask of free blocks from tree
600 memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
601 int err = lfs_fs_rawtraverse(lfs, lfs_alloc_lookahead, lfs, true);
602 if (err) {
603 lfs_alloc_drop(lfs);
604 return err;
605 }
606 }
607 }
608 #endif
609
610 /// Metadata pair and directory operations ///
lfs_dir_getslice(lfs_t * lfs,const lfs_mdir_t * dir,lfs_tag_t gmask,lfs_tag_t gtag,lfs_off_t goff,void * gbuffer,lfs_size_t gsize)611 static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
612 lfs_tag_t gmask, lfs_tag_t gtag,
613 lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
614 lfs_off_t off = dir->off;
615 lfs_tag_t ntag = dir->etag;
616 lfs_stag_t gdiff = 0;
617
618 if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
619 lfs_tag_id(gmask) != 0 &&
620 lfs_tag_id(lfs->gdisk.tag) <= lfs_tag_id(gtag)) {
621 // synthetic moves
622 gdiff -= LFS_MKTAG(0, 1, 0);
623 }
624
625 // iterate over dir block backwards (for faster lookups)
626 while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
627 off -= lfs_tag_dsize(ntag);
628 lfs_tag_t tag = ntag;
629 int err = lfs_bd_read(lfs,
630 NULL, &lfs->rcache, sizeof(ntag),
631 dir->pair[0], off, &ntag, sizeof(ntag));
632 if (err) {
633 return err;
634 }
635
636 ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;
637
638 if (lfs_tag_id(gmask) != 0 &&
639 lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
640 lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
641 if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
642 (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
643 // found where we were created
644 return LFS_ERR_NOENT;
645 }
646
647 // move around splices
648 gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
649 }
650
651 if ((gmask & tag) == (gmask & (gtag - gdiff))) {
652 if (lfs_tag_isdelete(tag)) {
653 return LFS_ERR_NOENT;
654 }
655
656 lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
657 err = lfs_bd_read(lfs,
658 NULL, &lfs->rcache, diff,
659 dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
660 if (err) {
661 return err;
662 }
663
664 memset((uint8_t*)gbuffer + diff, 0, gsize - diff);
665
666 return tag + gdiff;
667 }
668 }
669
670 return LFS_ERR_NOENT;
671 }
672
lfs_dir_get(lfs_t * lfs,const lfs_mdir_t * dir,lfs_tag_t gmask,lfs_tag_t gtag,void * buffer)673 static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
674 lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
675 return lfs_dir_getslice(lfs, dir,
676 gmask, gtag,
677 0, buffer, lfs_tag_size(gtag));
678 }
679
lfs_dir_getread(lfs_t * lfs,const lfs_mdir_t * dir,const lfs_cache_t * pcache,lfs_cache_t * rcache,lfs_size_t hint,lfs_tag_t gmask,lfs_tag_t gtag,lfs_off_t off,void * buffer,lfs_size_t size)680 static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
681 const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
682 lfs_tag_t gmask, lfs_tag_t gtag,
683 lfs_off_t off, void *buffer, lfs_size_t size) {
684 uint8_t *data = buffer;
685 if (off+size > lfs->cfg->block_size) {
686 return LFS_ERR_CORRUPT;
687 }
688
689 while (size > 0) {
690 lfs_size_t diff = size;
691
692 if (pcache && pcache->block == LFS_BLOCK_INLINE &&
693 off < pcache->off + pcache->size) {
694 if (off >= pcache->off) {
695 // is already in pcache?
696 diff = lfs_min(diff, pcache->size - (off-pcache->off));
697 memcpy(data, &pcache->buffer[off-pcache->off], diff);
698
699 data += diff;
700 off += diff;
701 size -= diff;
702 continue;
703 }
704
705 // pcache takes priority
706 diff = lfs_min(diff, pcache->off-off);
707 }
708
709 if (rcache->block == LFS_BLOCK_INLINE &&
710 off < rcache->off + rcache->size) {
711 if (off >= rcache->off) {
712 // is already in rcache?
713 diff = lfs_min(diff, rcache->size - (off-rcache->off));
714 memcpy(data, &rcache->buffer[off-rcache->off], diff);
715
716 data += diff;
717 off += diff;
718 size -= diff;
719 continue;
720 }
721
722 // rcache takes priority
723 diff = lfs_min(diff, rcache->off-off);
724 }
725
726 // load to cache, first condition can no longer fail
727 rcache->block = LFS_BLOCK_INLINE;
728 rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
729 rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
730 lfs->cfg->cache_size);
731 int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
732 rcache->off, rcache->buffer, rcache->size);
733 if (err < 0) {
734 return err;
735 }
736 }
737
738 return 0;
739 }
740
741 #ifndef LFS_READONLY
lfs_dir_traverse_filter(void * p,lfs_tag_t tag,const void * buffer)742 static int lfs_dir_traverse_filter(void *p,
743 lfs_tag_t tag, const void *buffer) {
744 lfs_tag_t *filtertag = p;
745 (void)buffer;
746
747 // which mask depends on unique bit in tag structure
748 uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
749 ? LFS_MKTAG(0x7ff, 0x3ff, 0)
750 : LFS_MKTAG(0x700, 0x3ff, 0);
751
752 // check for redundancy
753 if ((mask & tag) == (mask & *filtertag) ||
754 lfs_tag_isdelete(*filtertag) ||
755 (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
756 LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
757 (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
758 *filtertag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0);
759 return true;
760 }
761
762 // check if we need to adjust for created/deleted tags
763 if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
764 lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
765 *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
766 }
767
768 return false;
769 }
770 #endif
771
772 #ifndef LFS_READONLY
773 // maximum recursive depth of lfs_dir_traverse, the deepest call:
774 //
775 // traverse with commit
776 // '-> traverse with move
777 // '-> traverse with filter
778 //
779 #define LFS_DIR_TRAVERSE_DEPTH 3
780
781 struct lfs_dir_traverse {
782 const lfs_mdir_t *dir;
783 lfs_off_t off;
784 lfs_tag_t ptag;
785 const struct lfs_mattr *attrs;
786 int attrcount;
787
788 lfs_tag_t tmask;
789 lfs_tag_t ttag;
790 uint16_t begin;
791 uint16_t end;
792 int16_t diff;
793
794 int (*cb)(void *data, lfs_tag_t tag, const void *buffer);
795 void *data;
796
797 lfs_tag_t tag;
798 const void *buffer;
799 struct lfs_diskoff disk;
800 };
801
lfs_dir_traverse(lfs_t * lfs,const lfs_mdir_t * dir,lfs_off_t off,lfs_tag_t ptag,const struct lfs_mattr * attrs,int attrcount,lfs_tag_t tmask,lfs_tag_t ttag,uint16_t begin,uint16_t end,int16_t diff,int (* cb)(void * data,lfs_tag_t tag,const void * buffer),void * data)802 static int lfs_dir_traverse(lfs_t *lfs,
803 const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
804 const struct lfs_mattr *attrs, int attrcount,
805 lfs_tag_t tmask, lfs_tag_t ttag,
806 uint16_t begin, uint16_t end, int16_t diff,
807 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
808 // This function in inherently recursive, but bounded. To allow tool-based
809 // analysis without unnecessary code-cost we use an explicit stack
810 struct lfs_dir_traverse stack[LFS_DIR_TRAVERSE_DEPTH-1];
811 unsigned sp = 0;
812 int res;
813
814 // iterate over directory and attrs
815 lfs_tag_t tag;
816 const void *buffer;
817 struct lfs_diskoff disk;
818 while (true) {
819 {
820 if (off+lfs_tag_dsize(ptag) < dir->off) {
821 off += lfs_tag_dsize(ptag);
822 int err = lfs_bd_read(lfs,
823 NULL, &lfs->rcache, sizeof(tag),
824 dir->pair[0], off, &tag, sizeof(tag));
825 if (err) {
826 return err;
827 }
828
829 tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
830 disk.block = dir->pair[0];
831 disk.off = off+sizeof(lfs_tag_t);
832 buffer = &disk;
833 ptag = tag;
834 } else if (attrcount > 0) {
835 tag = attrs[0].tag;
836 buffer = attrs[0].buffer;
837 attrs += 1;
838 attrcount -= 1;
839 } else {
840 // finished traversal, pop from stack?
841 res = 0;
842 break;
843 }
844
845 // do we need to filter?
846 lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
847 if ((mask & tmask & tag) != (mask & tmask & ttag)) {
848 continue;
849 }
850
851 if (lfs_tag_id(tmask) != 0) {
852 LFS_ASSERT(sp < LFS_DIR_TRAVERSE_DEPTH);
853 // recurse, scan for duplicates, and update tag based on
854 // creates/deletes
855 stack[sp] = (struct lfs_dir_traverse){
856 .dir = dir,
857 .off = off,
858 .ptag = ptag,
859 .attrs = attrs,
860 .attrcount = attrcount,
861 .tmask = tmask,
862 .ttag = ttag,
863 .begin = begin,
864 .end = end,
865 .diff = diff,
866 .cb = cb,
867 .data = data,
868 .tag = tag,
869 .buffer = buffer,
870 .disk = disk,
871 };
872 sp += 1;
873
874 dir = dir;
875 off = off;
876 ptag = ptag;
877 attrs = attrs;
878 attrcount = attrcount;
879 tmask = 0;
880 ttag = 0;
881 begin = 0;
882 end = 0;
883 diff = 0;
884 cb = lfs_dir_traverse_filter;
885 data = &stack[sp-1].tag;
886 continue;
887 }
888 }
889
890 popped:
891 // in filter range?
892 if (lfs_tag_id(tmask) != 0 &&
893 !(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
894 continue;
895 }
896
897 // handle special cases for mcu-side operations
898 if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
899 // do nothing
900 } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
901 // Without this condition, lfs_dir_traverse can exhibit an
902 // extremely expensive O(n^3) of nested loops when renaming.
903 // This happens because lfs_dir_traverse tries to filter tags by
904 // the tags in the source directory, triggering a second
905 // lfs_dir_traverse with its own filter operation.
906 //
907 // traverse with commit
908 // '-> traverse with filter
909 // '-> traverse with move
910 // '-> traverse with filter
911 //
912 // However we don't actually care about filtering the second set of
913 // tags, since duplicate tags have no effect when filtering.
914 //
915 // This check skips this unnecessary recursive filtering explicitly,
916 // reducing this runtime from O(n^3) to O(n^2).
917 if (cb == lfs_dir_traverse_filter) {
918 continue;
919 }
920
921 // recurse into move
922 stack[sp] = (struct lfs_dir_traverse){
923 .dir = dir,
924 .off = off,
925 .ptag = ptag,
926 .attrs = attrs,
927 .attrcount = attrcount,
928 .tmask = tmask,
929 .ttag = ttag,
930 .begin = begin,
931 .end = end,
932 .diff = diff,
933 .cb = cb,
934 .data = data,
935 .tag = LFS_MKTAG(LFS_FROM_NOOP, 0, 0),
936 };
937 sp += 1;
938
939 uint16_t fromid = lfs_tag_size(tag);
940 uint16_t toid = lfs_tag_id(tag);
941 dir = buffer;
942 off = 0;
943 ptag = 0xffffffff;
944 attrs = NULL;
945 attrcount = 0;
946 tmask = LFS_MKTAG(0x600, 0x3ff, 0);
947 ttag = LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0);
948 begin = fromid;
949 end = fromid+1;
950 diff = toid-fromid+diff;
951 } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
952 for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
953 const struct lfs_attr *a = buffer;
954 res = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
955 lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
956 if (res < 0) {
957 return res;
958 }
959
960 if (res) {
961 break;
962 }
963 }
964 } else {
965 res = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
966 if (res < 0) {
967 return res;
968 }
969
970 if (res) {
971 break;
972 }
973 }
974 }
975
976 if (sp > 0) {
977 // pop from the stack and return, fortunately all pops share
978 // a destination
979 dir = stack[sp-1].dir;
980 off = stack[sp-1].off;
981 ptag = stack[sp-1].ptag;
982 attrs = stack[sp-1].attrs;
983 attrcount = stack[sp-1].attrcount;
984 tmask = stack[sp-1].tmask;
985 ttag = stack[sp-1].ttag;
986 begin = stack[sp-1].begin;
987 end = stack[sp-1].end;
988 diff = stack[sp-1].diff;
989 cb = stack[sp-1].cb;
990 data = stack[sp-1].data;
991 tag = stack[sp-1].tag;
992 buffer = stack[sp-1].buffer;
993 disk = stack[sp-1].disk;
994 sp -= 1;
995 goto popped;
996 } else {
997 return res;
998 }
999 }
1000 #endif
1001
lfs_dir_fetchmatch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2],lfs_tag_t fmask,lfs_tag_t ftag,uint16_t * id,int (* cb)(void * data,lfs_tag_t tag,const void * buffer),void * data)1002 static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
1003 lfs_mdir_t *dir, const lfs_block_t pair[2],
1004 lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
1005 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
1006 // we can find tag very efficiently during a fetch, since we're already
1007 // scanning the entire directory
1008 lfs_stag_t besttag = -1;
1009
1010 // if either block address is invalid we return LFS_ERR_CORRUPT here,
1011 // otherwise later writes to the pair could fail
1012 if (pair[0] >= lfs->cfg->block_count || pair[1] >= lfs->cfg->block_count) {
1013 return LFS_ERR_CORRUPT;
1014 }
1015
1016 // find the block with the most recent revision
1017 uint32_t revs[2] = {0, 0};
1018 int r = 0;
1019 for (int i = 0; i < 2; i++) {
1020 int err = lfs_bd_read(lfs,
1021 NULL, &lfs->rcache, sizeof(revs[i]),
1022 pair[i], 0, &revs[i], sizeof(revs[i]));
1023 revs[i] = lfs_fromle32(revs[i]);
1024 if (err && err != LFS_ERR_CORRUPT) {
1025 return err;
1026 }
1027
1028 if (err != LFS_ERR_CORRUPT &&
1029 lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
1030 r = i;
1031 }
1032 }
1033
1034 dir->pair[0] = pair[(r+0)%2];
1035 dir->pair[1] = pair[(r+1)%2];
1036 dir->rev = revs[(r+0)%2];
1037 dir->off = 0; // nonzero = found some commits
1038
1039 // now scan tags to fetch the actual dir and find possible match
1040 for (int i = 0; i < 2; i++) {
1041 lfs_off_t off = 0;
1042 lfs_tag_t ptag = 0xffffffff;
1043
1044 uint16_t tempcount = 0;
1045 lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
1046 bool tempsplit = false;
1047 lfs_stag_t tempbesttag = besttag;
1048
1049 dir->rev = lfs_tole32(dir->rev);
1050 uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
1051 dir->rev = lfs_fromle32(dir->rev);
1052
1053 while (true) {
1054 // extract next tag
1055 lfs_tag_t tag;
1056 off += lfs_tag_dsize(ptag);
1057 int err = lfs_bd_read(lfs,
1058 NULL, &lfs->rcache, lfs->cfg->block_size,
1059 dir->pair[0], off, &tag, sizeof(tag));
1060 if (err) {
1061 if (err == LFS_ERR_CORRUPT) {
1062 // can't continue?
1063 dir->erased = false;
1064 break;
1065 }
1066 return err;
1067 }
1068
1069 crc = lfs_crc(crc, &tag, sizeof(tag));
1070 tag = lfs_frombe32(tag) ^ ptag;
1071
1072 // next commit not yet programmed or we're not in valid range
1073 if (!lfs_tag_isvalid(tag)) {
1074 dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC &&
1075 dir->off % lfs->cfg->prog_size == 0);
1076 break;
1077 } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
1078 dir->erased = false;
1079 break;
1080 }
1081
1082 ptag = tag;
1083
1084 if (lfs_tag_type1(tag) == LFS_TYPE_CRC) {
1085 // check the crc attr
1086 uint32_t dcrc;
1087 err = lfs_bd_read(lfs,
1088 NULL, &lfs->rcache, lfs->cfg->block_size,
1089 dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
1090 if (err) {
1091 if (err == LFS_ERR_CORRUPT) {
1092 dir->erased = false;
1093 break;
1094 }
1095 return err;
1096 }
1097 dcrc = lfs_fromle32(dcrc);
1098
1099 if (crc != dcrc) {
1100 dir->erased = false;
1101 break;
1102 }
1103
1104 // reset the next bit if we need to
1105 ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
1106
1107 // toss our crc into the filesystem seed for
1108 // pseudorandom numbers, note we use another crc here
1109 // as a collection function because it is sufficiently
1110 // random and convenient
1111 lfs->seed = lfs_crc(lfs->seed, &crc, sizeof(crc));
1112
1113 // update with what's found so far
1114 besttag = tempbesttag;
1115 dir->off = off + lfs_tag_dsize(tag);
1116 dir->etag = ptag;
1117 dir->count = tempcount;
1118 dir->tail[0] = temptail[0];
1119 dir->tail[1] = temptail[1];
1120 dir->split = tempsplit;
1121
1122 // reset crc
1123 crc = 0xffffffff;
1124 continue;
1125 }
1126
1127 // crc the entry first, hopefully leaving it in the cache
1128 for (lfs_off_t j = sizeof(tag); j < lfs_tag_dsize(tag); j++) {
1129 uint8_t dat;
1130 err = lfs_bd_read(lfs,
1131 NULL, &lfs->rcache, lfs->cfg->block_size,
1132 dir->pair[0], off+j, &dat, 1);
1133 if (err) {
1134 if (err == LFS_ERR_CORRUPT) {
1135 dir->erased = false;
1136 break;
1137 }
1138 return err;
1139 }
1140
1141 crc = lfs_crc(crc, &dat, 1);
1142 }
1143
1144 // directory modification tags?
1145 if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
1146 // increase count of files if necessary
1147 if (lfs_tag_id(tag) >= tempcount) {
1148 tempcount = lfs_tag_id(tag) + 1;
1149 }
1150 } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
1151 tempcount += lfs_tag_splice(tag);
1152
1153 if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
1154 (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
1155 tempbesttag |= 0x80000000;
1156 } else if (tempbesttag != -1 &&
1157 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1158 tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
1159 }
1160 } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
1161 tempsplit = (lfs_tag_chunk(tag) & 1);
1162
1163 err = lfs_bd_read(lfs,
1164 NULL, &lfs->rcache, lfs->cfg->block_size,
1165 dir->pair[0], off+sizeof(tag), &temptail, 8);
1166 if (err) {
1167 if (err == LFS_ERR_CORRUPT) {
1168 dir->erased = false;
1169 break;
1170 }
1171 }
1172 lfs_pair_fromle32(temptail);
1173 }
1174
1175 // found a match for our fetcher?
1176 if ((fmask & tag) == (fmask & ftag)) {
1177 int res = cb(data, tag, &(struct lfs_diskoff){
1178 dir->pair[0], off+sizeof(tag)});
1179 if (res < 0) {
1180 if (res == LFS_ERR_CORRUPT) {
1181 dir->erased = false;
1182 break;
1183 }
1184 return res;
1185 }
1186
1187 if (res == LFS_CMP_EQ) {
1188 // found a match
1189 tempbesttag = tag;
1190 } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
1191 (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
1192 // found an identical tag, but contents didn't match
1193 // this must mean that our besttag has been overwritten
1194 tempbesttag = -1;
1195 } else if (res == LFS_CMP_GT &&
1196 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
1197 // found a greater match, keep track to keep things sorted
1198 tempbesttag = tag | 0x80000000;
1199 }
1200 }
1201 }
1202
1203 // consider what we have good enough
1204 if (dir->off > 0) {
1205 // synthetic move
1206 if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
1207 if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
1208 besttag |= 0x80000000;
1209 } else if (besttag != -1 &&
1210 lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
1211 besttag -= LFS_MKTAG(0, 1, 0);
1212 }
1213 }
1214
1215 // found tag? or found best id?
1216 if (id) {
1217 *id = lfs_min(lfs_tag_id(besttag), dir->count);
1218 }
1219
1220 if (lfs_tag_isvalid(besttag)) {
1221 return besttag;
1222 } else if (lfs_tag_id(besttag) < dir->count) {
1223 return LFS_ERR_NOENT;
1224 } else {
1225 return 0;
1226 }
1227 }
1228
1229 // failed, try the other block?
1230 lfs_pair_swap(dir->pair);
1231 dir->rev = revs[(r+1)%2];
1232 }
1233
1234 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
1235 dir->pair[0], dir->pair[1]);
1236 return LFS_ERR_CORRUPT;
1237 }
1238
lfs_dir_fetch(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2])1239 static int lfs_dir_fetch(lfs_t *lfs,
1240 lfs_mdir_t *dir, const lfs_block_t pair[2]) {
1241 // note, mask=-1, tag=-1 can never match a tag since this
1242 // pattern has the invalid bit set
1243 return (int)lfs_dir_fetchmatch(lfs, dir, pair,
1244 (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL);
1245 }
1246
lfs_dir_getgstate(lfs_t * lfs,const lfs_mdir_t * dir,lfs_gstate_t * gstate)1247 static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
1248 lfs_gstate_t *gstate) {
1249 lfs_gstate_t temp;
1250 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
1251 LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp);
1252 if (res < 0 && res != LFS_ERR_NOENT) {
1253 return res;
1254 }
1255
1256 if (res != LFS_ERR_NOENT) {
1257 // xor together to find resulting gstate
1258 lfs_gstate_fromle32(&temp);
1259 lfs_gstate_xor(gstate, &temp);
1260 }
1261
1262 return 0;
1263 }
1264
lfs_dir_getinfo(lfs_t * lfs,lfs_mdir_t * dir,uint16_t id,struct lfs_info * info)1265 static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
1266 uint16_t id, struct lfs_info *info) {
1267 if (id == 0x3ff) {
1268 // special case for root
1269 strcpy(info->name, "/");
1270 info->type = LFS_TYPE_DIR;
1271 return 0;
1272 }
1273
1274 lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
1275 LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
1276 if (tag < 0) {
1277 return (int)tag;
1278 }
1279
1280 info->type = lfs_tag_type3(tag);
1281
1282 struct lfs_ctz ctz;
1283 tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1284 LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
1285 if (tag < 0) {
1286 return (int)tag;
1287 }
1288 lfs_ctz_fromle32(&ctz);
1289
1290 if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
1291 info->size = ctz.size;
1292 } else if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
1293 info->size = lfs_tag_size(tag);
1294 }
1295
1296 return 0;
1297 }
1298
1299 struct lfs_dir_find_match {
1300 lfs_t *lfs;
1301 const void *name;
1302 lfs_size_t size;
1303 };
1304
lfs_dir_find_match(void * data,lfs_tag_t tag,const void * buffer)1305 static int lfs_dir_find_match(void *data,
1306 lfs_tag_t tag, const void *buffer) {
1307 struct lfs_dir_find_match *name = data;
1308 lfs_t *lfs = name->lfs;
1309 const struct lfs_diskoff *disk = buffer;
1310
1311 // compare with disk
1312 lfs_size_t diff = lfs_min(name->size, lfs_tag_size(tag));
1313 int res = lfs_bd_cmp(lfs,
1314 NULL, &lfs->rcache, diff,
1315 disk->block, disk->off, name->name, diff);
1316 if (res != LFS_CMP_EQ) {
1317 return res;
1318 }
1319
1320 // only equal if our size is still the same
1321 if (name->size != lfs_tag_size(tag)) {
1322 return (name->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT;
1323 }
1324
1325 // found a match!
1326 return LFS_CMP_EQ;
1327 }
1328
lfs_dir_find(lfs_t * lfs,lfs_mdir_t * dir,const char ** path,uint16_t * id)1329 static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
1330 const char **path, uint16_t *id) {
1331 // we reduce path to a single name if we can find it
1332 const char *name = *path;
1333 if (id) {
1334 *id = 0x3ff;
1335 }
1336
1337 // default to root dir
1338 lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
1339 dir->tail[0] = lfs->root[0];
1340 dir->tail[1] = lfs->root[1];
1341
1342 while (true) {
1343 nextname:
1344 // skip slashes
1345 name += strspn(name, "/");
1346 lfs_size_t namelen = strcspn(name, "/");
1347
1348 // skip '.' and root '..'
1349 if ((namelen == 1 && memcmp(name, ".", 1) == 0) ||
1350 (namelen == 2 && memcmp(name, "..", 2) == 0)) {
1351 name += namelen;
1352 goto nextname;
1353 }
1354
1355 // skip if matched by '..' in name
1356 const char *suffix = name + namelen;
1357 lfs_size_t sufflen;
1358 int depth = 1;
1359 while (true) {
1360 suffix += strspn(suffix, "/");
1361 sufflen = strcspn(suffix, "/");
1362 if (sufflen == 0) {
1363 break;
1364 }
1365
1366 if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
1367 depth -= 1;
1368 if (depth == 0) {
1369 name = suffix + sufflen;
1370 goto nextname;
1371 }
1372 } else {
1373 depth += 1;
1374 }
1375
1376 suffix += sufflen;
1377 }
1378
1379 // found path
1380 if (name[0] == '\0') {
1381 return tag;
1382 }
1383
1384 // update what we've found so far
1385 *path = name;
1386
1387 // only continue if we hit a directory
1388 if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
1389 return LFS_ERR_NOTDIR;
1390 }
1391
1392 // grab the entry data
1393 if (lfs_tag_id(tag) != 0x3ff) {
1394 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1395 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
1396 if (res < 0) {
1397 return res;
1398 }
1399 lfs_pair_fromle32(dir->tail);
1400 }
1401
1402 // find entry matching name
1403 while (true) {
1404 tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
1405 LFS_MKTAG(0x780, 0, 0),
1406 LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
1407 // are we last name?
1408 (strchr(name, '/') == NULL) ? id : NULL,
1409 lfs_dir_find_match, &(struct lfs_dir_find_match){
1410 lfs, name, namelen});
1411 if (tag < 0) {
1412 return tag;
1413 }
1414
1415 if (tag) {
1416 break;
1417 }
1418
1419 if (!dir->split) {
1420 return LFS_ERR_NOENT;
1421 }
1422 }
1423
1424 // to next name
1425 name += namelen;
1426 }
1427 }
1428
1429 // commit logic
1430 struct lfs_commit {
1431 lfs_block_t block;
1432 lfs_off_t off;
1433 lfs_tag_t ptag;
1434 uint32_t crc;
1435
1436 lfs_off_t begin;
1437 lfs_off_t end;
1438 };
1439
1440 #ifndef LFS_READONLY
lfs_dir_commitprog(lfs_t * lfs,struct lfs_commit * commit,const void * buffer,lfs_size_t size)1441 static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
1442 const void *buffer, lfs_size_t size) {
1443 int err = lfs_bd_prog(lfs,
1444 &lfs->pcache, &lfs->rcache, false,
1445 commit->block, commit->off ,
1446 (const uint8_t*)buffer, size);
1447 if (err) {
1448 return err;
1449 }
1450
1451 commit->crc = lfs_crc(commit->crc, buffer, size);
1452 commit->off += size;
1453 return 0;
1454 }
1455 #endif
1456
1457 #ifndef LFS_READONLY
lfs_dir_commitattr(lfs_t * lfs,struct lfs_commit * commit,lfs_tag_t tag,const void * buffer)1458 static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
1459 lfs_tag_t tag, const void *buffer) {
1460 // check if we fit
1461 lfs_size_t dsize = lfs_tag_dsize(tag);
1462 if (commit->off + dsize > commit->end) {
1463 return LFS_ERR_NOSPC;
1464 }
1465
1466 // write out tag
1467 lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
1468 int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
1469 if (err) {
1470 return err;
1471 }
1472
1473 if (!(tag & 0x80000000)) {
1474 // from memory
1475 err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
1476 if (err) {
1477 return err;
1478 }
1479 } else {
1480 // from disk
1481 const struct lfs_diskoff *disk = buffer;
1482 for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
1483 // rely on caching to make this efficient
1484 uint8_t dat;
1485 err = lfs_bd_read(lfs,
1486 NULL, &lfs->rcache, dsize-sizeof(tag)-i,
1487 disk->block, disk->off+i, &dat, 1);
1488 if (err) {
1489 return err;
1490 }
1491
1492 err = lfs_dir_commitprog(lfs, commit, &dat, 1);
1493 if (err) {
1494 return err;
1495 }
1496 }
1497 }
1498
1499 commit->ptag = tag & 0x7fffffff;
1500 return 0;
1501 }
1502 #endif
1503
1504 #ifndef LFS_READONLY
lfs_dir_commitcrc(lfs_t * lfs,struct lfs_commit * commit)1505 static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
1506 // align to program units
1507 const lfs_off_t end = lfs_alignup(commit->off + 2*sizeof(uint32_t),
1508 lfs->cfg->prog_size);
1509
1510 lfs_off_t off1 = 0;
1511 uint32_t crc1 = 0;
1512
1513 // create crc tags to fill up remainder of commit, note that
1514 // padding is not crced, which lets fetches skip padding but
1515 // makes committing a bit more complicated
1516 while (commit->off < end) {
1517 lfs_off_t off = commit->off + sizeof(lfs_tag_t);
1518 lfs_off_t noff = lfs_min(end - off, 0x3fe) + off;
1519 if (noff < end) {
1520 noff = lfs_min(noff, end - 2*sizeof(uint32_t));
1521 }
1522
1523 // read erased state from next program unit
1524 lfs_tag_t tag = 0xffffffff;
1525 int err = lfs_bd_read(lfs,
1526 NULL, &lfs->rcache, sizeof(tag),
1527 commit->block, noff, &tag, sizeof(tag));
1528 if (err && err != LFS_ERR_CORRUPT) {
1529 return err;
1530 }
1531
1532 // build crc tag
1533 bool reset = ~lfs_frombe32(tag) >> 31;
1534 tag = LFS_MKTAG(LFS_TYPE_CRC + reset, 0x3ff, noff - off);
1535
1536 // write out crc
1537 uint32_t footer[2];
1538 footer[0] = lfs_tobe32(tag ^ commit->ptag);
1539 commit->crc = lfs_crc(commit->crc, &footer[0], sizeof(footer[0]));
1540 footer[1] = lfs_tole32(commit->crc);
1541 err = lfs_bd_prog(lfs,
1542 &lfs->pcache, &lfs->rcache, false,
1543 commit->block, commit->off, &footer, sizeof(footer));
1544 if (err) {
1545 return err;
1546 }
1547
1548 // keep track of non-padding checksum to verify
1549 if (off1 == 0) {
1550 off1 = commit->off + sizeof(uint32_t);
1551 crc1 = commit->crc;
1552 }
1553
1554 commit->off += sizeof(tag)+lfs_tag_size(tag);
1555 commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
1556 commit->crc = 0xffffffff; // reset crc for next "commit"
1557 }
1558
1559 // flush buffers
1560 int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
1561 if (err) {
1562 return err;
1563 }
1564
1565 // successful commit, check checksums to make sure
1566 lfs_off_t off = commit->begin;
1567 lfs_off_t noff = off1;
1568 while (off < end) {
1569 uint32_t crc = 0xffffffff;
1570 for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
1571 // check against written crc, may catch blocks that
1572 // become readonly and match our commit size exactly
1573 if (i == off1 && crc != crc1) {
1574 return LFS_ERR_CORRUPT;
1575 }
1576
1577 // leave it up to caching to make this efficient
1578 uint8_t dat;
1579 err = lfs_bd_read(lfs,
1580 NULL, &lfs->rcache, noff+sizeof(uint32_t)-i,
1581 commit->block, i, &dat, 1);
1582 if (err) {
1583 return err;
1584 }
1585
1586 crc = lfs_crc(crc, &dat, 1);
1587 }
1588
1589 // detected write error?
1590 if (crc != 0) {
1591 return LFS_ERR_CORRUPT;
1592 }
1593
1594 // skip padding
1595 off = lfs_min(end - noff, 0x3fe) + noff;
1596 if (off < end) {
1597 off = lfs_min(off, end - 2*sizeof(uint32_t));
1598 }
1599 noff = off + sizeof(uint32_t);
1600 }
1601
1602 return 0;
1603 }
1604 #endif
1605
1606 #ifndef LFS_READONLY
lfs_dir_alloc(lfs_t * lfs,lfs_mdir_t * dir)1607 static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
1608 // allocate pair of dir blocks (backwards, so we write block 1 first)
1609 for (int i = 0; i < 2; i++) {
1610 int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
1611 if (err) {
1612 return err;
1613 }
1614 }
1615
1616 // zero for reproducibility in case initial block is unreadable
1617 dir->rev = 0;
1618
1619 // rather than clobbering one of the blocks we just pretend
1620 // the revision may be valid
1621 int err = lfs_bd_read(lfs,
1622 NULL, &lfs->rcache, sizeof(dir->rev),
1623 dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
1624 dir->rev = lfs_fromle32(dir->rev);
1625 if (err && err != LFS_ERR_CORRUPT) {
1626 return err;
1627 }
1628
1629 // to make sure we don't immediately evict, align the new revision count
1630 // to our block_cycles modulus, see lfs_dir_compact for why our modulus
1631 // is tweaked this way
1632 if (lfs->cfg->block_cycles > 0) {
1633 dir->rev = lfs_alignup(dir->rev, ((lfs->cfg->block_cycles+1)|1));
1634 }
1635
1636 // set defaults
1637 dir->off = sizeof(dir->rev);
1638 dir->etag = 0xffffffff;
1639 dir->count = 0;
1640 dir->tail[0] = LFS_BLOCK_NULL;
1641 dir->tail[1] = LFS_BLOCK_NULL;
1642 dir->erased = false;
1643 dir->split = false;
1644
1645 // don't write out yet, let caller take care of that
1646 return 0;
1647 }
1648 #endif
1649
1650 #ifndef LFS_READONLY
lfs_dir_drop(lfs_t * lfs,lfs_mdir_t * dir,lfs_mdir_t * tail)1651 static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
1652 // steal state
1653 int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
1654 if (err) {
1655 return err;
1656 }
1657
1658 // steal tail
1659 lfs_pair_tole32(tail->tail);
1660 err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
1661 {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
1662 lfs_pair_fromle32(tail->tail);
1663 if (err) {
1664 return err;
1665 }
1666
1667 return 0;
1668 }
1669 #endif
1670
1671 #ifndef LFS_READONLY
lfs_dir_split(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount,lfs_mdir_t * source,uint16_t split,uint16_t end)1672 static int lfs_dir_split(lfs_t *lfs,
1673 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1674 lfs_mdir_t *source, uint16_t split, uint16_t end) {
1675 // create tail metadata pair
1676 lfs_mdir_t tail;
1677 int err = lfs_dir_alloc(lfs, &tail);
1678 if (err) {
1679 return err;
1680 }
1681
1682 tail.split = dir->split;
1683 tail.tail[0] = dir->tail[0];
1684 tail.tail[1] = dir->tail[1];
1685
1686 // note we don't care about LFS_OK_RELOCATED
1687 int res = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
1688 if (res < 0) {
1689 return res;
1690 }
1691
1692 dir->tail[0] = tail.pair[0];
1693 dir->tail[1] = tail.pair[1];
1694 dir->split = true;
1695
1696 // update root if needed
1697 if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) {
1698 lfs->root[0] = tail.pair[0];
1699 lfs->root[1] = tail.pair[1];
1700 }
1701
1702 return 0;
1703 }
1704 #endif
1705
1706 #ifndef LFS_READONLY
lfs_dir_commit_size(void * p,lfs_tag_t tag,const void * buffer)1707 static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
1708 lfs_size_t *size = p;
1709 (void)buffer;
1710
1711 *size += lfs_tag_dsize(tag);
1712 return 0;
1713 }
1714 #endif
1715
1716 #ifndef LFS_READONLY
1717 struct lfs_dir_commit_commit {
1718 lfs_t *lfs;
1719 struct lfs_commit *commit;
1720 };
1721 #endif
1722
1723 #ifndef LFS_READONLY
lfs_dir_commit_commit(void * p,lfs_tag_t tag,const void * buffer)1724 static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
1725 struct lfs_dir_commit_commit *commit = p;
1726 return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer);
1727 }
1728 #endif
1729
1730 #ifndef LFS_READONLY
lfs_dir_needsrelocation(lfs_t * lfs,lfs_mdir_t * dir)1731 static bool lfs_dir_needsrelocation(lfs_t *lfs, lfs_mdir_t *dir) {
1732 // If our revision count == n * block_cycles, we should force a relocation,
1733 // this is how littlefs wear-levels at the metadata-pair level. Note that we
1734 // actually use (block_cycles+1)|1, this is to avoid two corner cases:
1735 // 1. block_cycles = 1, which would prevent relocations from terminating
1736 // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
1737 // one metadata block in the pair, effectively making this useless
1738 return (lfs->cfg->block_cycles > 0
1739 && ((dir->rev + 1) % ((lfs->cfg->block_cycles+1)|1) == 0));
1740 }
1741 #endif
1742
1743 #ifndef LFS_READONLY
lfs_dir_compact(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount,lfs_mdir_t * source,uint16_t begin,uint16_t end)1744 static int lfs_dir_compact(lfs_t *lfs,
1745 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1746 lfs_mdir_t *source, uint16_t begin, uint16_t end) {
1747 // save some state in case block is bad
1748 bool relocated = false;
1749 bool tired = lfs_dir_needsrelocation(lfs, dir);
1750
1751 // increment revision count
1752 dir->rev += 1;
1753
1754 // do not proactively relocate blocks during migrations, this
1755 // can cause a number of failure states such: clobbering the
1756 // v1 superblock if we relocate root, and invalidating directory
1757 // pointers if we relocate the head of a directory. On top of
1758 // this, relocations increase the overall complexity of
1759 // lfs_migration, which is already a delicate operation.
1760 #ifdef LFS_MIGRATE
1761 if (lfs->lfs1) {
1762 tired = false;
1763 }
1764 #endif
1765
1766 if (tired && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) != 0) {
1767 // we're writing too much, time to relocate
1768 goto relocate;
1769 }
1770
1771 // begin loop to commit compaction to blocks until a compact sticks
1772 while (true) {
1773 {
1774 // setup commit state
1775 struct lfs_commit commit = {
1776 .block = dir->pair[1],
1777 .off = 0,
1778 .ptag = 0xffffffff,
1779 .crc = 0xffffffff,
1780
1781 .begin = 0,
1782 .end = (lfs->cfg->metadata_max ?
1783 lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
1784 };
1785
1786 // erase block to write to
1787 int err = lfs_bd_erase(lfs, dir->pair[1]);
1788 if (err) {
1789 if (err == LFS_ERR_CORRUPT) {
1790 goto relocate;
1791 }
1792 return err;
1793 }
1794
1795 // write out header
1796 dir->rev = lfs_tole32(dir->rev);
1797 err = lfs_dir_commitprog(lfs, &commit,
1798 &dir->rev, sizeof(dir->rev));
1799 dir->rev = lfs_fromle32(dir->rev);
1800 if (err) {
1801 if (err == LFS_ERR_CORRUPT) {
1802 goto relocate;
1803 }
1804 return err;
1805 }
1806
1807 // traverse the directory, this time writing out all unique tags
1808 err = lfs_dir_traverse(lfs,
1809 source, 0, 0xffffffff, attrs, attrcount,
1810 LFS_MKTAG(0x400, 0x3ff, 0),
1811 LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
1812 begin, end, -begin,
1813 lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
1814 lfs, &commit});
1815 if (err) {
1816 if (err == LFS_ERR_CORRUPT) {
1817 goto relocate;
1818 }
1819 return err;
1820 }
1821
1822 // commit tail, which may be new after last size check
1823 if (!lfs_pair_isnull(dir->tail)) {
1824 lfs_pair_tole32(dir->tail);
1825 err = lfs_dir_commitattr(lfs, &commit,
1826 LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
1827 dir->tail);
1828 lfs_pair_fromle32(dir->tail);
1829 if (err) {
1830 if (err == LFS_ERR_CORRUPT) {
1831 goto relocate;
1832 }
1833 return err;
1834 }
1835 }
1836
1837 // bring over gstate?
1838 lfs_gstate_t delta = {0};
1839 if (!relocated) {
1840 lfs_gstate_xor(&delta, &lfs->gdisk);
1841 lfs_gstate_xor(&delta, &lfs->gstate);
1842 }
1843 lfs_gstate_xor(&delta, &lfs->gdelta);
1844 delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
1845
1846 err = lfs_dir_getgstate(lfs, dir, &delta);
1847 if (err) {
1848 return err;
1849 }
1850
1851 if (!lfs_gstate_iszero(&delta)) {
1852 lfs_gstate_tole32(&delta);
1853 err = lfs_dir_commitattr(lfs, &commit,
1854 LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
1855 sizeof(delta)), &delta);
1856 if (err) {
1857 if (err == LFS_ERR_CORRUPT) {
1858 goto relocate;
1859 }
1860 return err;
1861 }
1862 }
1863
1864 // complete commit with crc
1865 err = lfs_dir_commitcrc(lfs, &commit);
1866 if (err) {
1867 if (err == LFS_ERR_CORRUPT) {
1868 goto relocate;
1869 }
1870 return err;
1871 }
1872
1873 // successful compaction, swap dir pair to indicate most recent
1874 LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
1875 lfs_pair_swap(dir->pair);
1876 dir->count = end - begin;
1877 dir->off = commit.off;
1878 dir->etag = commit.ptag;
1879 // update gstate
1880 lfs->gdelta = (lfs_gstate_t){0};
1881 if (!relocated) {
1882 lfs->gdisk = lfs->gstate;
1883 }
1884 }
1885 break;
1886
1887 relocate:
1888 // commit was corrupted, drop caches and prepare to relocate block
1889 relocated = true;
1890 lfs_cache_drop(lfs, &lfs->pcache);
1891 if (!tired) {
1892 LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
1893 }
1894
1895 // can't relocate superblock, filesystem is now frozen
1896 if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
1897 LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
1898 dir->pair[1]);
1899 return LFS_ERR_NOSPC;
1900 }
1901
1902 // relocate half of pair
1903 int err = lfs_alloc(lfs, &dir->pair[1]);
1904 if (err && (err != LFS_ERR_NOSPC || !tired)) {
1905 return err;
1906 }
1907
1908 tired = false;
1909 continue;
1910 }
1911
1912 return relocated ? LFS_OK_RELOCATED : 0;
1913 }
1914 #endif
1915
1916 #ifndef LFS_READONLY
lfs_dir_splittingcompact(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount,lfs_mdir_t * source,uint16_t begin,uint16_t end)1917 static int lfs_dir_splittingcompact(lfs_t *lfs, lfs_mdir_t *dir,
1918 const struct lfs_mattr *attrs, int attrcount,
1919 lfs_mdir_t *source, uint16_t begin, uint16_t end) {
1920 while (true) {
1921 // find size of first split, we do this by halving the split until
1922 // the metadata is guaranteed to fit
1923 //
1924 // Note that this isn't a true binary search, we never increase the
1925 // split size. This may result in poorly distributed metadata but isn't
1926 // worth the extra code size or performance hit to fix.
1927 lfs_size_t split = begin;
1928 while (end - split > 1) {
1929 lfs_size_t size = 0;
1930 int err = lfs_dir_traverse(lfs,
1931 source, 0, 0xffffffff, attrs, attrcount,
1932 LFS_MKTAG(0x400, 0x3ff, 0),
1933 LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
1934 split, end, -split,
1935 lfs_dir_commit_size, &size);
1936 if (err) {
1937 return err;
1938 }
1939
1940 // space is complicated, we need room for tail, crc, gstate,
1941 // cleanup delete, and we cap at half a block to give room
1942 // for metadata updates.
1943 if (end - split < 0xff
1944 && size <= lfs_min(lfs->cfg->block_size - 36,
1945 lfs_alignup(
1946 (lfs->cfg->metadata_max
1947 ? lfs->cfg->metadata_max
1948 : lfs->cfg->block_size)/2,
1949 lfs->cfg->prog_size))) {
1950 break;
1951 }
1952
1953 split = split + ((end - split) / 2);
1954 }
1955
1956 if (split == begin) {
1957 // no split needed
1958 break;
1959 }
1960
1961 // split into two metadata pairs and continue
1962 int err = lfs_dir_split(lfs, dir, attrs, attrcount,
1963 source, split, end);
1964 if (err && err != LFS_ERR_NOSPC) {
1965 return err;
1966 }
1967
1968 if (err) {
1969 // we can't allocate a new block, try to compact with degraded
1970 // performance
1971 LFS_WARN("Unable to split {0x%"PRIx32", 0x%"PRIx32"}",
1972 dir->pair[0], dir->pair[1]);
1973 break;
1974 } else {
1975 end = split;
1976 }
1977 }
1978
1979 if (lfs_dir_needsrelocation(lfs, dir)
1980 && lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
1981 // oh no! we're writing too much to the superblock,
1982 // should we expand?
1983 lfs_ssize_t size = lfs_fs_rawsize(lfs);
1984 if (size < 0) {
1985 return size;
1986 }
1987
1988 // do we have extra space? littlefs can't reclaim this space
1989 // by itself, so expand cautiously
1990 if ((lfs_size_t)size < lfs->cfg->block_count/2) {
1991 LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
1992 int err = lfs_dir_split(lfs, dir, attrs, attrcount,
1993 source, begin, end);
1994 if (err && err != LFS_ERR_NOSPC) {
1995 return err;
1996 }
1997
1998 if (err) {
1999 // welp, we tried, if we ran out of space there's not much
2000 // we can do, we'll error later if we've become frozen
2001 LFS_WARN("Unable to expand superblock");
2002 } else {
2003 end = begin;
2004 }
2005 }
2006 }
2007
2008 return lfs_dir_compact(lfs, dir, attrs, attrcount, source, begin, end);
2009 }
2010 #endif
2011
2012 #ifndef LFS_READONLY
lfs_dir_relocatingcommit(lfs_t * lfs,lfs_mdir_t * dir,const lfs_block_t pair[2],const struct lfs_mattr * attrs,int attrcount,lfs_mdir_t * pdir)2013 static int lfs_dir_relocatingcommit(lfs_t *lfs, lfs_mdir_t *dir,
2014 const lfs_block_t pair[2],
2015 const struct lfs_mattr *attrs, int attrcount,
2016 lfs_mdir_t *pdir) {
2017 int state = 0;
2018
2019 // calculate changes to the directory
2020 bool hasdelete = false;
2021 for (int i = 0; i < attrcount; i++) {
2022 if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
2023 dir->count += 1;
2024 } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
2025 LFS_ASSERT(dir->count > 0);
2026 dir->count -= 1;
2027 hasdelete = true;
2028 } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
2029 dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
2030 dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
2031 dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
2032 lfs_pair_fromle32(dir->tail);
2033 }
2034 }
2035
2036 // should we actually drop the directory block?
2037 if (hasdelete && dir->count == 0) {
2038 LFS_ASSERT(pdir);
2039 int err = lfs_fs_pred(lfs, dir->pair, pdir);
2040 if (err && err != LFS_ERR_NOENT) {
2041 return err;
2042 }
2043
2044 if (err != LFS_ERR_NOENT && pdir->split) {
2045 state = LFS_OK_DROPPED;
2046 goto fixmlist;
2047 }
2048 }
2049
2050 if (dir->erased) {
2051 // try to commit
2052 struct lfs_commit commit = {
2053 .block = dir->pair[0],
2054 .off = dir->off,
2055 .ptag = dir->etag,
2056 .crc = 0xffffffff,
2057
2058 .begin = dir->off,
2059 .end = (lfs->cfg->metadata_max ?
2060 lfs->cfg->metadata_max : lfs->cfg->block_size) - 8,
2061 };
2062
2063 // traverse attrs that need to be written out
2064 lfs_pair_tole32(dir->tail);
2065 int err = lfs_dir_traverse(lfs,
2066 dir, dir->off, dir->etag, attrs, attrcount,
2067 0, 0, 0, 0, 0,
2068 lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
2069 lfs, &commit});
2070 lfs_pair_fromle32(dir->tail);
2071 if (err) {
2072 if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
2073 goto compact;
2074 }
2075 return err;
2076 }
2077
2078 // commit any global diffs if we have any
2079 lfs_gstate_t delta = {0};
2080 lfs_gstate_xor(&delta, &lfs->gstate);
2081 lfs_gstate_xor(&delta, &lfs->gdisk);
2082 lfs_gstate_xor(&delta, &lfs->gdelta);
2083 delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
2084 if (!lfs_gstate_iszero(&delta)) {
2085 err = lfs_dir_getgstate(lfs, dir, &delta);
2086 if (err) {
2087 return err;
2088 }
2089
2090 lfs_gstate_tole32(&delta);
2091 err = lfs_dir_commitattr(lfs, &commit,
2092 LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
2093 sizeof(delta)), &delta);
2094 if (err) {
2095 if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
2096 goto compact;
2097 }
2098 return err;
2099 }
2100 }
2101
2102 // finalize commit with the crc
2103 err = lfs_dir_commitcrc(lfs, &commit);
2104 if (err) {
2105 if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
2106 goto compact;
2107 }
2108 return err;
2109 }
2110
2111 // successful commit, update dir
2112 LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
2113 dir->off = commit.off;
2114 dir->etag = commit.ptag;
2115 // and update gstate
2116 lfs->gdisk = lfs->gstate;
2117 lfs->gdelta = (lfs_gstate_t){0};
2118
2119 goto fixmlist;
2120 }
2121
2122 compact:
2123 // fall back to compaction
2124 lfs_cache_drop(lfs, &lfs->pcache);
2125
2126 state = lfs_dir_splittingcompact(lfs, dir, attrs, attrcount,
2127 dir, 0, dir->count);
2128 if (state < 0) {
2129 return state;
2130 }
2131
2132 goto fixmlist;
2133
2134 fixmlist:;
2135 // this complicated bit of logic is for fixing up any active
2136 // metadata-pairs that we may have affected
2137 //
2138 // note we have to make two passes since the mdir passed to
2139 // lfs_dir_commit could also be in this list, and even then
2140 // we need to copy the pair so they don't get clobbered if we refetch
2141 // our mdir.
2142 lfs_block_t oldpair[2] = {pair[0], pair[1]};
2143 for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
2144 if (lfs_pair_cmp(d->m.pair, oldpair) == 0) {
2145 d->m = *dir;
2146 if (d->m.pair != pair) {
2147 for (int i = 0; i < attrcount; i++) {
2148 if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
2149 d->id == lfs_tag_id(attrs[i].tag)) {
2150 d->m.pair[0] = LFS_BLOCK_NULL;
2151 d->m.pair[1] = LFS_BLOCK_NULL;
2152 } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
2153 d->id > lfs_tag_id(attrs[i].tag)) {
2154 d->id -= 1;
2155 if (d->type == LFS_TYPE_DIR) {
2156 ((lfs_dir_t*)d)->pos -= 1;
2157 }
2158 } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
2159 d->id >= lfs_tag_id(attrs[i].tag)) {
2160 d->id += 1;
2161 if (d->type == LFS_TYPE_DIR) {
2162 ((lfs_dir_t*)d)->pos += 1;
2163 }
2164 }
2165 }
2166 }
2167
2168 while (d->id >= d->m.count && d->m.split) {
2169 // we split and id is on tail now
2170 d->id -= d->m.count;
2171 int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
2172 if (err) {
2173 return err;
2174 }
2175 }
2176 }
2177 }
2178
2179 return state;
2180 }
2181 #endif
2182
2183 #ifndef LFS_READONLY
lfs_dir_orphaningcommit(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount)2184 static int lfs_dir_orphaningcommit(lfs_t *lfs, lfs_mdir_t *dir,
2185 const struct lfs_mattr *attrs, int attrcount) {
2186 // check for any inline files that aren't RAM backed and
2187 // forcefully evict them, needed for filesystem consistency
2188 for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
2189 if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
2190 f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
2191 f->ctz.size > lfs->cfg->cache_size) {
2192 int err = lfs_file_outline(lfs, f);
2193 if (err) {
2194 return err;
2195 }
2196
2197 err = lfs_file_flush(lfs, f);
2198 if (err) {
2199 return err;
2200 }
2201 }
2202 }
2203
2204 lfs_block_t lpair[2] = {dir->pair[0], dir->pair[1]};
2205 lfs_mdir_t ldir = *dir;
2206 lfs_mdir_t pdir;
2207 int state = lfs_dir_relocatingcommit(lfs, &ldir, dir->pair,
2208 attrs, attrcount, &pdir);
2209 if (state < 0) {
2210 return state;
2211 }
2212
2213 // update if we're not in mlist, note we may have already been
2214 // updated if we are in mlist
2215 if (lfs_pair_cmp(dir->pair, lpair) == 0) {
2216 *dir = ldir;
2217 }
2218
2219 // commit was successful, but may require other changes in the
2220 // filesystem, these would normally be tail recursive, but we have
2221 // flattened them here avoid unbounded stack usage
2222
2223 // need to drop?
2224 if (state == LFS_OK_DROPPED) {
2225 // steal state
2226 int err = lfs_dir_getgstate(lfs, dir, &lfs->gdelta);
2227 if (err) {
2228 return err;
2229 }
2230
2231 // steal tail, note that this can't create a recursive drop
2232 lpair[0] = pdir.pair[0];
2233 lpair[1] = pdir.pair[1];
2234 lfs_pair_tole32(dir->tail);
2235 state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
2236 {LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
2237 dir->tail}),
2238 NULL);
2239 lfs_pair_fromle32(dir->tail);
2240 if (state < 0) {
2241 return state;
2242 }
2243
2244 ldir = pdir;
2245 }
2246
2247 // need to relocate?
2248 bool orphans = false;
2249 while (state == LFS_OK_RELOCATED) {
2250 LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
2251 "-> {0x%"PRIx32", 0x%"PRIx32"}",
2252 lpair[0], lpair[1], ldir.pair[0], ldir.pair[1]);
2253 state = 0;
2254
2255 // update internal root
2256 if (lfs_pair_cmp(lpair, lfs->root) == 0) {
2257 lfs->root[0] = ldir.pair[0];
2258 lfs->root[1] = ldir.pair[1];
2259 }
2260
2261 // update internally tracked dirs
2262 for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
2263 if (lfs_pair_cmp(lpair, d->m.pair) == 0) {
2264 d->m.pair[0] = ldir.pair[0];
2265 d->m.pair[1] = ldir.pair[1];
2266 }
2267
2268 if (d->type == LFS_TYPE_DIR &&
2269 lfs_pair_cmp(lpair, ((lfs_dir_t*)d)->head) == 0) {
2270 ((lfs_dir_t*)d)->head[0] = ldir.pair[0];
2271 ((lfs_dir_t*)d)->head[1] = ldir.pair[1];
2272 }
2273 }
2274
2275 // find parent
2276 lfs_stag_t tag = lfs_fs_parent(lfs, lpair, &pdir);
2277 if (tag < 0 && tag != LFS_ERR_NOENT) {
2278 return tag;
2279 }
2280
2281 bool hasparent = (tag != LFS_ERR_NOENT);
2282 if (tag != LFS_ERR_NOENT) {
2283 // note that if we have a parent, we must have a pred, so this will
2284 // always create an orphan
2285 int err = lfs_fs_preporphans(lfs, +1);
2286 if (err) {
2287 return err;
2288 }
2289
2290 // fix pending move in this pair? this looks like an optimization but
2291 // is in fact _required_ since relocating may outdate the move.
2292 uint16_t moveid = 0x3ff;
2293 if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
2294 moveid = lfs_tag_id(lfs->gstate.tag);
2295 LFS_DEBUG("Fixing move while relocating "
2296 "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
2297 pdir.pair[0], pdir.pair[1], moveid);
2298 lfs_fs_prepmove(lfs, 0x3ff, NULL);
2299 if (moveid < lfs_tag_id(tag)) {
2300 tag -= LFS_MKTAG(0, 1, 0);
2301 }
2302 }
2303
2304 lfs_block_t ppair[2] = {pdir.pair[0], pdir.pair[1]};
2305 lfs_pair_tole32(ldir.pair);
2306 state = lfs_dir_relocatingcommit(lfs, &pdir, ppair, LFS_MKATTRS(
2307 {LFS_MKTAG_IF(moveid != 0x3ff,
2308 LFS_TYPE_DELETE, moveid, 0), NULL},
2309 {tag, ldir.pair}),
2310 NULL);
2311 lfs_pair_fromle32(ldir.pair);
2312 if (state < 0) {
2313 return state;
2314 }
2315
2316 if (state == LFS_OK_RELOCATED) {
2317 lpair[0] = ppair[0];
2318 lpair[1] = ppair[1];
2319 ldir = pdir;
2320 orphans = true;
2321 continue;
2322 }
2323 }
2324
2325 // find pred
2326 int err = lfs_fs_pred(lfs, lpair, &pdir);
2327 if (err && err != LFS_ERR_NOENT) {
2328 return err;
2329 }
2330 LFS_ASSERT(!(hasparent && err == LFS_ERR_NOENT));
2331
2332 // if we can't find dir, it must be new
2333 if (err != LFS_ERR_NOENT) {
2334 if (lfs_gstate_hasorphans(&lfs->gstate)) {
2335 // next step, clean up orphans
2336 err = lfs_fs_preporphans(lfs, -hasparent);
2337 if (err) {
2338 return err;
2339 }
2340 }
2341
2342 // fix pending move in this pair? this looks like an optimization
2343 // but is in fact _required_ since relocating may outdate the move.
2344 uint16_t moveid = 0x3ff;
2345 if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
2346 moveid = lfs_tag_id(lfs->gstate.tag);
2347 LFS_DEBUG("Fixing move while relocating "
2348 "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
2349 pdir.pair[0], pdir.pair[1], moveid);
2350 lfs_fs_prepmove(lfs, 0x3ff, NULL);
2351 }
2352
2353 // replace bad pair, either we clean up desync, or no desync occured
2354 lpair[0] = pdir.pair[0];
2355 lpair[1] = pdir.pair[1];
2356 lfs_pair_tole32(ldir.pair);
2357 state = lfs_dir_relocatingcommit(lfs, &pdir, lpair, LFS_MKATTRS(
2358 {LFS_MKTAG_IF(moveid != 0x3ff,
2359 LFS_TYPE_DELETE, moveid, 0), NULL},
2360 {LFS_MKTAG(LFS_TYPE_TAIL + pdir.split, 0x3ff, 8),
2361 ldir.pair}),
2362 NULL);
2363 lfs_pair_fromle32(ldir.pair);
2364 if (state < 0) {
2365 return state;
2366 }
2367
2368 ldir = pdir;
2369 }
2370 }
2371
2372 return orphans ? LFS_OK_ORPHANED : 0;
2373 }
2374 #endif
2375
2376 #ifndef LFS_READONLY
lfs_dir_commit(lfs_t * lfs,lfs_mdir_t * dir,const struct lfs_mattr * attrs,int attrcount)2377 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
2378 const struct lfs_mattr *attrs, int attrcount) {
2379 int orphans = lfs_dir_orphaningcommit(lfs, dir, attrs, attrcount);
2380 if (orphans < 0) {
2381 return orphans;
2382 }
2383
2384 if (orphans) {
2385 // make sure we've removed all orphans, this is a noop if there
2386 // are none, but if we had nested blocks failures we may have
2387 // created some
2388 int err = lfs_fs_deorphan(lfs, false);
2389 if (err) {
2390 return err;
2391 }
2392 }
2393
2394 return 0;
2395 }
2396 #endif
2397
2398
2399 /// Top level directory operations ///
2400 #ifndef LFS_READONLY
lfs_rawmkdir(lfs_t * lfs,const char * path)2401 static int lfs_rawmkdir(lfs_t *lfs, const char *path) {
2402 // deorphan if we haven't yet, needed at most once after poweron
2403 int err = lfs_fs_forceconsistency(lfs);
2404 if (err) {
2405 return err;
2406 }
2407
2408 struct lfs_mlist cwd;
2409 cwd.next = lfs->mlist;
2410 uint16_t id;
2411 err = lfs_dir_find(lfs, &cwd.m, &path, &id);
2412 if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
2413 return (err < 0) ? err : LFS_ERR_EXIST;
2414 }
2415
2416 // check that name fits
2417 lfs_size_t nlen = strlen(path);
2418 if (nlen > lfs->name_max) {
2419 return LFS_ERR_NAMETOOLONG;
2420 }
2421
2422 // build up new directory
2423 lfs_alloc_ack(lfs);
2424 lfs_mdir_t dir;
2425 err = lfs_dir_alloc(lfs, &dir);
2426 if (err) {
2427 return err;
2428 }
2429
2430 // find end of list
2431 lfs_mdir_t pred = cwd.m;
2432 while (pred.split) {
2433 err = lfs_dir_fetch(lfs, &pred, pred.tail);
2434 if (err) {
2435 return err;
2436 }
2437 }
2438
2439 // setup dir
2440 lfs_pair_tole32(pred.tail);
2441 err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
2442 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
2443 lfs_pair_fromle32(pred.tail);
2444 if (err) {
2445 return err;
2446 }
2447
2448 // current block not end of list?
2449 if (cwd.m.split) {
2450 // update tails, this creates a desync
2451 err = lfs_fs_preporphans(lfs, +1);
2452 if (err) {
2453 return err;
2454 }
2455
2456 // it's possible our predecessor has to be relocated, and if
2457 // our parent is our predecessor's predecessor, this could have
2458 // caused our parent to go out of date, fortunately we can hook
2459 // ourselves into littlefs to catch this
2460 cwd.type = 0;
2461 cwd.id = 0;
2462 lfs->mlist = &cwd;
2463
2464 lfs_pair_tole32(dir.pair);
2465 err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
2466 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
2467 lfs_pair_fromle32(dir.pair);
2468 if (err) {
2469 lfs->mlist = cwd.next;
2470 return err;
2471 }
2472
2473 lfs->mlist = cwd.next;
2474 err = lfs_fs_preporphans(lfs, -1);
2475 if (err) {
2476 return err;
2477 }
2478 }
2479
2480 // now insert into our parent block
2481 lfs_pair_tole32(dir.pair);
2482 err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
2483 {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
2484 {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
2485 {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
2486 {LFS_MKTAG_IF(!cwd.m.split,
2487 LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
2488 lfs_pair_fromle32(dir.pair);
2489 if (err) {
2490 return err;
2491 }
2492
2493 return 0;
2494 }
2495 #endif
2496
lfs_dir_rawopen(lfs_t * lfs,lfs_dir_t * dir,const char * path)2497 static int lfs_dir_rawopen(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
2498 lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
2499 if (tag < 0) {
2500 return tag;
2501 }
2502
2503 if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
2504 return LFS_ERR_NOTDIR;
2505 }
2506
2507 lfs_block_t pair[2];
2508 if (lfs_tag_id(tag) == 0x3ff) {
2509 // handle root dir separately
2510 pair[0] = lfs->root[0];
2511 pair[1] = lfs->root[1];
2512 } else {
2513 // get dir pair from parent
2514 lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
2515 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
2516 if (res < 0) {
2517 return res;
2518 }
2519 lfs_pair_fromle32(pair);
2520 }
2521
2522 // fetch first pair
2523 int err = lfs_dir_fetch(lfs, &dir->m, pair);
2524 if (err) {
2525 return err;
2526 }
2527
2528 // setup entry
2529 dir->head[0] = dir->m.pair[0];
2530 dir->head[1] = dir->m.pair[1];
2531 dir->id = 0;
2532 dir->pos = 0;
2533
2534 // add to list of mdirs
2535 dir->type = LFS_TYPE_DIR;
2536 lfs_mlist_append(lfs, (struct lfs_mlist *)dir);
2537
2538 return 0;
2539 }
2540
lfs_dir_rawclose(lfs_t * lfs,lfs_dir_t * dir)2541 static int lfs_dir_rawclose(lfs_t *lfs, lfs_dir_t *dir) {
2542 // remove from list of mdirs
2543 lfs_mlist_remove(lfs, (struct lfs_mlist *)dir);
2544
2545 return 0;
2546 }
2547
lfs_dir_rawread(lfs_t * lfs,lfs_dir_t * dir,struct lfs_info * info)2548 static int lfs_dir_rawread(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
2549 memset(info, 0, sizeof(*info));
2550
2551 // special offset for '.' and '..'
2552 if (dir->pos == 0) {
2553 info->type = LFS_TYPE_DIR;
2554 strcpy(info->name, ".");
2555 dir->pos += 1;
2556 return true;
2557 } else if (dir->pos == 1) {
2558 info->type = LFS_TYPE_DIR;
2559 strcpy(info->name, "..");
2560 dir->pos += 1;
2561 return true;
2562 }
2563
2564 while (true) {
2565 if (dir->id == dir->m.count) {
2566 if (!dir->m.split) {
2567 return false;
2568 }
2569
2570 int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
2571 if (err) {
2572 return err;
2573 }
2574
2575 dir->id = 0;
2576 }
2577
2578 int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
2579 if (err && err != LFS_ERR_NOENT) {
2580 return err;
2581 }
2582
2583 dir->id += 1;
2584 if (err != LFS_ERR_NOENT) {
2585 break;
2586 }
2587 }
2588
2589 dir->pos += 1;
2590 return true;
2591 }
2592
lfs_dir_rawseek(lfs_t * lfs,lfs_dir_t * dir,lfs_off_t off)2593 static int lfs_dir_rawseek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
2594 // simply walk from head dir
2595 int err = lfs_dir_rawrewind(lfs, dir);
2596 if (err) {
2597 return err;
2598 }
2599
2600 // first two for ./..
2601 dir->pos = lfs_min(2, off);
2602 off -= dir->pos;
2603
2604 // skip superblock entry
2605 dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);
2606
2607 while (off > 0) {
2608 int diff = lfs_min(dir->m.count - dir->id, off);
2609 dir->id += diff;
2610 dir->pos += diff;
2611 off -= diff;
2612
2613 if (dir->id == dir->m.count) {
2614 if (!dir->m.split) {
2615 return LFS_ERR_INVAL;
2616 }
2617
2618 err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
2619 if (err) {
2620 return err;
2621 }
2622
2623 dir->id = 0;
2624 }
2625 }
2626
2627 return 0;
2628 }
2629
lfs_dir_rawtell(lfs_t * lfs,lfs_dir_t * dir)2630 static lfs_soff_t lfs_dir_rawtell(lfs_t *lfs, lfs_dir_t *dir) {
2631 (void)lfs;
2632 return dir->pos;
2633 }
2634
lfs_dir_rawrewind(lfs_t * lfs,lfs_dir_t * dir)2635 static int lfs_dir_rawrewind(lfs_t *lfs, lfs_dir_t *dir) {
2636 // reload the head dir
2637 int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
2638 if (err) {
2639 return err;
2640 }
2641
2642 dir->id = 0;
2643 dir->pos = 0;
2644 return 0;
2645 }
2646
2647
2648 /// File index list operations ///
lfs_ctz_index(lfs_t * lfs,lfs_off_t * off)2649 static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
2650 lfs_off_t size = *off;
2651 lfs_off_t b = lfs->cfg->block_size - 2*4;
2652 lfs_off_t i = size / b;
2653 if (i == 0) {
2654 return 0;
2655 }
2656
2657 i = (size - 4*(lfs_popc(i-1)+2)) / b;
2658 *off = size - b*i - 4*lfs_popc(i);
2659 return i;
2660 }
2661
lfs_ctz_find(lfs_t * lfs,const lfs_cache_t * pcache,lfs_cache_t * rcache,lfs_block_t head,lfs_size_t size,lfs_size_t pos,lfs_block_t * block,lfs_off_t * off)2662 static int lfs_ctz_find(lfs_t *lfs,
2663 const lfs_cache_t *pcache, lfs_cache_t *rcache,
2664 lfs_block_t head, lfs_size_t size,
2665 lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
2666 if (size == 0) {
2667 *block = LFS_BLOCK_NULL;
2668 *off = 0;
2669 return 0;
2670 }
2671
2672 lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
2673 lfs_off_t target = lfs_ctz_index(lfs, &pos);
2674
2675 while (current > target) {
2676 lfs_size_t skip = lfs_min(
2677 lfs_npw2(current-target+1) - 1,
2678 lfs_ctz(current));
2679
2680 int err = lfs_bd_read(lfs,
2681 pcache, rcache, sizeof(head),
2682 head, 4*skip, &head, sizeof(head));
2683 head = lfs_fromle32(head);
2684 if (err) {
2685 return err;
2686 }
2687
2688 current -= 1 << skip;
2689 }
2690
2691 *block = head;
2692 *off = pos;
2693 return 0;
2694 }
2695
2696 #ifndef LFS_READONLY
lfs_ctz_extend(lfs_t * lfs,lfs_cache_t * pcache,lfs_cache_t * rcache,lfs_block_t head,lfs_size_t size,lfs_block_t * block,lfs_off_t * off)2697 static int lfs_ctz_extend(lfs_t *lfs,
2698 lfs_cache_t *pcache, lfs_cache_t *rcache,
2699 lfs_block_t head, lfs_size_t size,
2700 lfs_block_t *block, lfs_off_t *off) {
2701 while (true) {
2702 // go ahead and grab a block
2703 lfs_block_t nblock;
2704 int err = lfs_alloc(lfs, &nblock);
2705 if (err) {
2706 return err;
2707 }
2708
2709 {
2710 err = lfs_bd_erase(lfs, nblock);
2711 if (err) {
2712 if (err == LFS_ERR_CORRUPT) {
2713 goto relocate;
2714 }
2715 return err;
2716 }
2717
2718 if (size == 0) {
2719 *block = nblock;
2720 *off = 0;
2721 return 0;
2722 }
2723
2724 lfs_size_t noff = size - 1;
2725 lfs_off_t index = lfs_ctz_index(lfs, &noff);
2726 noff = noff + 1;
2727
2728 // just copy out the last block if it is incomplete
2729 if (noff != lfs->cfg->block_size) {
2730 for (lfs_off_t i = 0; i < noff; i++) {
2731 uint8_t data;
2732 err = lfs_bd_read(lfs,
2733 NULL, rcache, noff-i,
2734 head, i, &data, 1);
2735 if (err) {
2736 return err;
2737 }
2738
2739 err = lfs_bd_prog(lfs,
2740 pcache, rcache, true,
2741 nblock, i, &data, 1);
2742 if (err) {
2743 if (err == LFS_ERR_CORRUPT) {
2744 goto relocate;
2745 }
2746 return err;
2747 }
2748 }
2749
2750 *block = nblock;
2751 *off = noff;
2752 return 0;
2753 }
2754
2755 // append block
2756 index += 1;
2757 lfs_size_t skips = lfs_ctz(index) + 1;
2758 lfs_block_t nhead = head;
2759 for (lfs_off_t i = 0; i < skips; i++) {
2760 nhead = lfs_tole32(nhead);
2761 err = lfs_bd_prog(lfs, pcache, rcache, true,
2762 nblock, 4*i, &nhead, 4);
2763 nhead = lfs_fromle32(nhead);
2764 if (err) {
2765 if (err == LFS_ERR_CORRUPT) {
2766 goto relocate;
2767 }
2768 return err;
2769 }
2770
2771 if (i != skips-1) {
2772 err = lfs_bd_read(lfs,
2773 NULL, rcache, sizeof(nhead),
2774 nhead, 4*i, &nhead, sizeof(nhead));
2775 nhead = lfs_fromle32(nhead);
2776 if (err) {
2777 return err;
2778 }
2779 }
2780 }
2781
2782 *block = nblock;
2783 *off = 4*skips;
2784 return 0;
2785 }
2786
2787 relocate:
2788 LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);
2789
2790 // just clear cache and try a new block
2791 lfs_cache_drop(lfs, pcache);
2792 }
2793 }
2794 #endif
2795
lfs_ctz_traverse(lfs_t * lfs,const lfs_cache_t * pcache,lfs_cache_t * rcache,lfs_block_t head,lfs_size_t size,int (* cb)(void *,lfs_block_t),void * data)2796 static int lfs_ctz_traverse(lfs_t *lfs,
2797 const lfs_cache_t *pcache, lfs_cache_t *rcache,
2798 lfs_block_t head, lfs_size_t size,
2799 int (*cb)(void*, lfs_block_t), void *data) {
2800 if (size == 0) {
2801 return 0;
2802 }
2803
2804 lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
2805
2806 while (true) {
2807 int err = cb(data, head);
2808 if (err) {
2809 return err;
2810 }
2811
2812 if (index == 0) {
2813 return 0;
2814 }
2815
2816 lfs_block_t heads[2];
2817 int count = 2 - (index & 1);
2818 err = lfs_bd_read(lfs,
2819 pcache, rcache, count*sizeof(head),
2820 head, 0, &heads, count*sizeof(head));
2821 heads[0] = lfs_fromle32(heads[0]);
2822 heads[1] = lfs_fromle32(heads[1]);
2823 if (err) {
2824 return err;
2825 }
2826
2827 for (int i = 0; i < count-1; i++) {
2828 err = cb(data, heads[i]);
2829 if (err) {
2830 return err;
2831 }
2832 }
2833
2834 head = heads[count-1];
2835 index -= count;
2836 }
2837 }
2838
2839
2840 /// Top level file operations ///
lfs_file_rawopencfg(lfs_t * lfs,lfs_file_t * file,const char * path,int flags,const struct lfs_file_config * cfg)2841 static int lfs_file_rawopencfg(lfs_t *lfs, lfs_file_t *file,
2842 const char *path, int flags,
2843 const struct lfs_file_config *cfg) {
2844 #ifndef LFS_READONLY
2845 // deorphan if we haven't yet, needed at most once after poweron
2846 if ((flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
2847 int err = lfs_fs_forceconsistency(lfs);
2848 if (err) {
2849 return err;
2850 }
2851 }
2852 #else
2853 LFS_ASSERT((flags & LFS_O_RDONLY) == LFS_O_RDONLY);
2854 #endif
2855
2856 // setup simple file details
2857 int err;
2858 file->cfg = cfg;
2859 file->flags = flags;
2860 file->pos = 0;
2861 file->off = 0;
2862 file->cache.buffer = NULL;
2863
2864 // allocate entry for file if it doesn't exist
2865 lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
2866 if (tag < 0 && !(tag == LFS_ERR_NOENT && file->id != 0x3ff)) {
2867 err = tag;
2868 goto cleanup;
2869 }
2870
2871 // get id, add to list of mdirs to catch update changes
2872 file->type = LFS_TYPE_REG;
2873 lfs_mlist_append(lfs, (struct lfs_mlist *)file);
2874
2875 #ifdef LFS_READONLY
2876 if (tag == LFS_ERR_NOENT) {
2877 err = LFS_ERR_NOENT;
2878 goto cleanup;
2879 #else
2880 if (tag == LFS_ERR_NOENT) {
2881 if (!(flags & LFS_O_CREAT)) {
2882 err = LFS_ERR_NOENT;
2883 goto cleanup;
2884 }
2885
2886 // check that name fits
2887 lfs_size_t nlen = strlen(path);
2888 if (nlen > lfs->name_max) {
2889 err = LFS_ERR_NAMETOOLONG;
2890 goto cleanup;
2891 }
2892
2893 // get next slot and create entry to remember name
2894 err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
2895 {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0), NULL},
2896 {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
2897 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0), NULL}));
2898
2899 // it may happen that the file name doesn't fit in the metadata blocks, e.g., a 256 byte file name will
2900 // not fit in a 128 byte block.
2901 err = (err == LFS_ERR_NOSPC) ? LFS_ERR_NAMETOOLONG : err;
2902 if (err) {
2903 goto cleanup;
2904 }
2905
2906 tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
2907 } else if (flags & LFS_O_EXCL) {
2908 err = LFS_ERR_EXIST;
2909 goto cleanup;
2910 #endif
2911 } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
2912 err = LFS_ERR_ISDIR;
2913 goto cleanup;
2914 #ifndef LFS_READONLY
2915 } else if (flags & LFS_O_TRUNC) {
2916 // truncate if requested
2917 tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
2918 file->flags |= LFS_F_DIRTY;
2919 #endif
2920 } else {
2921 // try to load what's on disk, if it's inlined we'll fix it later
2922 tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
2923 LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
2924 if (tag < 0) {
2925 err = tag;
2926 goto cleanup;
2927 }
2928 lfs_ctz_fromle32(&file->ctz);
2929 }
2930
2931 // fetch attrs
2932 for (unsigned i = 0; i < file->cfg->attr_count; i++) {
2933 // if opened for read / read-write operations
2934 if ((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY) {
2935 lfs_stag_t res = lfs_dir_get(lfs, &file->m,
2936 LFS_MKTAG(0x7ff, 0x3ff, 0),
2937 LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
2938 file->id, file->cfg->attrs[i].size),
2939 file->cfg->attrs[i].buffer);
2940 if (res < 0 && res != LFS_ERR_NOENT) {
2941 err = res;
2942 goto cleanup;
2943 }
2944 }
2945
2946 #ifndef LFS_READONLY
2947 // if opened for write / read-write operations
2948 if ((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY) {
2949 if (file->cfg->attrs[i].size > lfs->attr_max) {
2950 err = LFS_ERR_NOSPC;
2951 goto cleanup;
2952 }
2953
2954 file->flags |= LFS_F_DIRTY;
2955 }
2956 #endif
2957 }
2958
2959 // allocate buffer if needed
2960 if (file->cfg->buffer) {
2961 file->cache.buffer = file->cfg->buffer;
2962 } else {
2963 file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
2964 if (!file->cache.buffer) {
2965 err = LFS_ERR_NOMEM;
2966 goto cleanup;
2967 }
2968 }
2969
2970 // zero to avoid information leak
2971 lfs_cache_zero(lfs, &file->cache);
2972
2973 if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
2974 // load inline files
2975 file->ctz.head = LFS_BLOCK_INLINE;
2976 file->ctz.size = lfs_tag_size(tag);
2977 file->flags |= LFS_F_INLINE;
2978 file->cache.block = file->ctz.head;
2979 file->cache.off = 0;
2980 file->cache.size = lfs->cfg->cache_size;
2981
2982 // don't always read (may be new/trunc file)
2983 if (file->ctz.size > 0) {
2984 lfs_stag_t res = lfs_dir_get(lfs, &file->m,
2985 LFS_MKTAG(0x700, 0x3ff, 0),
2986 LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
2987 lfs_min(file->cache.size, 0x3fe)),
2988 file->cache.buffer);
2989 if (res < 0) {
2990 err = res;
2991 goto cleanup;
2992 }
2993 }
2994 }
2995
2996 return 0;
2997
2998 cleanup:
2999 // clean up lingering resources
3000 #ifndef LFS_READONLY
3001 file->flags |= LFS_F_ERRED;
3002 #endif
3003 lfs_file_rawclose(lfs, file);
3004 return err;
3005 }
3006
3007 static int lfs_file_rawopen(lfs_t *lfs, lfs_file_t *file,
3008 const char *path, int flags) {
3009 static const struct lfs_file_config defaults = {0};
3010 int err = lfs_file_rawopencfg(lfs, file, path, flags, &defaults);
3011 return err;
3012 }
3013
3014 static int lfs_file_rawclose(lfs_t *lfs, lfs_file_t *file) {
3015 #ifndef LFS_READONLY
3016 int err = lfs_file_rawsync(lfs, file);
3017 #else
3018 int err = 0;
3019 #endif
3020
3021 // remove from list of mdirs
3022 lfs_mlist_remove(lfs, (struct lfs_mlist*)file);
3023
3024 // clean up memory
3025 if (!file->cfg->buffer) {
3026 lfs_free(file->cache.buffer);
3027 }
3028
3029 return err;
3030 }
3031
3032
3033 #ifndef LFS_READONLY
3034 static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
3035 while (true) {
3036 // just relocate what exists into new block
3037 lfs_block_t nblock;
3038 int err = lfs_alloc(lfs, &nblock);
3039 if (err) {
3040 return err;
3041 }
3042
3043 err = lfs_bd_erase(lfs, nblock);
3044 if (err) {
3045 if (err == LFS_ERR_CORRUPT) {
3046 goto relocate;
3047 }
3048 return err;
3049 }
3050
3051 // either read from dirty cache or disk
3052 for (lfs_off_t i = 0; i < file->off; i++) {
3053 uint8_t data;
3054 if (file->flags & LFS_F_INLINE) {
3055 err = lfs_dir_getread(lfs, &file->m,
3056 // note we evict inline files before they can be dirty
3057 NULL, &file->cache, file->off-i,
3058 LFS_MKTAG(0xfff, 0x1ff, 0),
3059 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
3060 i, &data, 1);
3061 if (err) {
3062 return err;
3063 }
3064 } else {
3065 err = lfs_bd_read(lfs,
3066 &file->cache, &lfs->rcache, file->off-i,
3067 file->block, i, &data, 1);
3068 if (err) {
3069 return err;
3070 }
3071 }
3072
3073 err = lfs_bd_prog(lfs,
3074 &lfs->pcache, &lfs->rcache, true,
3075 nblock, i, &data, 1);
3076 if (err) {
3077 if (err == LFS_ERR_CORRUPT) {
3078 goto relocate;
3079 }
3080 return err;
3081 }
3082 }
3083
3084 // copy over new state of file
3085 memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
3086 file->cache.block = lfs->pcache.block;
3087 file->cache.off = lfs->pcache.off;
3088 file->cache.size = lfs->pcache.size;
3089 lfs_cache_zero(lfs, &lfs->pcache);
3090
3091 file->block = nblock;
3092 file->flags |= LFS_F_WRITING;
3093 return 0;
3094
3095 relocate:
3096 LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);
3097
3098 // just clear cache and try a new block
3099 lfs_cache_drop(lfs, &lfs->pcache);
3100 }
3101 }
3102 #endif
3103
3104 #ifndef LFS_READONLY
3105 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
3106 file->off = file->pos;
3107 lfs_alloc_ack(lfs);
3108 int err = lfs_file_relocate(lfs, file);
3109 if (err) {
3110 return err;
3111 }
3112
3113 file->flags &= ~LFS_F_INLINE;
3114 return 0;
3115 }
3116 #endif
3117
3118 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
3119 if (file->flags & LFS_F_READING) {
3120 if (!(file->flags & LFS_F_INLINE)) {
3121 lfs_cache_drop(lfs, &file->cache);
3122 }
3123 file->flags &= ~LFS_F_READING;
3124 }
3125
3126 #ifndef LFS_READONLY
3127 if (file->flags & LFS_F_WRITING) {
3128 lfs_off_t pos = file->pos;
3129
3130 if (!(file->flags & LFS_F_INLINE)) {
3131 // copy over anything after current branch
3132 lfs_file_t orig = {
3133 .ctz.head = file->ctz.head,
3134 .ctz.size = file->ctz.size,
3135 .flags = LFS_O_RDONLY,
3136 .pos = file->pos,
3137 .cache = lfs->rcache,
3138 };
3139 lfs_cache_drop(lfs, &lfs->rcache);
3140
3141 while (file->pos < file->ctz.size) {
3142 // copy over a byte at a time, leave it up to caching
3143 // to make this efficient
3144 uint8_t data;
3145 lfs_ssize_t res = lfs_file_flushedread(lfs, &orig, &data, 1);
3146 if (res < 0) {
3147 return res;
3148 }
3149
3150 res = lfs_file_flushedwrite(lfs, file, &data, 1);
3151 if (res < 0) {
3152 return res;
3153 }
3154
3155 // keep our reference to the rcache in sync
3156 if (lfs->rcache.block != LFS_BLOCK_NULL) {
3157 lfs_cache_drop(lfs, &orig.cache);
3158 lfs_cache_drop(lfs, &lfs->rcache);
3159 }
3160 }
3161
3162 // write out what we have
3163 while (true) {
3164 int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
3165 if (err) {
3166 if (err == LFS_ERR_CORRUPT) {
3167 goto relocate;
3168 }
3169 return err;
3170 }
3171
3172 break;
3173
3174 relocate:
3175 LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
3176 err = lfs_file_relocate(lfs, file);
3177 if (err) {
3178 return err;
3179 }
3180 }
3181 } else {
3182 file->pos = lfs_max(file->pos, file->ctz.size);
3183 }
3184
3185 // actual file updates
3186 file->ctz.head = file->block;
3187 file->ctz.size = file->pos;
3188 file->flags &= ~LFS_F_WRITING;
3189 file->flags |= LFS_F_DIRTY;
3190
3191 file->pos = pos;
3192 }
3193 #endif
3194
3195 return 0;
3196 }
3197
3198 #ifndef LFS_READONLY
3199 static int lfs_file_rawsync(lfs_t *lfs, lfs_file_t *file) {
3200 if (file->flags & LFS_F_ERRED) {
3201 // it's not safe to do anything if our file errored
3202 return 0;
3203 }
3204
3205 int err = lfs_file_flush(lfs, file);
3206 if (err) {
3207 file->flags |= LFS_F_ERRED;
3208 return err;
3209 }
3210
3211
3212 if ((file->flags & LFS_F_DIRTY) &&
3213 !lfs_pair_isnull(file->m.pair)) {
3214 // update dir entry
3215 uint16_t type;
3216 const void *buffer;
3217 lfs_size_t size;
3218 struct lfs_ctz ctz;
3219 if (file->flags & LFS_F_INLINE) {
3220 // inline the whole file
3221 type = LFS_TYPE_INLINESTRUCT;
3222 buffer = file->cache.buffer;
3223 size = file->ctz.size;
3224 } else {
3225 // update the ctz reference
3226 type = LFS_TYPE_CTZSTRUCT;
3227 // copy ctz so alloc will work during a relocate
3228 ctz = file->ctz;
3229 lfs_ctz_tole32(&ctz);
3230 buffer = &ctz;
3231 size = sizeof(ctz);
3232 }
3233
3234 // commit file data and attributes
3235 err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
3236 {LFS_MKTAG(type, file->id, size), buffer},
3237 {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
3238 file->cfg->attr_count), file->cfg->attrs}));
3239 if (err) {
3240 file->flags |= LFS_F_ERRED;
3241 return err;
3242 }
3243
3244 file->flags &= ~LFS_F_DIRTY;
3245 }
3246
3247 return 0;
3248 }
3249 #endif
3250
3251 static lfs_ssize_t lfs_file_flushedread(lfs_t *lfs, lfs_file_t *file,
3252 void *buffer, lfs_size_t size) {
3253 uint8_t *data = buffer;
3254 lfs_size_t nsize = size;
3255
3256 if (file->pos >= file->ctz.size) {
3257 // eof if past end
3258 return 0;
3259 }
3260
3261 size = lfs_min(size, file->ctz.size - file->pos);
3262 nsize = size;
3263
3264 while (nsize > 0) {
3265 // check if we need a new block
3266 if (!(file->flags & LFS_F_READING) ||
3267 file->off == lfs->cfg->block_size) {
3268 if (!(file->flags & LFS_F_INLINE)) {
3269 int err = lfs_ctz_find(lfs, NULL, &file->cache,
3270 file->ctz.head, file->ctz.size,
3271 file->pos, &file->block, &file->off);
3272 if (err) {
3273 return err;
3274 }
3275 } else {
3276 file->block = LFS_BLOCK_INLINE;
3277 file->off = file->pos;
3278 }
3279
3280 file->flags |= LFS_F_READING;
3281 }
3282
3283 // read as much as we can in current block
3284 lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
3285 if (file->flags & LFS_F_INLINE) {
3286 int err = lfs_dir_getread(lfs, &file->m,
3287 NULL, &file->cache, lfs->cfg->block_size,
3288 LFS_MKTAG(0xfff, 0x1ff, 0),
3289 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
3290 file->off, data, diff);
3291 if (err) {
3292 return err;
3293 }
3294 } else {
3295 int err = lfs_bd_read(lfs,
3296 NULL, &file->cache, lfs->cfg->block_size,
3297 file->block, file->off, data, diff);
3298 if (err) {
3299 return err;
3300 }
3301 }
3302
3303 file->pos += diff;
3304 file->off += diff;
3305 data += diff;
3306 nsize -= diff;
3307 }
3308
3309 return size;
3310 }
3311
3312 static lfs_ssize_t lfs_file_rawread(lfs_t *lfs, lfs_file_t *file,
3313 void *buffer, lfs_size_t size) {
3314 LFS_ASSERT((file->flags & LFS_O_RDONLY) == LFS_O_RDONLY);
3315
3316 #ifndef LFS_READONLY
3317 if (file->flags & LFS_F_WRITING) {
3318 // flush out any writes
3319 int err = lfs_file_flush(lfs, file);
3320 if (err) {
3321 return err;
3322 }
3323 }
3324 #endif
3325
3326 return lfs_file_flushedread(lfs, file, buffer, size);
3327 }
3328
3329
3330 #ifndef LFS_READONLY
3331 static lfs_ssize_t lfs_file_flushedwrite(lfs_t *lfs, lfs_file_t *file,
3332 const void *buffer, lfs_size_t size) {
3333 const uint8_t *data = buffer;
3334 lfs_size_t nsize = size;
3335
3336 if ((file->flags & LFS_F_INLINE) &&
3337 lfs_max(file->pos+nsize, file->ctz.size) >
3338 lfs_min(0x3fe, lfs_min(
3339 lfs->cfg->cache_size,
3340 (lfs->cfg->metadata_max ?
3341 lfs->cfg->metadata_max : lfs->cfg->block_size) / 8))) {
3342 // inline file doesn't fit anymore
3343 int err = lfs_file_outline(lfs, file);
3344 if (err) {
3345 file->flags |= LFS_F_ERRED;
3346 return err;
3347 }
3348 }
3349
3350 while (nsize > 0) {
3351 // check if we need a new block
3352 if (!(file->flags & LFS_F_WRITING) ||
3353 file->off == lfs->cfg->block_size) {
3354 if (!(file->flags & LFS_F_INLINE)) {
3355 if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
3356 // find out which block we're extending from
3357 int err = lfs_ctz_find(lfs, NULL, &file->cache,
3358 file->ctz.head, file->ctz.size,
3359 file->pos-1, &file->block, &file->off);
3360 if (err) {
3361 file->flags |= LFS_F_ERRED;
3362 return err;
3363 }
3364
3365 // mark cache as dirty since we may have read data into it
3366 lfs_cache_zero(lfs, &file->cache);
3367 }
3368
3369 // extend file with new blocks
3370 lfs_alloc_ack(lfs);
3371 int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
3372 file->block, file->pos,
3373 &file->block, &file->off);
3374 if (err) {
3375 file->flags |= LFS_F_ERRED;
3376 return err;
3377 }
3378 } else {
3379 file->block = LFS_BLOCK_INLINE;
3380 file->off = file->pos;
3381 }
3382
3383 file->flags |= LFS_F_WRITING;
3384 }
3385
3386 // program as much as we can in current block
3387 lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
3388 while (true) {
3389 int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
3390 file->block, file->off, data, diff);
3391 if (err) {
3392 if (err == LFS_ERR_CORRUPT) {
3393 goto relocate;
3394 }
3395 file->flags |= LFS_F_ERRED;
3396 return err;
3397 }
3398
3399 break;
3400 relocate:
3401 err = lfs_file_relocate(lfs, file);
3402 if (err) {
3403 file->flags |= LFS_F_ERRED;
3404 return err;
3405 }
3406 }
3407
3408 file->pos += diff;
3409 file->off += diff;
3410 data += diff;
3411 nsize -= diff;
3412
3413 lfs_alloc_ack(lfs);
3414 }
3415
3416 return size;
3417 }
3418
3419 static lfs_ssize_t lfs_file_rawwrite(lfs_t *lfs, lfs_file_t *file,
3420 const void *buffer, lfs_size_t size) {
3421 LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
3422
3423 if (file->flags & LFS_F_READING) {
3424 // drop any reads
3425 int err = lfs_file_flush(lfs, file);
3426 if (err) {
3427 return err;
3428 }
3429 }
3430
3431 if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
3432 file->pos = file->ctz.size;
3433 }
3434
3435 if (file->pos + size > lfs->file_max) {
3436 // Larger than file limit?
3437 return LFS_ERR_FBIG;
3438 }
3439
3440 if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
3441 // fill with zeros
3442 lfs_off_t pos = file->pos;
3443 file->pos = file->ctz.size;
3444
3445 while (file->pos < pos) {
3446 lfs_ssize_t res = lfs_file_flushedwrite(lfs, file, &(uint8_t){0}, 1);
3447 if (res < 0) {
3448 return res;
3449 }
3450 }
3451 }
3452
3453 lfs_ssize_t nsize = lfs_file_flushedwrite(lfs, file, buffer, size);
3454 if (nsize < 0) {
3455 return nsize;
3456 }
3457
3458 file->flags &= ~LFS_F_ERRED;
3459 return nsize;
3460 }
3461 #endif
3462
3463 static lfs_soff_t lfs_file_rawseek(lfs_t *lfs, lfs_file_t *file,
3464 lfs_soff_t off, int whence) {
3465 // find new pos
3466 lfs_off_t npos = file->pos;
3467 if (whence == LFS_SEEK_SET) {
3468 npos = off;
3469 } else if (whence == LFS_SEEK_CUR) {
3470 if ((lfs_soff_t)file->pos + off < 0) {
3471 return LFS_ERR_INVAL;
3472 } else {
3473 npos = file->pos + off;
3474 }
3475 } else if (whence == LFS_SEEK_END) {
3476 lfs_soff_t res = lfs_file_rawsize(lfs, file) + off;
3477 if (res < 0) {
3478 return LFS_ERR_INVAL;
3479 } else {
3480 npos = res;
3481 }
3482 }
3483
3484 if (npos > lfs->file_max) {
3485 // file position out of range
3486 return LFS_ERR_INVAL;
3487 }
3488
3489 if (file->pos == npos) {
3490 // noop - position has not changed
3491 return npos;
3492 }
3493
3494 // if we're only reading and our new offset is still in the file's cache
3495 // we can avoid flushing and needing to reread the data
3496 if (
3497 #ifndef LFS_READONLY
3498 !(file->flags & LFS_F_WRITING)
3499 #else
3500 true
3501 #endif
3502 ) {
3503 int oindex = lfs_ctz_index(lfs, &(lfs_off_t){file->pos});
3504 lfs_off_t noff = npos;
3505 int nindex = lfs_ctz_index(lfs, &noff);
3506 if (oindex == nindex
3507 && noff >= file->cache.off
3508 && noff < file->cache.off + file->cache.size) {
3509 file->pos = npos;
3510 file->off = noff;
3511 return npos;
3512 }
3513 }
3514
3515 // write out everything beforehand, may be noop if rdonly
3516 int err = lfs_file_flush(lfs, file);
3517 if (err) {
3518 return err;
3519 }
3520
3521 // update pos
3522 file->pos = npos;
3523 return npos;
3524 }
3525
3526 #ifndef LFS_READONLY
3527 static int lfs_file_rawtruncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
3528 LFS_ASSERT((file->flags & LFS_O_WRONLY) == LFS_O_WRONLY);
3529
3530 if (size > LFS_FILE_MAX) {
3531 return LFS_ERR_INVAL;
3532 }
3533
3534 lfs_off_t pos = file->pos;
3535 lfs_off_t oldsize = lfs_file_rawsize(lfs, file);
3536 if (size < oldsize) {
3537 // need to flush since directly changing metadata
3538 int err = lfs_file_flush(lfs, file);
3539 if (err) {
3540 return err;
3541 }
3542
3543 // lookup new head in ctz skip list
3544 err = lfs_ctz_find(lfs, NULL, &file->cache,
3545 file->ctz.head, file->ctz.size,
3546 size, &file->block, &file->off);
3547 if (err) {
3548 return err;
3549 }
3550
3551 // need to set pos/block/off consistently so seeking back to
3552 // the old position does not get confused
3553 file->pos = size;
3554 file->ctz.head = file->block;
3555 file->ctz.size = size;
3556 file->flags |= LFS_F_DIRTY | LFS_F_READING;
3557 } else if (size > oldsize) {
3558 // flush+seek if not already at end
3559 lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_END);
3560 if (res < 0) {
3561 return (int)res;
3562 }
3563
3564 // fill with zeros
3565 while (file->pos < size) {
3566 res = lfs_file_rawwrite(lfs, file, &(uint8_t){0}, 1);
3567 if (res < 0) {
3568 return (int)res;
3569 }
3570 }
3571 }
3572
3573 // restore pos
3574 lfs_soff_t res = lfs_file_rawseek(lfs, file, pos, LFS_SEEK_SET);
3575 if (res < 0) {
3576 return (int)res;
3577 }
3578
3579 return 0;
3580 }
3581 #endif
3582
3583 static lfs_soff_t lfs_file_rawtell(lfs_t *lfs, lfs_file_t *file) {
3584 (void)lfs;
3585 return file->pos;
3586 }
3587
3588 static int lfs_file_rawrewind(lfs_t *lfs, lfs_file_t *file) {
3589 lfs_soff_t res = lfs_file_rawseek(lfs, file, 0, LFS_SEEK_SET);
3590 if (res < 0) {
3591 return (int)res;
3592 }
3593
3594 return 0;
3595 }
3596
3597 static lfs_soff_t lfs_file_rawsize(lfs_t *lfs, lfs_file_t *file) {
3598 (void)lfs;
3599
3600 #ifndef LFS_READONLY
3601 if (file->flags & LFS_F_WRITING) {
3602 return lfs_max(file->pos, file->ctz.size);
3603 }
3604 #endif
3605
3606 return file->ctz.size;
3607 }
3608
3609
3610 /// General fs operations ///
3611 static int lfs_rawstat(lfs_t *lfs, const char *path, struct lfs_info *info) {
3612 lfs_mdir_t cwd;
3613 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3614 if (tag < 0) {
3615 return (int)tag;
3616 }
3617
3618 return lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
3619 }
3620
3621 #ifndef LFS_READONLY
3622 static int lfs_rawremove(lfs_t *lfs, const char *path) {
3623 // deorphan if we haven't yet, needed at most once after poweron
3624 int err = lfs_fs_forceconsistency(lfs);
3625 if (err) {
3626 return err;
3627 }
3628
3629 lfs_mdir_t cwd;
3630 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3631 if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
3632 return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
3633 }
3634
3635 struct lfs_mlist dir;
3636 dir.next = lfs->mlist;
3637 if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
3638 // must be empty before removal
3639 lfs_block_t pair[2];
3640 lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
3641 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
3642 if (res < 0) {
3643 return (int)res;
3644 }
3645 lfs_pair_fromle32(pair);
3646
3647 err = lfs_dir_fetch(lfs, &dir.m, pair);
3648 if (err) {
3649 return err;
3650 }
3651
3652 if (dir.m.count > 0 || dir.m.split) {
3653 return LFS_ERR_NOTEMPTY;
3654 }
3655
3656 // mark fs as orphaned
3657 err = lfs_fs_preporphans(lfs, +1);
3658 if (err) {
3659 return err;
3660 }
3661
3662 // I know it's crazy but yes, dir can be changed by our parent's
3663 // commit (if predecessor is child)
3664 dir.type = 0;
3665 dir.id = 0;
3666 lfs->mlist = &dir;
3667 }
3668
3669 // delete the entry
3670 err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
3671 {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0), NULL}));
3672 if (err) {
3673 lfs->mlist = dir.next;
3674 return err;
3675 }
3676
3677 lfs->mlist = dir.next;
3678 if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
3679 // fix orphan
3680 err = lfs_fs_preporphans(lfs, -1);
3681 if (err) {
3682 return err;
3683 }
3684
3685 err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
3686 if (err) {
3687 return err;
3688 }
3689
3690 err = lfs_dir_drop(lfs, &cwd, &dir.m);
3691 if (err) {
3692 return err;
3693 }
3694 }
3695
3696 return 0;
3697 }
3698 #endif
3699
3700 #ifndef LFS_READONLY
3701 static int lfs_rawrename(lfs_t *lfs, const char *oldpath, const char *newpath) {
3702 // deorphan if we haven't yet, needed at most once after poweron
3703 int err = lfs_fs_forceconsistency(lfs);
3704 if (err) {
3705 return err;
3706 }
3707
3708 // find old entry
3709 lfs_mdir_t oldcwd;
3710 lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
3711 if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
3712 return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
3713 }
3714
3715 // find new entry
3716 lfs_mdir_t newcwd;
3717 uint16_t newid;
3718 lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
3719 if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
3720 !(prevtag == LFS_ERR_NOENT && newid != 0x3ff)) {
3721 return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
3722 }
3723
3724 // if we're in the same pair there's a few special cases...
3725 bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
3726 uint16_t newoldid = lfs_tag_id(oldtag);
3727
3728 struct lfs_mlist prevdir;
3729 prevdir.next = lfs->mlist;
3730 if (prevtag == LFS_ERR_NOENT) {
3731 // check that name fits
3732 lfs_size_t nlen = strlen(newpath);
3733 if (nlen > lfs->name_max) {
3734 return LFS_ERR_NAMETOOLONG;
3735 }
3736
3737 // there is a small chance we are being renamed in the same
3738 // directory/ to an id less than our old id, the global update
3739 // to handle this is a bit messy
3740 if (samepair && newid <= newoldid) {
3741 newoldid += 1;
3742 }
3743 } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
3744 return LFS_ERR_ISDIR;
3745 } else if (samepair && newid == newoldid) {
3746 // we're renaming to ourselves??
3747 return 0;
3748 } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
3749 // must be empty before removal
3750 lfs_block_t prevpair[2];
3751 lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
3752 LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
3753 if (res < 0) {
3754 return (int)res;
3755 }
3756 lfs_pair_fromle32(prevpair);
3757
3758 // must be empty before removal
3759 err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
3760 if (err) {
3761 return err;
3762 }
3763
3764 if (prevdir.m.count > 0 || prevdir.m.split) {
3765 return LFS_ERR_NOTEMPTY;
3766 }
3767
3768 // mark fs as orphaned
3769 err = lfs_fs_preporphans(lfs, +1);
3770 if (err) {
3771 return err;
3772 }
3773
3774 // I know it's crazy but yes, dir can be changed by our parent's
3775 // commit (if predecessor is child)
3776 prevdir.type = 0;
3777 prevdir.id = 0;
3778 lfs->mlist = &prevdir;
3779 }
3780
3781 if (!samepair) {
3782 lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
3783 }
3784
3785 // move over all attributes
3786 err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
3787 {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
3788 LFS_TYPE_DELETE, newid, 0), NULL},
3789 {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0), NULL},
3790 {LFS_MKTAG(lfs_tag_type3(oldtag), newid, strlen(newpath)), newpath},
3791 {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
3792 {LFS_MKTAG_IF(samepair,
3793 LFS_TYPE_DELETE, newoldid, 0), NULL}));
3794 if (err) {
3795 lfs->mlist = prevdir.next;
3796 return err;
3797 }
3798
3799 // let commit clean up after move (if we're different! otherwise move
3800 // logic already fixed it for us)
3801 if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
3802 // prep gstate and delete move id
3803 lfs_fs_prepmove(lfs, 0x3ff, NULL);
3804 err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
3805 {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0), NULL}));
3806 if (err) {
3807 lfs->mlist = prevdir.next;
3808 return err;
3809 }
3810 }
3811
3812 lfs->mlist = prevdir.next;
3813 if (prevtag != LFS_ERR_NOENT
3814 && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
3815 // fix orphan
3816 err = lfs_fs_preporphans(lfs, -1);
3817 if (err) {
3818 return err;
3819 }
3820
3821 err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
3822 if (err) {
3823 return err;
3824 }
3825
3826 err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
3827 if (err) {
3828 return err;
3829 }
3830 }
3831
3832 return 0;
3833 }
3834 #endif
3835
3836 static lfs_ssize_t lfs_rawgetattr(lfs_t *lfs, const char *path,
3837 uint8_t type, void *buffer, lfs_size_t size) {
3838 lfs_mdir_t cwd;
3839 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3840 if (tag < 0) {
3841 return tag;
3842 }
3843
3844 uint16_t id = lfs_tag_id(tag);
3845 if (id == 0x3ff) {
3846 // special case for root
3847 id = 0;
3848 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
3849 if (err) {
3850 return err;
3851 }
3852 }
3853
3854 tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
3855 LFS_MKTAG(LFS_TYPE_USERATTR + type,
3856 id, lfs_min(size, lfs->attr_max)),
3857 buffer);
3858 if (tag < 0) {
3859 if (tag == LFS_ERR_NOENT) {
3860 return LFS_ERR_NOATTR;
3861 }
3862
3863 return tag;
3864 }
3865
3866 return lfs_tag_size(tag);
3867 }
3868
3869 #ifndef LFS_READONLY
3870 static int lfs_commitattr(lfs_t *lfs, const char *path,
3871 uint8_t type, const void *buffer, lfs_size_t size) {
3872 lfs_mdir_t cwd;
3873 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3874 if (tag < 0) {
3875 return tag;
3876 }
3877
3878 uint16_t id = lfs_tag_id(tag);
3879 if (id == 0x3ff) {
3880 // special case for root
3881 id = 0;
3882 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
3883 if (err) {
3884 return err;
3885 }
3886 }
3887
3888 return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
3889 {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
3890 }
3891 #endif
3892
3893 #ifndef LFS_READONLY
3894 static int lfs_rawsetattr(lfs_t *lfs, const char *path,
3895 uint8_t type, const void *buffer, lfs_size_t size) {
3896 if (size > lfs->attr_max) {
3897 return LFS_ERR_NOSPC;
3898 }
3899
3900 return lfs_commitattr(lfs, path, type, buffer, size);
3901 }
3902 #endif
3903
3904 #ifndef LFS_READONLY
3905 static int lfs_rawremoveattr(lfs_t *lfs, const char *path, uint8_t type) {
3906 return lfs_commitattr(lfs, path, type, NULL, 0x3ff);
3907 }
3908 #endif
3909
3910
3911 /// Filesystem operations ///
3912 static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
3913 lfs->cfg = cfg;
3914 int err = 0;
3915
3916 // validate that the lfs-cfg sizes were initiated properly before
3917 // performing any arithmetic logics with them
3918 LFS_ASSERT(lfs->cfg->read_size != 0);
3919 LFS_ASSERT(lfs->cfg->prog_size != 0);
3920 LFS_ASSERT(lfs->cfg->cache_size != 0);
3921
3922 // check that block size is a multiple of cache size is a multiple
3923 // of prog and read sizes
3924 LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
3925 LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
3926 LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
3927
3928 // check that the block size is large enough to fit ctz pointers
3929 LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
3930 <= lfs->cfg->block_size);
3931
3932 // block_cycles = 0 is no longer supported.
3933 //
3934 // block_cycles is the number of erase cycles before littlefs evicts
3935 // metadata logs as a part of wear leveling. Suggested values are in the
3936 // range of 100-1000, or set block_cycles to -1 to disable block-level
3937 // wear-leveling.
3938 LFS_ASSERT(lfs->cfg->block_cycles != 0);
3939
3940
3941 // setup read cache
3942 if (lfs->cfg->read_buffer) {
3943 lfs->rcache.buffer = lfs->cfg->read_buffer;
3944 } else {
3945 lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
3946 if (!lfs->rcache.buffer) {
3947 err = LFS_ERR_NOMEM;
3948 goto cleanup;
3949 }
3950 }
3951
3952 // setup program cache
3953 if (lfs->cfg->prog_buffer) {
3954 lfs->pcache.buffer = lfs->cfg->prog_buffer;
3955 } else {
3956 lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
3957 if (!lfs->pcache.buffer) {
3958 err = LFS_ERR_NOMEM;
3959 goto cleanup;
3960 }
3961 }
3962
3963 // zero to avoid information leaks
3964 lfs_cache_zero(lfs, &lfs->rcache);
3965 lfs_cache_zero(lfs, &lfs->pcache);
3966
3967 // setup lookahead, must be multiple of 64-bits, 32-bit aligned
3968 LFS_ASSERT(lfs->cfg->lookahead_size > 0);
3969 LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
3970 (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
3971 if (lfs->cfg->lookahead_buffer) {
3972 lfs->free.buffer = lfs->cfg->lookahead_buffer;
3973 } else {
3974 lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
3975 if (!lfs->free.buffer) {
3976 err = LFS_ERR_NOMEM;
3977 goto cleanup;
3978 }
3979 }
3980
3981 // check that the size limits are sane
3982 LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
3983 lfs->name_max = lfs->cfg->name_max;
3984 if (!lfs->name_max) {
3985 lfs->name_max = LFS_NAME_MAX;
3986 }
3987
3988 LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
3989 lfs->file_max = lfs->cfg->file_max;
3990 if (!lfs->file_max) {
3991 lfs->file_max = LFS_FILE_MAX;
3992 }
3993
3994 LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
3995 lfs->attr_max = lfs->cfg->attr_max;
3996 if (!lfs->attr_max) {
3997 lfs->attr_max = LFS_ATTR_MAX;
3998 }
3999
4000 LFS_ASSERT(lfs->cfg->metadata_max <= lfs->cfg->block_size);
4001
4002 // setup default state
4003 lfs->root[0] = LFS_BLOCK_NULL;
4004 lfs->root[1] = LFS_BLOCK_NULL;
4005 lfs->mlist = NULL;
4006 lfs->seed = 0;
4007 lfs->gdisk = (lfs_gstate_t){0};
4008 lfs->gstate = (lfs_gstate_t){0};
4009 lfs->gdelta = (lfs_gstate_t){0};
4010 #ifdef LFS_MIGRATE
4011 lfs->lfs1 = NULL;
4012 #endif
4013
4014 return 0;
4015
4016 cleanup:
4017 lfs_deinit(lfs);
4018 return err;
4019 }
4020
4021 static int lfs_deinit(lfs_t *lfs) {
4022 // free allocated memory
4023 if (!lfs->cfg->read_buffer) {
4024 lfs_free(lfs->rcache.buffer);
4025 }
4026
4027 if (!lfs->cfg->prog_buffer) {
4028 lfs_free(lfs->pcache.buffer);
4029 }
4030
4031 if (!lfs->cfg->lookahead_buffer) {
4032 lfs_free(lfs->free.buffer);
4033 }
4034
4035 return 0;
4036 }
4037
4038 #ifndef LFS_READONLY
4039 static int lfs_rawformat(lfs_t *lfs, const struct lfs_config *cfg) {
4040 int err = 0;
4041 {
4042 err = lfs_init(lfs, cfg);
4043 if (err) {
4044 return err;
4045 }
4046
4047 // create free lookahead
4048 memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
4049 lfs->free.off = 0;
4050 lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
4051 lfs->cfg->block_count);
4052 lfs->free.i = 0;
4053 lfs_alloc_ack(lfs);
4054
4055 // create root dir
4056 lfs_mdir_t root;
4057 err = lfs_dir_alloc(lfs, &root);
4058 if (err) {
4059 goto cleanup;
4060 }
4061
4062 // write one superblock
4063 lfs_superblock_t superblock = {
4064 .version = LFS_DISK_VERSION,
4065 .block_size = lfs->cfg->block_size,
4066 .block_count = lfs->cfg->block_count,
4067 .name_max = lfs->name_max,
4068 .file_max = lfs->file_max,
4069 .attr_max = lfs->attr_max,
4070 };
4071
4072 lfs_superblock_tole32(&superblock);
4073 err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
4074 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
4075 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
4076 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4077 &superblock}));
4078 if (err) {
4079 goto cleanup;
4080 }
4081
4082 // force compaction to prevent accidentally mounting any
4083 // older version of littlefs that may live on disk
4084 root.erased = false;
4085 err = lfs_dir_commit(lfs, &root, NULL, 0);
4086 if (err) {
4087 goto cleanup;
4088 }
4089
4090 // sanity check that fetch works
4091 err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
4092 if (err) {
4093 goto cleanup;
4094 }
4095 }
4096
4097 cleanup:
4098 lfs_deinit(lfs);
4099 return err;
4100
4101 }
4102 #endif
4103
4104 static int lfs_rawmount(lfs_t *lfs, const struct lfs_config *cfg) {
4105 int err = lfs_init(lfs, cfg);
4106 if (err) {
4107 return err;
4108 }
4109
4110 // scan directory blocks for superblock and any global updates
4111 lfs_mdir_t dir = {.tail = {0, 1}};
4112 lfs_block_t cycle = 0;
4113 while (!lfs_pair_isnull(dir.tail)) {
4114 if (cycle >= lfs->cfg->block_count/2) {
4115 // loop detected
4116 err = LFS_ERR_CORRUPT;
4117 goto cleanup;
4118 }
4119 cycle += 1;
4120
4121 // fetch next block in tail list
4122 lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
4123 LFS_MKTAG(0x7ff, 0x3ff, 0),
4124 LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
4125 NULL,
4126 lfs_dir_find_match, &(struct lfs_dir_find_match){
4127 lfs, "littlefs", 8});
4128 if (tag < 0) {
4129 err = tag;
4130 goto cleanup;
4131 }
4132
4133 // has superblock?
4134 if (tag && !lfs_tag_isdelete(tag)) {
4135 // update root
4136 lfs->root[0] = dir.pair[0];
4137 lfs->root[1] = dir.pair[1];
4138
4139 // grab superblock
4140 lfs_superblock_t superblock;
4141 tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
4142 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4143 &superblock);
4144 if (tag < 0) {
4145 err = tag;
4146 goto cleanup;
4147 }
4148 lfs_superblock_fromle32(&superblock);
4149
4150 // check version
4151 uint16_t major_version = (0xffff & (superblock.version >> 16));
4152 uint16_t minor_version = (0xffff & (superblock.version >> 0));
4153 if ((major_version != LFS_DISK_VERSION_MAJOR ||
4154 minor_version > LFS_DISK_VERSION_MINOR)) {
4155 LFS_ERROR("Invalid version v%"PRIu16".%"PRIu16,
4156 major_version, minor_version);
4157 err = LFS_ERR_INVAL;
4158 goto cleanup;
4159 }
4160
4161 // check superblock configuration
4162 if (superblock.name_max) {
4163 if (superblock.name_max > lfs->name_max) {
4164 LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
4165 superblock.name_max, lfs->name_max);
4166 err = LFS_ERR_INVAL;
4167 goto cleanup;
4168 }
4169
4170 lfs->name_max = superblock.name_max;
4171 }
4172
4173 if (superblock.file_max) {
4174 if (superblock.file_max > lfs->file_max) {
4175 LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
4176 superblock.file_max, lfs->file_max);
4177 err = LFS_ERR_INVAL;
4178 goto cleanup;
4179 }
4180
4181 lfs->file_max = superblock.file_max;
4182 }
4183
4184 if (superblock.attr_max) {
4185 if (superblock.attr_max > lfs->attr_max) {
4186 LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
4187 superblock.attr_max, lfs->attr_max);
4188 err = LFS_ERR_INVAL;
4189 goto cleanup;
4190 }
4191
4192 lfs->attr_max = superblock.attr_max;
4193 }
4194
4195 if (superblock.block_count != lfs->cfg->block_count) {
4196 LFS_ERROR("Invalid block count (%"PRIu32" != %"PRIu32")",
4197 superblock.block_count, lfs->cfg->block_count);
4198 err = LFS_ERR_INVAL;
4199 goto cleanup;
4200 }
4201
4202 if (superblock.block_size != lfs->cfg->block_size) {
4203 LFS_ERROR("Invalid block size (%"PRIu32" != %"PRIu32")",
4204 superblock.block_count, lfs->cfg->block_count);
4205 err = LFS_ERR_INVAL;
4206 goto cleanup;
4207 }
4208 }
4209
4210 // has gstate?
4211 err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
4212 if (err) {
4213 goto cleanup;
4214 }
4215 }
4216
4217 // found superblock?
4218 if (lfs_pair_isnull(lfs->root)) {
4219 err = LFS_ERR_INVAL;
4220 goto cleanup;
4221 }
4222
4223 // update littlefs with gstate
4224 if (!lfs_gstate_iszero(&lfs->gstate)) {
4225 LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
4226 lfs->gstate.tag,
4227 lfs->gstate.pair[0],
4228 lfs->gstate.pair[1]);
4229 }
4230 lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
4231 lfs->gdisk = lfs->gstate;
4232
4233 // setup free lookahead, to distribute allocations uniformly across
4234 // boots, we start the allocator at a random location
4235 lfs->free.off = lfs->seed % lfs->cfg->block_count;
4236 lfs_alloc_drop(lfs);
4237
4238 return 0;
4239
4240 cleanup:
4241 lfs_rawunmount(lfs);
4242 return err;
4243 }
4244
4245 static int lfs_rawunmount(lfs_t *lfs) {
4246 return lfs_deinit(lfs);
4247 }
4248
4249
4250 /// Filesystem filesystem operations ///
4251 int lfs_fs_rawtraverse(lfs_t *lfs,
4252 int (*cb)(void *data, lfs_block_t block), void *data,
4253 bool includeorphans) {
4254 // iterate over metadata pairs
4255 lfs_mdir_t dir = {.tail = {0, 1}};
4256
4257 #ifdef LFS_MIGRATE
4258 // also consider v1 blocks during migration
4259 if (lfs->lfs1) {
4260 int err = lfs1_traverse(lfs, cb, data);
4261 if (err) {
4262 return err;
4263 }
4264
4265 dir.tail[0] = lfs->root[0];
4266 dir.tail[1] = lfs->root[1];
4267 }
4268 #endif
4269
4270 lfs_block_t cycle = 0;
4271 while (!lfs_pair_isnull(dir.tail)) {
4272 if (cycle >= lfs->cfg->block_count/2) {
4273 // loop detected
4274 return LFS_ERR_CORRUPT;
4275 }
4276 cycle += 1;
4277
4278 for (int i = 0; i < 2; i++) {
4279 int err = cb(data, dir.tail[i]);
4280 if (err) {
4281 return err;
4282 }
4283 }
4284
4285 // iterate through ids in directory
4286 int err = lfs_dir_fetch(lfs, &dir, dir.tail);
4287 if (err) {
4288 return err;
4289 }
4290
4291 for (uint16_t id = 0; id < dir.count; id++) {
4292 struct lfs_ctz ctz;
4293 lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
4294 LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
4295 if (tag < 0) {
4296 if (tag == LFS_ERR_NOENT) {
4297 continue;
4298 }
4299 return tag;
4300 }
4301 lfs_ctz_fromle32(&ctz);
4302
4303 if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
4304 err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
4305 ctz.head, ctz.size, cb, data);
4306 if (err) {
4307 return err;
4308 }
4309 } else if (includeorphans &&
4310 lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
4311 for (int i = 0; i < 2; i++) {
4312 err = cb(data, (&ctz.head)[i]);
4313 if (err) {
4314 return err;
4315 }
4316 }
4317 }
4318 }
4319 }
4320
4321 #ifndef LFS_READONLY
4322 // iterate over any open files
4323 for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
4324 if (f->type != LFS_TYPE_REG) {
4325 continue;
4326 }
4327
4328 if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
4329 int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
4330 f->ctz.head, f->ctz.size, cb, data);
4331 if (err) {
4332 return err;
4333 }
4334 }
4335
4336 if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
4337 int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
4338 f->block, f->pos, cb, data);
4339 if (err) {
4340 return err;
4341 }
4342 }
4343 }
4344 #endif
4345
4346 return 0;
4347 }
4348
4349 #ifndef LFS_READONLY
4350 static int lfs_fs_pred(lfs_t *lfs,
4351 const lfs_block_t pair[2], lfs_mdir_t *pdir) {
4352 // iterate over all directory directory entries
4353 pdir->tail[0] = 0;
4354 pdir->tail[1] = 1;
4355 lfs_block_t cycle = 0;
4356 while (!lfs_pair_isnull(pdir->tail)) {
4357 if (cycle >= lfs->cfg->block_count/2) {
4358 // loop detected
4359 return LFS_ERR_CORRUPT;
4360 }
4361 cycle += 1;
4362
4363 if (lfs_pair_cmp(pdir->tail, pair) == 0) {
4364 return 0;
4365 }
4366
4367 int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
4368 if (err) {
4369 return err;
4370 }
4371 }
4372
4373 return LFS_ERR_NOENT;
4374 }
4375 #endif
4376
4377 #ifndef LFS_READONLY
4378 struct lfs_fs_parent_match {
4379 lfs_t *lfs;
4380 const lfs_block_t pair[2];
4381 };
4382 #endif
4383
4384 #ifndef LFS_READONLY
4385 static int lfs_fs_parent_match(void *data,
4386 lfs_tag_t tag, const void *buffer) {
4387 struct lfs_fs_parent_match *find = data;
4388 lfs_t *lfs = find->lfs;
4389 const struct lfs_diskoff *disk = buffer;
4390 (void)tag;
4391
4392 lfs_block_t child[2];
4393 int err = lfs_bd_read(lfs,
4394 &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
4395 disk->block, disk->off, &child, sizeof(child));
4396 if (err) {
4397 return err;
4398 }
4399
4400 lfs_pair_fromle32(child);
4401 return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
4402 }
4403 #endif
4404
4405 #ifndef LFS_READONLY
4406 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
4407 lfs_mdir_t *parent) {
4408 // use fetchmatch with callback to find pairs
4409 parent->tail[0] = 0;
4410 parent->tail[1] = 1;
4411 lfs_block_t cycle = 0;
4412 while (!lfs_pair_isnull(parent->tail)) {
4413 if (cycle >= lfs->cfg->block_count/2) {
4414 // loop detected
4415 return LFS_ERR_CORRUPT;
4416 }
4417 cycle += 1;
4418
4419 lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
4420 LFS_MKTAG(0x7ff, 0, 0x3ff),
4421 LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
4422 NULL,
4423 lfs_fs_parent_match, &(struct lfs_fs_parent_match){
4424 lfs, {pair[0], pair[1]}});
4425 if (tag && tag != LFS_ERR_NOENT) {
4426 return tag;
4427 }
4428 }
4429
4430 return LFS_ERR_NOENT;
4431 }
4432 #endif
4433
4434 #ifndef LFS_READONLY
4435 static int lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
4436 LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
4437 lfs->gstate.tag += orphans;
4438 lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
4439 ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
4440
4441 return 0;
4442 }
4443 #endif
4444
4445 #ifndef LFS_READONLY
4446 static void lfs_fs_prepmove(lfs_t *lfs,
4447 uint16_t id, const lfs_block_t pair[2]) {
4448 lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
4449 ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
4450 lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
4451 lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
4452 }
4453 #endif
4454
4455 #ifndef LFS_READONLY
4456 static int lfs_fs_demove(lfs_t *lfs) {
4457 if (!lfs_gstate_hasmove(&lfs->gdisk)) {
4458 return 0;
4459 }
4460
4461 // Fix bad moves
4462 LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
4463 lfs->gdisk.pair[0],
4464 lfs->gdisk.pair[1],
4465 lfs_tag_id(lfs->gdisk.tag));
4466
4467 // fetch and delete the moved entry
4468 lfs_mdir_t movedir;
4469 int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
4470 if (err) {
4471 return err;
4472 }
4473
4474 // prep gstate and delete move id
4475 uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
4476 lfs_fs_prepmove(lfs, 0x3ff, NULL);
4477 err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
4478 {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0), NULL}));
4479 if (err) {
4480 return err;
4481 }
4482
4483 return 0;
4484 }
4485 #endif
4486
4487 #ifndef LFS_READONLY
4488 static int lfs_fs_deorphan(lfs_t *lfs, bool powerloss) {
4489 if (!lfs_gstate_hasorphans(&lfs->gstate)) {
4490 return 0;
4491 }
4492
4493 int8_t found = 0;
4494 restart:
4495 {
4496 // Fix any orphans
4497 lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
4498 lfs_mdir_t dir;
4499
4500 // iterate over all directory directory entries
4501 while (!lfs_pair_isnull(pdir.tail)) {
4502 int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
4503 if (err) {
4504 return err;
4505 }
4506
4507 // check head blocks for orphans
4508 if (!pdir.split) {
4509 // check if we have a parent
4510 lfs_mdir_t parent;
4511 lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
4512 if (tag < 0 && tag != LFS_ERR_NOENT) {
4513 return tag;
4514 }
4515
4516 // note we only check for full orphans if we may have had a
4517 // power-loss, otherwise orphans are created intentionally
4518 // during operations such as lfs_mkdir
4519 if (tag == LFS_ERR_NOENT && powerloss) {
4520 // we are an orphan
4521 LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
4522 pdir.tail[0], pdir.tail[1]);
4523
4524 // steal state
4525 err = lfs_dir_getgstate(lfs, &dir, &lfs->gdelta);
4526 if (err) {
4527 return err;
4528 }
4529
4530 // steal tail
4531 lfs_pair_tole32(dir.tail);
4532 int state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
4533 {LFS_MKTAG(LFS_TYPE_TAIL + dir.split, 0x3ff, 8),
4534 dir.tail}));
4535 lfs_pair_fromle32(dir.tail);
4536 if (state < 0) {
4537 return state;
4538 }
4539
4540 found += 1;
4541
4542 // did our commit create more orphans?
4543 if (state == LFS_OK_ORPHANED) {
4544 goto restart;
4545 }
4546
4547 // refetch tail
4548 continue;
4549 }
4550
4551 if (tag != LFS_ERR_NOENT) {
4552 lfs_block_t pair[2];
4553 lfs_stag_t state = lfs_dir_get(lfs, &parent,
4554 LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
4555 if (state < 0) {
4556 return state;
4557 }
4558 lfs_pair_fromle32(pair);
4559
4560 if (!lfs_pair_sync(pair, pdir.tail)) {
4561 // we have desynced
4562 LFS_DEBUG("Fixing half-orphan "
4563 "{0x%"PRIx32", 0x%"PRIx32"} "
4564 "-> {0x%"PRIx32", 0x%"PRIx32"}",
4565 pdir.tail[0], pdir.tail[1], pair[0], pair[1]);
4566
4567 // fix pending move in this pair? this looks like an
4568 // optimization but is in fact _required_ since
4569 // relocating may outdate the move.
4570 uint16_t moveid = 0x3ff;
4571 if (lfs_gstate_hasmovehere(&lfs->gstate, pdir.pair)) {
4572 moveid = lfs_tag_id(lfs->gstate.tag);
4573 LFS_DEBUG("Fixing move while fixing orphans "
4574 "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16"\n",
4575 pdir.pair[0], pdir.pair[1], moveid);
4576 lfs_fs_prepmove(lfs, 0x3ff, NULL);
4577 }
4578
4579 lfs_pair_tole32(pair);
4580 state = lfs_dir_orphaningcommit(lfs, &pdir, LFS_MKATTRS(
4581 {LFS_MKTAG_IF(moveid != 0x3ff,
4582 LFS_TYPE_DELETE, moveid, 0), NULL},
4583 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8),
4584 pair}));
4585 lfs_pair_fromle32(pair);
4586 if (state < 0) {
4587 return state;
4588 }
4589
4590 found += 1;
4591
4592 // did our commit create more orphans?
4593 if (state == LFS_OK_ORPHANED) {
4594 goto restart;
4595 }
4596
4597 // refetch tail
4598 continue;
4599 }
4600 }
4601 }
4602
4603 pdir = dir;
4604 }
4605 }
4606
4607 // mark orphans as fixed
4608 return lfs_fs_preporphans(lfs, -lfs_min(
4609 lfs_gstate_getorphans(&lfs->gstate),
4610 found));
4611 }
4612 #endif
4613
4614 #ifndef LFS_READONLY
4615 static int lfs_fs_forceconsistency(lfs_t *lfs) {
4616 int err = lfs_fs_demove(lfs);
4617 if (err) {
4618 return err;
4619 }
4620
4621 err = lfs_fs_deorphan(lfs, true);
4622 if (err) {
4623 return err;
4624 }
4625
4626 return 0;
4627 }
4628 #endif
4629
4630 static int lfs_fs_size_count(void *p, lfs_block_t block) {
4631 (void)block;
4632 lfs_size_t *size = p;
4633 *size += 1;
4634 return 0;
4635 }
4636
4637 static lfs_ssize_t lfs_fs_rawsize(lfs_t *lfs) {
4638 lfs_size_t size = 0;
4639 int err = lfs_fs_rawtraverse(lfs, lfs_fs_size_count, &size, false);
4640 if (err) {
4641 return err;
4642 }
4643
4644 return size;
4645 }
4646
4647 #ifdef LFS_MIGRATE
4648 ////// Migration from littelfs v1 below this //////
4649
4650 /// Version info ///
4651
4652 // Software library version
4653 // Major (top-nibble), incremented on backwards incompatible changes
4654 // Minor (bottom-nibble), incremented on feature additions
4655 #define LFS1_VERSION 0x00010007
4656 #define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
4657 #define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))
4658
4659 // Version of On-disk data structures
4660 // Major (top-nibble), incremented on backwards incompatible changes
4661 // Minor (bottom-nibble), incremented on feature additions
4662 #define LFS1_DISK_VERSION 0x00010001
4663 #define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
4664 #define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
4665
4666
4667 /// v1 Definitions ///
4668
4669 // File types
4670 enum lfs1_type {
4671 LFS1_TYPE_REG = 0x11,
4672 LFS1_TYPE_DIR = 0x22,
4673 LFS1_TYPE_SUPERBLOCK = 0x2e,
4674 };
4675
4676 typedef struct lfs1 {
4677 lfs_block_t root[2];
4678 } lfs1_t;
4679
4680 typedef struct lfs1_entry {
4681 lfs_off_t off;
4682
4683 struct lfs1_disk_entry {
4684 uint8_t type;
4685 uint8_t elen;
4686 uint8_t alen;
4687 uint8_t nlen;
4688 union {
4689 struct {
4690 lfs_block_t head;
4691 lfs_size_t size;
4692 } file;
4693 lfs_block_t dir[2];
4694 } u;
4695 } d;
4696 } lfs1_entry_t;
4697
4698 typedef struct lfs1_dir {
4699 struct lfs1_dir *next;
4700 lfs_block_t pair[2];
4701 lfs_off_t off;
4702
4703 lfs_block_t head[2];
4704 lfs_off_t pos;
4705
4706 struct lfs1_disk_dir {
4707 uint32_t rev;
4708 lfs_size_t size;
4709 lfs_block_t tail[2];
4710 } d;
4711 } lfs1_dir_t;
4712
4713 typedef struct lfs1_superblock {
4714 lfs_off_t off;
4715
4716 struct lfs1_disk_superblock {
4717 uint8_t type;
4718 uint8_t elen;
4719 uint8_t alen;
4720 uint8_t nlen;
4721 lfs_block_t root[2];
4722 uint32_t block_size;
4723 uint32_t block_count;
4724 uint32_t version;
4725 char magic[8];
4726 } d;
4727 } lfs1_superblock_t;
4728
4729
4730 /// Low-level wrappers v1->v2 ///
4731 static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
4732 *crc = lfs_crc(*crc, buffer, size);
4733 }
4734
4735 static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
4736 lfs_off_t off, void *buffer, lfs_size_t size) {
4737 // if we ever do more than writes to alternating pairs,
4738 // this may need to consider pcache
4739 return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
4740 block, off, buffer, size);
4741 }
4742
4743 static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
4744 lfs_off_t off, lfs_size_t size, uint32_t *crc) {
4745 for (lfs_off_t i = 0; i < size; i++) {
4746 uint8_t c;
4747 int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
4748 if (err) {
4749 return err;
4750 }
4751
4752 lfs1_crc(crc, &c, 1);
4753 }
4754
4755 return 0;
4756 }
4757
4758
4759 /// Endian swapping functions ///
4760 static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
4761 d->rev = lfs_fromle32(d->rev);
4762 d->size = lfs_fromle32(d->size);
4763 d->tail[0] = lfs_fromle32(d->tail[0]);
4764 d->tail[1] = lfs_fromle32(d->tail[1]);
4765 }
4766
4767 static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
4768 d->rev = lfs_tole32(d->rev);
4769 d->size = lfs_tole32(d->size);
4770 d->tail[0] = lfs_tole32(d->tail[0]);
4771 d->tail[1] = lfs_tole32(d->tail[1]);
4772 }
4773
4774 static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
4775 d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
4776 d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
4777 }
4778
4779 static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
4780 d->u.dir[0] = lfs_tole32(d->u.dir[0]);
4781 d->u.dir[1] = lfs_tole32(d->u.dir[1]);
4782 }
4783
4784 static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
4785 d->root[0] = lfs_fromle32(d->root[0]);
4786 d->root[1] = lfs_fromle32(d->root[1]);
4787 d->block_size = lfs_fromle32(d->block_size);
4788 d->block_count = lfs_fromle32(d->block_count);
4789 d->version = lfs_fromle32(d->version);
4790 }
4791
4792
4793 ///// Metadata pair and directory operations ///
4794 static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
4795 return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
4796 }
4797
4798 static int lfs1_dir_fetch(lfs_t *lfs,
4799 lfs1_dir_t *dir, const lfs_block_t pair[2]) {
4800 // copy out pair, otherwise may be aliasing dir
4801 const lfs_block_t tpair[2] = {pair[0], pair[1]};
4802 bool valid = false;
4803
4804 // check both blocks for the most recent revision
4805 for (int i = 0; i < 2; i++) {
4806 struct lfs1_disk_dir test;
4807 int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
4808 lfs1_dir_fromle32(&test);
4809 if (err) {
4810 if (err == LFS_ERR_CORRUPT) {
4811 continue;
4812 }
4813 return err;
4814 }
4815
4816 if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
4817 continue;
4818 }
4819
4820 if ((0x7fffffff & test.size) < sizeof(test)+4 ||
4821 (0x7fffffff & test.size) > lfs->cfg->block_size) {
4822 continue;
4823 }
4824
4825 uint32_t crc = 0xffffffff;
4826 lfs1_dir_tole32(&test);
4827 lfs1_crc(&crc, &test, sizeof(test));
4828 lfs1_dir_fromle32(&test);
4829 err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
4830 (0x7fffffff & test.size) - sizeof(test), &crc);
4831 if (err) {
4832 if (err == LFS_ERR_CORRUPT) {
4833 continue;
4834 }
4835 return err;
4836 }
4837
4838 if (crc != 0) {
4839 continue;
4840 }
4841
4842 valid = true;
4843
4844 // setup dir in case it's valid
4845 dir->pair[0] = tpair[(i+0) % 2];
4846 dir->pair[1] = tpair[(i+1) % 2];
4847 dir->off = sizeof(dir->d);
4848 dir->d = test;
4849 }
4850
4851 if (!valid) {
4852 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
4853 tpair[0], tpair[1]);
4854 return LFS_ERR_CORRUPT;
4855 }
4856
4857 return 0;
4858 }
4859
4860 static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
4861 while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
4862 if (!(0x80000000 & dir->d.size)) {
4863 entry->off = dir->off;
4864 return LFS_ERR_NOENT;
4865 }
4866
4867 int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
4868 if (err) {
4869 return err;
4870 }
4871
4872 dir->off = sizeof(dir->d);
4873 dir->pos += sizeof(dir->d) + 4;
4874 }
4875
4876 int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
4877 &entry->d, sizeof(entry->d));
4878 lfs1_entry_fromle32(&entry->d);
4879 if (err) {
4880 return err;
4881 }
4882
4883 entry->off = dir->off;
4884 dir->off += lfs1_entry_size(entry);
4885 dir->pos += lfs1_entry_size(entry);
4886 return 0;
4887 }
4888
4889 /// littlefs v1 specific operations ///
4890 int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
4891 if (lfs_pair_isnull(lfs->lfs1->root)) {
4892 return 0;
4893 }
4894
4895 // iterate over metadata pairs
4896 lfs1_dir_t dir;
4897 lfs1_entry_t entry;
4898 lfs_block_t cwd[2] = {0, 1};
4899
4900 while (true) {
4901 for (int i = 0; i < 2; i++) {
4902 int err = cb(data, cwd[i]);
4903 if (err) {
4904 return err;
4905 }
4906 }
4907
4908 int err = lfs1_dir_fetch(lfs, &dir, cwd);
4909 if (err) {
4910 return err;
4911 }
4912
4913 // iterate over contents
4914 while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
4915 err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
4916 &entry.d, sizeof(entry.d));
4917 lfs1_entry_fromle32(&entry.d);
4918 if (err) {
4919 return err;
4920 }
4921
4922 dir.off += lfs1_entry_size(&entry);
4923 if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
4924 err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
4925 entry.d.u.file.head, entry.d.u.file.size, cb, data);
4926 if (err) {
4927 return err;
4928 }
4929 }
4930 }
4931
4932 // we also need to check if we contain a threaded v2 directory
4933 lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
4934 while (dir2.split) {
4935 err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
4936 if (err) {
4937 break;
4938 }
4939
4940 for (int i = 0; i < 2; i++) {
4941 err = cb(data, dir2.pair[i]);
4942 if (err) {
4943 return err;
4944 }
4945 }
4946 }
4947
4948 cwd[0] = dir.d.tail[0];
4949 cwd[1] = dir.d.tail[1];
4950
4951 if (lfs_pair_isnull(cwd)) {
4952 break;
4953 }
4954 }
4955
4956 return 0;
4957 }
4958
4959 static int lfs1_moved(lfs_t *lfs, const void *e) {
4960 if (lfs_pair_isnull(lfs->lfs1->root)) {
4961 return 0;
4962 }
4963
4964 // skip superblock
4965 lfs1_dir_t cwd;
4966 int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
4967 if (err) {
4968 return err;
4969 }
4970
4971 // iterate over all directory directory entries
4972 lfs1_entry_t entry;
4973 while (!lfs_pair_isnull(cwd.d.tail)) {
4974 err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
4975 if (err) {
4976 return err;
4977 }
4978
4979 while (true) {
4980 err = lfs1_dir_next(lfs, &cwd, &entry);
4981 if (err && err != LFS_ERR_NOENT) {
4982 return err;
4983 }
4984
4985 if (err == LFS_ERR_NOENT) {
4986 break;
4987 }
4988
4989 if (!(0x80 & entry.d.type) &&
4990 memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
4991 return true;
4992 }
4993 }
4994 }
4995
4996 return false;
4997 }
4998
4999 /// Filesystem operations ///
5000 static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
5001 const struct lfs_config *cfg) {
5002 int err = 0;
5003 {
5004 err = lfs_init(lfs, cfg);
5005 if (err) {
5006 return err;
5007 }
5008
5009 lfs->lfs1 = lfs1;
5010 lfs->lfs1->root[0] = LFS_BLOCK_NULL;
5011 lfs->lfs1->root[1] = LFS_BLOCK_NULL;
5012
5013 // setup free lookahead
5014 lfs->free.off = 0;
5015 lfs->free.size = 0;
5016 lfs->free.i = 0;
5017 lfs_alloc_ack(lfs);
5018
5019 // load superblock
5020 lfs1_dir_t dir;
5021 lfs1_superblock_t superblock;
5022 err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
5023 if (err && err != LFS_ERR_CORRUPT) {
5024 goto cleanup;
5025 }
5026
5027 if (!err) {
5028 err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
5029 &superblock.d, sizeof(superblock.d));
5030 lfs1_superblock_fromle32(&superblock.d);
5031 if (err) {
5032 goto cleanup;
5033 }
5034
5035 lfs->lfs1->root[0] = superblock.d.root[0];
5036 lfs->lfs1->root[1] = superblock.d.root[1];
5037 }
5038
5039 if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
5040 LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
5041 0, 1);
5042 err = LFS_ERR_CORRUPT;
5043 goto cleanup;
5044 }
5045
5046 uint16_t major_version = (0xffff & (superblock.d.version >> 16));
5047 uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
5048 if ((major_version != LFS1_DISK_VERSION_MAJOR ||
5049 minor_version > LFS1_DISK_VERSION_MINOR)) {
5050 LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
5051 err = LFS_ERR_INVAL;
5052 goto cleanup;
5053 }
5054
5055 return 0;
5056 }
5057
5058 cleanup:
5059 lfs_deinit(lfs);
5060 return err;
5061 }
5062
5063 static int lfs1_unmount(lfs_t *lfs) {
5064 return lfs_deinit(lfs);
5065 }
5066
5067 /// v1 migration ///
5068 static int lfs_rawmigrate(lfs_t *lfs, const struct lfs_config *cfg) {
5069 struct lfs1 lfs1;
5070 int err = lfs1_mount(lfs, &lfs1, cfg);
5071 if (err) {
5072 return err;
5073 }
5074
5075 {
5076 // iterate through each directory, copying over entries
5077 // into new directory
5078 lfs1_dir_t dir1;
5079 lfs_mdir_t dir2;
5080 dir1.d.tail[0] = lfs->lfs1->root[0];
5081 dir1.d.tail[1] = lfs->lfs1->root[1];
5082 while (!lfs_pair_isnull(dir1.d.tail)) {
5083 // iterate old dir
5084 err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
5085 if (err) {
5086 goto cleanup;
5087 }
5088
5089 // create new dir and bind as temporary pretend root
5090 err = lfs_dir_alloc(lfs, &dir2);
5091 if (err) {
5092 goto cleanup;
5093 }
5094
5095 dir2.rev = dir1.d.rev;
5096 dir1.head[0] = dir1.pair[0];
5097 dir1.head[1] = dir1.pair[1];
5098 lfs->root[0] = dir2.pair[0];
5099 lfs->root[1] = dir2.pair[1];
5100
5101 err = lfs_dir_commit(lfs, &dir2, NULL, 0);
5102 if (err) {
5103 goto cleanup;
5104 }
5105
5106 while (true) {
5107 lfs1_entry_t entry1;
5108 err = lfs1_dir_next(lfs, &dir1, &entry1);
5109 if (err && err != LFS_ERR_NOENT) {
5110 goto cleanup;
5111 }
5112
5113 if (err == LFS_ERR_NOENT) {
5114 break;
5115 }
5116
5117 // check that entry has not been moved
5118 if (entry1.d.type & 0x80) {
5119 int moved = lfs1_moved(lfs, &entry1.d.u);
5120 if (moved < 0) {
5121 err = moved;
5122 goto cleanup;
5123 }
5124
5125 if (moved) {
5126 continue;
5127 }
5128
5129 entry1.d.type &= ~0x80;
5130 }
5131
5132 // also fetch name
5133 char name[LFS_NAME_MAX+1];
5134 memset(name, 0, sizeof(name));
5135 err = lfs1_bd_read(lfs, dir1.pair[0],
5136 entry1.off + 4+entry1.d.elen+entry1.d.alen,
5137 name, entry1.d.nlen);
5138 if (err) {
5139 goto cleanup;
5140 }
5141
5142 bool isdir = (entry1.d.type == LFS1_TYPE_DIR);
5143
5144 // create entry in new dir
5145 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5146 if (err) {
5147 goto cleanup;
5148 }
5149
5150 uint16_t id;
5151 err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
5152 if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
5153 err = (err < 0) ? err : LFS_ERR_EXIST;
5154 goto cleanup;
5155 }
5156
5157 lfs1_entry_tole32(&entry1.d);
5158 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5159 {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
5160 {LFS_MKTAG_IF_ELSE(isdir,
5161 LFS_TYPE_DIR, id, entry1.d.nlen,
5162 LFS_TYPE_REG, id, entry1.d.nlen),
5163 name},
5164 {LFS_MKTAG_IF_ELSE(isdir,
5165 LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
5166 LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
5167 &entry1.d.u}));
5168 lfs1_entry_fromle32(&entry1.d);
5169 if (err) {
5170 goto cleanup;
5171 }
5172 }
5173
5174 if (!lfs_pair_isnull(dir1.d.tail)) {
5175 // find last block and update tail to thread into fs
5176 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5177 if (err) {
5178 goto cleanup;
5179 }
5180
5181 while (dir2.split) {
5182 err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
5183 if (err) {
5184 goto cleanup;
5185 }
5186 }
5187
5188 lfs_pair_tole32(dir2.pair);
5189 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5190 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
5191 lfs_pair_fromle32(dir2.pair);
5192 if (err) {
5193 goto cleanup;
5194 }
5195 }
5196
5197 // Copy over first block to thread into fs. Unfortunately
5198 // if this fails there is not much we can do.
5199 LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
5200 "-> {0x%"PRIx32", 0x%"PRIx32"}",
5201 lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);
5202
5203 err = lfs_bd_erase(lfs, dir1.head[1]);
5204 if (err) {
5205 goto cleanup;
5206 }
5207
5208 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
5209 if (err) {
5210 goto cleanup;
5211 }
5212
5213 for (lfs_off_t i = 0; i < dir2.off; i++) {
5214 uint8_t dat;
5215 err = lfs_bd_read(lfs,
5216 NULL, &lfs->rcache, dir2.off,
5217 dir2.pair[0], i, &dat, 1);
5218 if (err) {
5219 goto cleanup;
5220 }
5221
5222 err = lfs_bd_prog(lfs,
5223 &lfs->pcache, &lfs->rcache, true,
5224 dir1.head[1], i, &dat, 1);
5225 if (err) {
5226 goto cleanup;
5227 }
5228 }
5229
5230 err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
5231 if (err) {
5232 goto cleanup;
5233 }
5234 }
5235
5236 // Create new superblock. This marks a successful migration!
5237 err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
5238 if (err) {
5239 goto cleanup;
5240 }
5241
5242 dir2.pair[0] = dir1.pair[0];
5243 dir2.pair[1] = dir1.pair[1];
5244 dir2.rev = dir1.d.rev;
5245 dir2.off = sizeof(dir2.rev);
5246 dir2.etag = 0xffffffff;
5247 dir2.count = 0;
5248 dir2.tail[0] = lfs->lfs1->root[0];
5249 dir2.tail[1] = lfs->lfs1->root[1];
5250 dir2.erased = false;
5251 dir2.split = true;
5252
5253 lfs_superblock_t superblock = {
5254 .version = LFS_DISK_VERSION,
5255 .block_size = lfs->cfg->block_size,
5256 .block_count = lfs->cfg->block_count,
5257 .name_max = lfs->name_max,
5258 .file_max = lfs->file_max,
5259 .attr_max = lfs->attr_max,
5260 };
5261
5262 lfs_superblock_tole32(&superblock);
5263 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
5264 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0), NULL},
5265 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
5266 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
5267 &superblock}));
5268 if (err) {
5269 goto cleanup;
5270 }
5271
5272 // sanity check that fetch works
5273 err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
5274 if (err) {
5275 goto cleanup;
5276 }
5277
5278 // force compaction to prevent accidentally mounting v1
5279 dir2.erased = false;
5280 err = lfs_dir_commit(lfs, &dir2, NULL, 0);
5281 if (err) {
5282 goto cleanup;
5283 }
5284 }
5285
5286 cleanup:
5287 lfs1_unmount(lfs);
5288 return err;
5289 }
5290
5291 #endif
5292
5293
5294 /// Public API wrappers ///
5295
5296 // Here we can add tracing/thread safety easily
5297
5298 // Thread-safe wrappers if enabled
5299 #ifdef LFS_THREADSAFE
5300 #define LFS_LOCK(cfg) cfg->lock(cfg)
5301 #define LFS_UNLOCK(cfg) cfg->unlock(cfg)
5302 #else
5303 #define LFS_LOCK(cfg) ((void)cfg, 0)
5304 #define LFS_UNLOCK(cfg) ((void)cfg)
5305 #endif
5306
5307 // Public API
5308 #ifndef LFS_READONLY
5309 int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
5310 int err = LFS_LOCK(cfg);
5311 if (err) {
5312 return err;
5313 }
5314 LFS_TRACE("lfs_format(%p, %p {.context=%p, "
5315 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
5316 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
5317 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
5318 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
5319 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
5320 ".prog_buffer=%p, .lookahead_buffer=%p, "
5321 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
5322 ".attr_max=%"PRIu32"})",
5323 (void*)lfs, (void*)cfg, cfg->context,
5324 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
5325 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
5326 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
5327 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
5328 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
5329 cfg->name_max, cfg->file_max, cfg->attr_max);
5330
5331 err = lfs_rawformat(lfs, cfg);
5332
5333 LFS_TRACE("lfs_format -> %d", err);
5334 LFS_UNLOCK(cfg);
5335 return err;
5336 }
5337 #endif
5338
5339 int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
5340 int err = LFS_LOCK(cfg);
5341 if (err) {
5342 return err;
5343 }
5344 LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
5345 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
5346 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
5347 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
5348 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
5349 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
5350 ".prog_buffer=%p, .lookahead_buffer=%p, "
5351 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
5352 ".attr_max=%"PRIu32"})",
5353 (void*)lfs, (void*)cfg, cfg->context,
5354 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
5355 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
5356 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
5357 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
5358 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
5359 cfg->name_max, cfg->file_max, cfg->attr_max);
5360
5361 err = lfs_rawmount(lfs, cfg);
5362
5363 LFS_TRACE("lfs_mount -> %d", err);
5364 LFS_UNLOCK(cfg);
5365 return err;
5366 }
5367
5368 int lfs_unmount(lfs_t *lfs) {
5369 int err = LFS_LOCK(lfs->cfg);
5370 if (err) {
5371 return err;
5372 }
5373 LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
5374
5375 err = lfs_rawunmount(lfs);
5376
5377 LFS_TRACE("lfs_unmount -> %d", err);
5378 LFS_UNLOCK(lfs->cfg);
5379 return err;
5380 }
5381
5382 #ifndef LFS_READONLY
5383 int lfs_remove(lfs_t *lfs, const char *path) {
5384 int err = LFS_LOCK(lfs->cfg);
5385 if (err) {
5386 return err;
5387 }
5388 LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);
5389
5390 err = lfs_rawremove(lfs, path);
5391
5392 LFS_TRACE("lfs_remove -> %d", err);
5393 LFS_UNLOCK(lfs->cfg);
5394 return err;
5395 }
5396 #endif
5397
5398 #ifndef LFS_READONLY
5399 int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
5400 int err = LFS_LOCK(lfs->cfg);
5401 if (err) {
5402 return err;
5403 }
5404 LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);
5405
5406 err = lfs_rawrename(lfs, oldpath, newpath);
5407
5408 LFS_TRACE("lfs_rename -> %d", err);
5409 LFS_UNLOCK(lfs->cfg);
5410 return err;
5411 }
5412 #endif
5413
5414 int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
5415 int err = LFS_LOCK(lfs->cfg);
5416 if (err) {
5417 return err;
5418 }
5419 LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
5420
5421 err = lfs_rawstat(lfs, path, info);
5422
5423 LFS_TRACE("lfs_stat -> %d", err);
5424 LFS_UNLOCK(lfs->cfg);
5425 return err;
5426 }
5427
5428 lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
5429 uint8_t type, void *buffer, lfs_size_t size) {
5430 int err = LFS_LOCK(lfs->cfg);
5431 if (err) {
5432 return err;
5433 }
5434 LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5435 (void*)lfs, path, type, buffer, size);
5436
5437 lfs_ssize_t res = lfs_rawgetattr(lfs, path, type, buffer, size);
5438
5439 LFS_TRACE("lfs_getattr -> %"PRId32, res);
5440 LFS_UNLOCK(lfs->cfg);
5441 return res;
5442 }
5443
5444 #ifndef LFS_READONLY
5445 int lfs_setattr(lfs_t *lfs, const char *path,
5446 uint8_t type, const void *buffer, lfs_size_t size) {
5447 int err = LFS_LOCK(lfs->cfg);
5448 if (err) {
5449 return err;
5450 }
5451 LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
5452 (void*)lfs, path, type, buffer, size);
5453
5454 err = lfs_rawsetattr(lfs, path, type, buffer, size);
5455
5456 LFS_TRACE("lfs_setattr -> %d", err);
5457 LFS_UNLOCK(lfs->cfg);
5458 return err;
5459 }
5460 #endif
5461
5462 #ifndef LFS_READONLY
5463 int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
5464 int err = LFS_LOCK(lfs->cfg);
5465 if (err) {
5466 return err;
5467 }
5468 LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);
5469
5470 err = lfs_rawremoveattr(lfs, path, type);
5471
5472 LFS_TRACE("lfs_removeattr -> %d", err);
5473 LFS_UNLOCK(lfs->cfg);
5474 return err;
5475 }
5476 #endif
5477
5478 #ifndef LFS_NO_MALLOC
5479 int lfs_file_open(lfs_t *lfs, lfs_file_t *file, const char *path, int flags) {
5480 int err = LFS_LOCK(lfs->cfg);
5481 if (err) {
5482 return err;
5483 }
5484 LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
5485 (void*)lfs, (void*)file, path, flags);
5486 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5487
5488 err = lfs_file_rawopen(lfs, file, path, flags);
5489
5490 LFS_TRACE("lfs_file_open -> %d", err);
5491 LFS_UNLOCK(lfs->cfg);
5492 return err;
5493 }
5494 #endif
5495
5496 int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
5497 const char *path, int flags,
5498 const struct lfs_file_config *cfg) {
5499 int err = LFS_LOCK(lfs->cfg);
5500 if (err) {
5501 return err;
5502 }
5503 LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
5504 ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
5505 (void*)lfs, (void*)file, path, flags,
5506 (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
5507 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5508
5509 err = lfs_file_rawopencfg(lfs, file, path, flags, cfg);
5510
5511 LFS_TRACE("lfs_file_opencfg -> %d", err);
5512 LFS_UNLOCK(lfs->cfg);
5513 return err;
5514 }
5515
5516 int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
5517 int err = LFS_LOCK(lfs->cfg);
5518 if (err) {
5519 return err;
5520 }
5521 LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
5522 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5523
5524 err = lfs_file_rawclose(lfs, file);
5525
5526 LFS_TRACE("lfs_file_close -> %d", err);
5527 LFS_UNLOCK(lfs->cfg);
5528 return err;
5529 }
5530
5531 #ifndef LFS_READONLY
5532 int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
5533 int err = LFS_LOCK(lfs->cfg);
5534 if (err) {
5535 return err;
5536 }
5537 LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
5538 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5539
5540 err = lfs_file_rawsync(lfs, file);
5541
5542 LFS_TRACE("lfs_file_sync -> %d", err);
5543 LFS_UNLOCK(lfs->cfg);
5544 return err;
5545 }
5546 #endif
5547
5548 lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
5549 void *buffer, lfs_size_t size) {
5550 int err = LFS_LOCK(lfs->cfg);
5551 if (err) {
5552 return err;
5553 }
5554 LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
5555 (void*)lfs, (void*)file, buffer, size);
5556 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5557
5558 lfs_ssize_t res = lfs_file_rawread(lfs, file, buffer, size);
5559
5560 LFS_TRACE("lfs_file_read -> %"PRId32, res);
5561 LFS_UNLOCK(lfs->cfg);
5562 return res;
5563 }
5564
5565 #ifndef LFS_READONLY
5566 lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
5567 const void *buffer, lfs_size_t size) {
5568 int err = LFS_LOCK(lfs->cfg);
5569 if (err) {
5570 return err;
5571 }
5572 LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
5573 (void*)lfs, (void*)file, buffer, size);
5574 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5575
5576 lfs_ssize_t res = lfs_file_rawwrite(lfs, file, buffer, size);
5577
5578 LFS_TRACE("lfs_file_write -> %"PRId32, res);
5579 LFS_UNLOCK(lfs->cfg);
5580 return res;
5581 }
5582 #endif
5583
5584 lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
5585 lfs_soff_t off, int whence) {
5586 int err = LFS_LOCK(lfs->cfg);
5587 if (err) {
5588 return err;
5589 }
5590 LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
5591 (void*)lfs, (void*)file, off, whence);
5592 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5593
5594 lfs_soff_t res = lfs_file_rawseek(lfs, file, off, whence);
5595
5596 LFS_TRACE("lfs_file_seek -> %"PRId32, res);
5597 LFS_UNLOCK(lfs->cfg);
5598 return res;
5599 }
5600
5601 #ifndef LFS_READONLY
5602 int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
5603 int err = LFS_LOCK(lfs->cfg);
5604 if (err) {
5605 return err;
5606 }
5607 LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
5608 (void*)lfs, (void*)file, size);
5609 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5610
5611 err = lfs_file_rawtruncate(lfs, file, size);
5612
5613 LFS_TRACE("lfs_file_truncate -> %d", err);
5614 LFS_UNLOCK(lfs->cfg);
5615 return err;
5616 }
5617 #endif
5618
5619 lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
5620 int err = LFS_LOCK(lfs->cfg);
5621 if (err) {
5622 return err;
5623 }
5624 LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
5625 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5626
5627 lfs_soff_t res = lfs_file_rawtell(lfs, file);
5628
5629 LFS_TRACE("lfs_file_tell -> %"PRId32, res);
5630 LFS_UNLOCK(lfs->cfg);
5631 return res;
5632 }
5633
5634 int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
5635 int err = LFS_LOCK(lfs->cfg);
5636 if (err) {
5637 return err;
5638 }
5639 LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
5640
5641 err = lfs_file_rawrewind(lfs, file);
5642
5643 LFS_TRACE("lfs_file_rewind -> %d", err);
5644 LFS_UNLOCK(lfs->cfg);
5645 return err;
5646 }
5647
5648 lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
5649 int err = LFS_LOCK(lfs->cfg);
5650 if (err) {
5651 return err;
5652 }
5653 LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
5654 LFS_ASSERT(lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)file));
5655
5656 lfs_soff_t res = lfs_file_rawsize(lfs, file);
5657
5658 LFS_TRACE("lfs_file_size -> %"PRId32, res);
5659 LFS_UNLOCK(lfs->cfg);
5660 return res;
5661 }
5662
5663 #ifndef LFS_READONLY
5664 int lfs_mkdir(lfs_t *lfs, const char *path) {
5665 int err = LFS_LOCK(lfs->cfg);
5666 if (err) {
5667 return err;
5668 }
5669 LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);
5670
5671 err = lfs_rawmkdir(lfs, path);
5672
5673 LFS_TRACE("lfs_mkdir -> %d", err);
5674 LFS_UNLOCK(lfs->cfg);
5675 return err;
5676 }
5677 #endif
5678
5679 int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
5680 int err = LFS_LOCK(lfs->cfg);
5681 if (err) {
5682 return err;
5683 }
5684 LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
5685 LFS_ASSERT(!lfs_mlist_isopen(lfs->mlist, (struct lfs_mlist*)dir));
5686
5687 err = lfs_dir_rawopen(lfs, dir, path);
5688
5689 LFS_TRACE("lfs_dir_open -> %d", err);
5690 LFS_UNLOCK(lfs->cfg);
5691 return err;
5692 }
5693
5694 int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
5695 int err = LFS_LOCK(lfs->cfg);
5696 if (err) {
5697 return err;
5698 }
5699 LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);
5700
5701 err = lfs_dir_rawclose(lfs, dir);
5702
5703 LFS_TRACE("lfs_dir_close -> %d", err);
5704 LFS_UNLOCK(lfs->cfg);
5705 return err;
5706 }
5707
5708 int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
5709 int err = LFS_LOCK(lfs->cfg);
5710 if (err) {
5711 return err;
5712 }
5713 LFS_TRACE("lfs_dir_read(%p, %p, %p)",
5714 (void*)lfs, (void*)dir, (void*)info);
5715
5716 err = lfs_dir_rawread(lfs, dir, info);
5717
5718 LFS_TRACE("lfs_dir_read -> %d", err);
5719 LFS_UNLOCK(lfs->cfg);
5720 return err;
5721 }
5722
5723 int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
5724 int err = LFS_LOCK(lfs->cfg);
5725 if (err) {
5726 return err;
5727 }
5728 LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
5729 (void*)lfs, (void*)dir, off);
5730
5731 err = lfs_dir_rawseek(lfs, dir, off);
5732
5733 LFS_TRACE("lfs_dir_seek -> %d", err);
5734 LFS_UNLOCK(lfs->cfg);
5735 return err;
5736 }
5737
5738 lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
5739 int err = LFS_LOCK(lfs->cfg);
5740 if (err) {
5741 return err;
5742 }
5743 LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);
5744
5745 lfs_soff_t res = lfs_dir_rawtell(lfs, dir);
5746
5747 LFS_TRACE("lfs_dir_tell -> %"PRId32, res);
5748 LFS_UNLOCK(lfs->cfg);
5749 return res;
5750 }
5751
5752 int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
5753 int err = LFS_LOCK(lfs->cfg);
5754 if (err) {
5755 return err;
5756 }
5757 LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);
5758
5759 err = lfs_dir_rawrewind(lfs, dir);
5760
5761 LFS_TRACE("lfs_dir_rewind -> %d", err);
5762 LFS_UNLOCK(lfs->cfg);
5763 return err;
5764 }
5765
5766 lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
5767 int err = LFS_LOCK(lfs->cfg);
5768 if (err) {
5769 return err;
5770 }
5771 LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);
5772
5773 lfs_ssize_t res = lfs_fs_rawsize(lfs);
5774
5775 LFS_TRACE("lfs_fs_size -> %"PRId32, res);
5776 LFS_UNLOCK(lfs->cfg);
5777 return res;
5778 }
5779
5780 int lfs_fs_traverse(lfs_t *lfs, int (*cb)(void *, lfs_block_t), void *data) {
5781 int err = LFS_LOCK(lfs->cfg);
5782 if (err) {
5783 return err;
5784 }
5785 LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
5786 (void*)lfs, (void*)(uintptr_t)cb, data);
5787
5788 err = lfs_fs_rawtraverse(lfs, cb, data, true);
5789
5790 LFS_TRACE("lfs_fs_traverse -> %d", err);
5791 LFS_UNLOCK(lfs->cfg);
5792 return err;
5793 }
5794
5795 #ifdef LFS_MIGRATE
5796 int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
5797 int err = LFS_LOCK(cfg);
5798 if (err) {
5799 return err;
5800 }
5801 LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
5802 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
5803 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
5804 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
5805 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
5806 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
5807 ".prog_buffer=%p, .lookahead_buffer=%p, "
5808 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
5809 ".attr_max=%"PRIu32"})",
5810 (void*)lfs, (void*)cfg, cfg->context,
5811 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
5812 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
5813 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
5814 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
5815 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
5816 cfg->name_max, cfg->file_max, cfg->attr_max);
5817
5818 err = lfs_rawmigrate(lfs, cfg);
5819
5820 LFS_TRACE("lfs_migrate -> %d", err);
5821 LFS_UNLOCK(cfg);
5822 return err;
5823 }
5824 #endif
5825
5826