1 /*
2 * The little filesystem
3 *
4 * Copyright (c) 2017, Arm Limited. All rights reserved.
5 * SPDX-License-Identifier: BSD-3-Clause
6 */
7 #include "lfs.h"
8 #include "lfs_util.h"
9
10 #define LFS_BLOCK_NULL ((lfs_block_t)-1)
11 #define LFS_BLOCK_INLINE ((lfs_block_t)-2)
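// Both sentinels rely on lfs_block_t being an unsigned 32-bit type, so the
// casts wrap to 0xffffffff and 0xfffffffe, values that should never collide
// with a real block address (block_count is far smaller in practice).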
12
13 /// Caching block device operations ///
14 static inline void lfs_cache_drop(lfs_t *lfs, lfs_cache_t *rcache) {
15 // do not zero, cheaper if cache is readonly or only going to be
16 // written with identical data (during relocates)
17 (void)lfs;
18 rcache->block = LFS_BLOCK_NULL;
19 }
20
21 static inline void lfs_cache_zero(lfs_t *lfs, lfs_cache_t *pcache) {
22 // wipe with 0xff to avoid information leaks
23 memset(pcache->buffer, 0xff, lfs->cfg->cache_size);
24 pcache->block = LFS_BLOCK_NULL;
25 }
26
27 static int lfs_bd_read(lfs_t *lfs,
28 const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
29 lfs_block_t block, lfs_off_t off,
30 void *buffer, lfs_size_t size) {
31 uint8_t *data = buffer;
32 if (block >= lfs->cfg->block_count ||
33 off+size > lfs->cfg->block_size) {
34 return LFS_ERR_CORRUPT;
35 }
36
37 while (size > 0) {
38 lfs_size_t diff = size;
39
40 if (pcache && block == pcache->block &&
41 off < pcache->off + pcache->size) {
42 if (off >= pcache->off) {
43 // is already in pcache?
44 diff = lfs_min(diff, pcache->size - (off-pcache->off));
45 memcpy(data, &pcache->buffer[off-pcache->off], diff);
46
47 data += diff;
48 off += diff;
49 size -= diff;
50 continue;
51 }
52
53 // pcache takes priority
54 diff = lfs_min(diff, pcache->off-off);
55 }
56
57 if (block == rcache->block &&
58 off < rcache->off + rcache->size) {
59 if (off >= rcache->off) {
60 // is already in rcache?
61 diff = lfs_min(diff, rcache->size - (off-rcache->off));
62 memcpy(data, &rcache->buffer[off-rcache->off], diff);
63
64 data += diff;
65 off += diff;
66 size -= diff;
67 continue;
68 }
69
70 // rcache takes priority
71 diff = lfs_min(diff, rcache->off-off);
72 }
73
74 if (size >= hint && off % lfs->cfg->read_size == 0 &&
75 size >= lfs->cfg->read_size) {
76 // bypass cache?
77 diff = lfs_aligndown(diff, lfs->cfg->read_size);
78 int err = lfs->cfg->read(lfs->cfg, block, off, data, diff);
79 if (err) {
80 return err;
81 }
82
83 data += diff;
84 off += diff;
85 size -= diff;
86 continue;
87 }
88
89 // load to cache, first condition can no longer fail
90 LFS_ASSERT(block < lfs->cfg->block_count);
91 rcache->block = block;
92 rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
93 rcache->size = lfs_min(
94 lfs_min(
95 lfs_alignup(off+hint, lfs->cfg->read_size),
96 lfs->cfg->block_size)
97 - rcache->off,
98 lfs->cfg->cache_size);
99 int err = lfs->cfg->read(lfs->cfg, rcache->block,
100 rcache->off, rcache->buffer, rcache->size);
101 LFS_ASSERT(err <= 0);
102 if (err) {
103 return err;
104 }
105 }
106
107 return 0;
108 }
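// Roughly, lfs_bd_read resolves each chunk of the request in priority order:
//
//   pcache hit -> rcache hit -> direct read (cache bypass) -> fill rcache
//
// For example, a read that starts inside the pcache and spills past it is
// served from the pcache first, then the remainder is clipped so the next
// pass can fall through to the rcache or the underlying read() callback.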
109
110 enum {
111 LFS_CMP_EQ = 0,
112 LFS_CMP_LT = 1,
113 LFS_CMP_GT = 2,
114 };
115
116 static int lfs_bd_cmp(lfs_t *lfs,
117 const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
118 lfs_block_t block, lfs_off_t off,
119 const void *buffer, lfs_size_t size) {
120 const uint8_t *data = buffer;
121
122 for (lfs_off_t i = 0; i < size; i++) {
123 uint8_t dat;
124 int err = lfs_bd_read(lfs,
125 pcache, rcache, hint-i,
126 block, off+i, &dat, 1);
127 if (err) {
128 return err;
129 }
130
131 if (dat != data[i]) {
132 return (dat < data[i]) ? LFS_CMP_LT : LFS_CMP_GT;
133 }
134 }
135
136 return LFS_CMP_EQ;
137 }
138
139 static int lfs_bd_flush(lfs_t *lfs,
140 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
141 if (pcache->block != LFS_BLOCK_NULL && pcache->block != LFS_BLOCK_INLINE) {
142 LFS_ASSERT(pcache->block < lfs->cfg->block_count);
143 lfs_size_t diff = lfs_alignup(pcache->size, lfs->cfg->prog_size);
144 int err = lfs->cfg->prog(lfs->cfg, pcache->block,
145 pcache->off, pcache->buffer, diff);
146 LFS_ASSERT(err <= 0);
147 if (err) {
148 return err;
149 }
150
151 if (validate) {
152 // check data on disk
153 lfs_cache_drop(lfs, rcache);
154 int res = lfs_bd_cmp(lfs,
155 NULL, rcache, diff,
156 pcache->block, pcache->off, pcache->buffer, diff);
157 if (res < 0) {
158 return res;
159 }
160
161 if (res != LFS_CMP_EQ) {
162 return LFS_ERR_CORRUPT;
163 }
164 }
165
166 lfs_cache_zero(lfs, pcache);
167 }
168
169 return 0;
170 }
171
172 static int lfs_bd_sync(lfs_t *lfs,
173 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate) {
174 lfs_cache_drop(lfs, rcache);
175
176 int err = lfs_bd_flush(lfs, pcache, rcache, validate);
177 if (err) {
178 return err;
179 }
180
181 err = lfs->cfg->sync(lfs->cfg);
182 LFS_ASSERT(err <= 0);
183 return err;
184 }
185
186 static int lfs_bd_prog(lfs_t *lfs,
187 lfs_cache_t *pcache, lfs_cache_t *rcache, bool validate,
188 lfs_block_t block, lfs_off_t off,
189 const void *buffer, lfs_size_t size) {
190 const uint8_t *data = buffer;
191 LFS_ASSERT(block == LFS_BLOCK_INLINE || block < lfs->cfg->block_count);
192 LFS_ASSERT(off + size <= lfs->cfg->block_size);
193
194 while (size > 0) {
195 if (block == pcache->block &&
196 off >= pcache->off &&
197 off < pcache->off + lfs->cfg->cache_size) {
198 // already fits in pcache?
199 lfs_size_t diff = lfs_min(size,
200 lfs->cfg->cache_size - (off-pcache->off));
201 memcpy(&pcache->buffer[off-pcache->off], data, diff);
202
203 data += diff;
204 off += diff;
205 size -= diff;
206
207 pcache->size = lfs_max(pcache->size, off - pcache->off);
208 if (pcache->size == lfs->cfg->cache_size) {
209 // eagerly flush out pcache if we fill up
210 int err = lfs_bd_flush(lfs, pcache, rcache, validate);
211 if (err) {
212 return err;
213 }
214 }
215
216 continue;
217 }
218
219 // pcache must have been flushed, either by programming an
220 // entire block or manually flushing the pcache
221 LFS_ASSERT(pcache->block == LFS_BLOCK_NULL);
222
223 // prepare pcache, first condition can no longer fail
224 pcache->block = block;
225 pcache->off = lfs_aligndown(off, lfs->cfg->prog_size);
226 pcache->size = 0;
227 }
228
229 return 0;
230 }
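// A minimal sketch of how a caller typically drives lfs_bd_prog (hedged;
// prog_block, prog_off, data and len are placeholders, not real symbols):
// writes accumulate in the pcache until it fills a cache_size window, at
// which point lfs_bd_flush programs the aligned chunk and, if validate is
// set, reads it back through lfs_bd_cmp to verify.
//
//   int err = lfs_bd_prog(lfs, &lfs->pcache, &lfs->rcache, true,
//           prog_block, prog_off, data, len);
//   if (!err) {
//       err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, true);
//   }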
231
232 static int lfs_bd_erase(lfs_t *lfs, lfs_block_t block) {
233 LFS_ASSERT(block < lfs->cfg->block_count);
234 int err = lfs->cfg->erase(lfs->cfg, block);
235 LFS_ASSERT(err <= 0);
236 return err;
237 }
238
239
240 /// Small type-level utilities ///
241 // operations on block pairs
242 static inline void lfs_pair_swap(lfs_block_t pair[2]) {
243 lfs_block_t t = pair[0];
244 pair[0] = pair[1];
245 pair[1] = t;
246 }
247
248 static inline bool lfs_pair_isnull(const lfs_block_t pair[2]) {
249 return pair[0] == LFS_BLOCK_NULL || pair[1] == LFS_BLOCK_NULL;
250 }
251
252 static inline int lfs_pair_cmp(
253 const lfs_block_t paira[2],
254 const lfs_block_t pairb[2]) {
255 return !(paira[0] == pairb[0] || paira[1] == pairb[1] ||
256 paira[0] == pairb[1] || paira[1] == pairb[0]);
257 }
258
259 static inline bool lfs_pair_sync(
260 const lfs_block_t paira[2],
261 const lfs_block_t pairb[2]) {
262 return (paira[0] == pairb[0] && paira[1] == pairb[1]) ||
263 (paira[0] == pairb[1] && paira[1] == pairb[0]);
264 }
265
266 static inline void lfs_pair_fromle32(lfs_block_t pair[2]) {
267 pair[0] = lfs_fromle32(pair[0]);
268 pair[1] = lfs_fromle32(pair[1]);
269 }
270
271 static inline void lfs_pair_tole32(lfs_block_t pair[2]) {
272 pair[0] = lfs_tole32(pair[0]);
273 pair[1] = lfs_tole32(pair[1]);
274 }
275
276 // operations on 32-bit entry tags
277 typedef uint32_t lfs_tag_t;
278 typedef int32_t lfs_stag_t;
279
280 #define LFS_MKTAG(type, id, size) \
281 (((lfs_tag_t)(type) << 20) | ((lfs_tag_t)(id) << 10) | (lfs_tag_t)(size))
282
283 #define LFS_MKTAG_IF(cond, type, id, size) \
284 ((cond) ? LFS_MKTAG(type, id, size) : LFS_MKTAG(LFS_FROM_NOOP, 0, 0))
285
286 #define LFS_MKTAG_IF_ELSE(cond, type1, id1, size1, type2, id2, size2) \
287 ((cond) ? LFS_MKTAG(type1, id1, size1) : LFS_MKTAG(type2, id2, size2))
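// A tag packs four fields into 32 bits (see the accessors below):
//
//   [ 1-bit valid | 11-bit type (3-bit type1 + 8-bit chunk) |
//     10-bit id   | 10-bit size ]
//
// For example, LFS_MKTAG(LFS_TYPE_NAME, 3, 8) describes a name attribute
// for id 3 with an 8-byte payload. Roughly, id 0x3ff marks tags not tied
// to a specific file (and the root), while size 0x3ff marks a deleted tag.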
288
289 static inline bool lfs_tag_isvalid(lfs_tag_t tag) {
290 return !(tag & 0x80000000);
291 }
292
293 static inline bool lfs_tag_isdelete(lfs_tag_t tag) {
294 return ((int32_t)(tag << 22) >> 22) == -1;
295 }
296
297 static inline uint16_t lfs_tag_type1(lfs_tag_t tag) {
298 return (tag & 0x70000000) >> 20;
299 }
300
301 static inline uint16_t lfs_tag_type3(lfs_tag_t tag) {
302 return (tag & 0x7ff00000) >> 20;
303 }
304
305 static inline uint8_t lfs_tag_chunk(lfs_tag_t tag) {
306 return (tag & 0x0ff00000) >> 20;
307 }
308
309 static inline int8_t lfs_tag_splice(lfs_tag_t tag) {
310 return (int8_t)lfs_tag_chunk(tag);
311 }
312
313 static inline uint16_t lfs_tag_id(lfs_tag_t tag) {
314 return (tag & 0x000ffc00) >> 10;
315 }
316
317 static inline lfs_size_t lfs_tag_size(lfs_tag_t tag) {
318 return tag & 0x000003ff;
319 }
320
321 static inline lfs_size_t lfs_tag_dsize(lfs_tag_t tag) {
322 return sizeof(tag) + lfs_tag_size(tag + lfs_tag_isdelete(tag));
323 }
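// Note the trick above: a deleted tag encodes size 0x3ff, so adding
// lfs_tag_isdelete(tag) (1) overflows the size field and lfs_tag_size then
// yields 0, i.e. deleted tags occupy only the 4-byte tag header on disk.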
324
325 // operations on attributes in attribute lists
326 struct lfs_mattr {
327 lfs_tag_t tag;
328 const void *buffer;
329 };
330
331 struct lfs_diskoff {
332 lfs_block_t block;
333 lfs_off_t off;
334 };
335
336 #define LFS_MKATTRS(...) \
337 (struct lfs_mattr[]){__VA_ARGS__}, \
338 sizeof((struct lfs_mattr[]){__VA_ARGS__}) / sizeof(struct lfs_mattr)
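// LFS_MKATTRS builds an anonymous lfs_mattr array plus its length, so it
// expands directly into the last two arguments of lfs_dir_commit. A sketch
// of a typical call (mirroring lfs_dir_drop further down in this file):
//
//   err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
//           {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));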
339
340 // operations on global state
341 static inline void lfs_gstate_xor(lfs_gstate_t *a, const lfs_gstate_t *b) {
342 for (int i = 0; i < 3; i++) {
343 ((uint32_t*)a)[i] ^= ((const uint32_t*)b)[i];
344 }
345 }
346
347 static inline bool lfs_gstate_iszero(const lfs_gstate_t *a) {
348 for (int i = 0; i < 3; i++) {
349 if (((uint32_t*)a)[i] != 0) {
350 return false;
351 }
352 }
353 return true;
354 }
355
356 static inline bool lfs_gstate_hasorphans(const lfs_gstate_t *a) {
357 return lfs_tag_size(a->tag);
358 }
359
360 static inline uint8_t lfs_gstate_getorphans(const lfs_gstate_t *a) {
361 return lfs_tag_size(a->tag);
362 }
363
364 static inline bool lfs_gstate_hasmove(const lfs_gstate_t *a) {
365 return lfs_tag_type1(a->tag);
366 }
367
368 static inline bool lfs_gstate_hasmovehere(const lfs_gstate_t *a,
369 const lfs_block_t *pair) {
370 return lfs_tag_type1(a->tag) && lfs_pair_cmp(a->pair, pair) == 0;
371 }
372
373 static inline void lfs_gstate_fromle32(lfs_gstate_t *a) {
374 a->tag = lfs_fromle32(a->tag);
375 a->pair[0] = lfs_fromle32(a->pair[0]);
376 a->pair[1] = lfs_fromle32(a->pair[1]);
377 }
378
379 static inline void lfs_gstate_tole32(lfs_gstate_t *a) {
380 a->tag = lfs_tole32(a->tag);
381 a->pair[0] = lfs_tole32(a->pair[0]);
382 a->pair[1] = lfs_tole32(a->pair[1]);
383 }
384
385 // other endianness operations
386 static void lfs_ctz_fromle32(struct lfs_ctz *ctz) {
387 ctz->head = lfs_fromle32(ctz->head);
388 ctz->size = lfs_fromle32(ctz->size);
389 }
390
391 static void lfs_ctz_tole32(struct lfs_ctz *ctz) {
392 ctz->head = lfs_tole32(ctz->head);
393 ctz->size = lfs_tole32(ctz->size);
394 }
395
396 static inline void lfs_superblock_fromle32(lfs_superblock_t *superblock) {
397 superblock->version = lfs_fromle32(superblock->version);
398 superblock->block_size = lfs_fromle32(superblock->block_size);
399 superblock->block_count = lfs_fromle32(superblock->block_count);
400 superblock->name_max = lfs_fromle32(superblock->name_max);
401 superblock->file_max = lfs_fromle32(superblock->file_max);
402 superblock->attr_max = lfs_fromle32(superblock->attr_max);
403 }
404
405 static inline void lfs_superblock_tole32(lfs_superblock_t *superblock) {
406 superblock->version = lfs_tole32(superblock->version);
407 superblock->block_size = lfs_tole32(superblock->block_size);
408 superblock->block_count = lfs_tole32(superblock->block_count);
409 superblock->name_max = lfs_tole32(superblock->name_max);
410 superblock->file_max = lfs_tole32(superblock->file_max);
411 superblock->attr_max = lfs_tole32(superblock->attr_max);
412 }
413
414
415 /// Internal operations predeclared here ///
416 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
417 const struct lfs_mattr *attrs, int attrcount);
418 static int lfs_dir_compact(lfs_t *lfs,
419 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
420 lfs_mdir_t *source, uint16_t begin, uint16_t end);
421 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file);
422 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file);
423 static void lfs_fs_preporphans(lfs_t *lfs, int8_t orphans);
424 static void lfs_fs_prepmove(lfs_t *lfs,
425 uint16_t id, const lfs_block_t pair[2]);
426 static int lfs_fs_pred(lfs_t *lfs, const lfs_block_t dir[2],
427 lfs_mdir_t *pdir);
428 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t dir[2],
429 lfs_mdir_t *parent);
430 static int lfs_fs_relocate(lfs_t *lfs,
431 const lfs_block_t oldpair[2], lfs_block_t newpair[2]);
432 int lfs_fs_traverseraw(lfs_t *lfs,
433 int (*cb)(void *data, lfs_block_t block), void *data,
434 bool includeorphans);
435 static int lfs_fs_forceconsistency(lfs_t *lfs);
436 static int lfs_deinit(lfs_t *lfs);
437 #ifdef LFS_MIGRATE
438 static int lfs1_traverse(lfs_t *lfs,
439 int (*cb)(void*, lfs_block_t), void *data);
440 #endif
441
442 /// Block allocator ///
443 static int lfs_alloc_lookahead(void *p, lfs_block_t block) {
444 lfs_t *lfs = (lfs_t*)p;
445 lfs_block_t off = ((block - lfs->free.off)
446 + lfs->cfg->block_count) % lfs->cfg->block_count;
447
448 if (off < lfs->free.size) {
449 lfs->free.buffer[off / 32] |= 1U << (off % 32);
450 }
451
452 return 0;
453 }
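// The lookahead buffer is a bitmap over a window of block addresses
// starting at free.off: during a traversal, the bit at
// (block - free.off) % block_count is set for every block still reachable
// from the metadata tree, so a bit left clear means that block is free.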
454
455 static void lfs_alloc_ack(lfs_t *lfs) {
456 lfs->free.ack = lfs->cfg->block_count;
457 }
458
459 // Invalidate the lookahead buffer. This is done during mounting and
460 // failed traversals
461 static void lfs_alloc_reset(lfs_t *lfs) {
462 lfs->free.off = lfs->seed % lfs->cfg->block_count;
463 lfs->free.size = 0;
464 lfs->free.i = 0;
465 lfs_alloc_ack(lfs);
466 }
467
468 static int lfs_alloc(lfs_t *lfs, lfs_block_t *block) {
469 while (true) {
470 while (lfs->free.i != lfs->free.size) {
471 lfs_block_t off = lfs->free.i;
472 lfs->free.i += 1;
473 lfs->free.ack -= 1;
474
475 if (!(lfs->free.buffer[off / 32] & (1U << (off % 32)))) {
476 // found a free block
477 *block = (lfs->free.off + off) % lfs->cfg->block_count;
478
479 // eagerly find next off so an alloc ack can
480 // discredit old lookahead blocks
481 while (lfs->free.i != lfs->free.size &&
482 (lfs->free.buffer[lfs->free.i / 32]
483 & (1U << (lfs->free.i % 32)))) {
484 lfs->free.i += 1;
485 lfs->free.ack -= 1;
486 }
487
488 return 0;
489 }
490 }
491
492 // check if we have looked at all blocks since last ack
493 if (lfs->free.ack == 0) {
494 LFS_ERROR("No more free space %"PRIu32,
495 lfs->free.i + lfs->free.off);
496 return LFS_ERR_NOSPC;
497 }
498
499 lfs->free.off = (lfs->free.off + lfs->free.size)
500 % lfs->cfg->block_count;
501 lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size, lfs->free.ack);
502 lfs->free.i = 0;
503
504 // find mask of free blocks from tree
505 memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
506 int err = lfs_fs_traverseraw(lfs, lfs_alloc_lookahead, lfs, true);
507 if (err) {
508 lfs_alloc_reset(lfs);
509 return err;
510 }
511 }
512 }
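// Rough example of the allocator's sliding window: with lookahead_size = 16
// the bitmap covers 8*16 = 128 blocks at a time. Once the window is
// exhausted (free.i == free.size), it advances by free.size blocks and the
// bitmap is rebuilt from a full tree traversal; if free.ack reaches zero
// before a clear bit is found, the filesystem is genuinely out of space.
// lfs_alloc_ack, called after allocations have been committed, resets
// free.ack to block_count so scanning may cover the whole device again.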
513
514 /// Metadata pair and directory operations ///
515 static lfs_stag_t lfs_dir_getslice(lfs_t *lfs, const lfs_mdir_t *dir,
516 lfs_tag_t gmask, lfs_tag_t gtag,
517 lfs_off_t goff, void *gbuffer, lfs_size_t gsize) {
518 lfs_off_t off = dir->off;
519 lfs_tag_t ntag = dir->etag;
520 lfs_stag_t gdiff = 0;
521
522 if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair) &&
523 lfs_tag_id(gmask) != 0 &&
524 lfs_tag_id(lfs->gdisk.tag) <= lfs_tag_id(gtag)) {
525 // synthetic moves
526 gdiff -= LFS_MKTAG(0, 1, 0);
527 }
528
529 // iterate over dir block backwards (for faster lookups)
530 while (off >= sizeof(lfs_tag_t) + lfs_tag_dsize(ntag)) {
531 off -= lfs_tag_dsize(ntag);
532 lfs_tag_t tag = ntag;
533 int err = lfs_bd_read(lfs,
534 NULL, &lfs->rcache, sizeof(ntag),
535 dir->pair[0], off, &ntag, sizeof(ntag));
536 if (err) {
537 return err;
538 }
539
540 ntag = (lfs_frombe32(ntag) ^ tag) & 0x7fffffff;
541
542 if (lfs_tag_id(gmask) != 0 &&
543 lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
544 lfs_tag_id(tag) <= lfs_tag_id(gtag - gdiff)) {
545 if (tag == (LFS_MKTAG(LFS_TYPE_CREATE, 0, 0) |
546 (LFS_MKTAG(0, 0x3ff, 0) & (gtag - gdiff)))) {
547 // found where we were created
548 return LFS_ERR_NOENT;
549 }
550
551 // move around splices
552 gdiff += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
553 }
554
555 if ((gmask & tag) == (gmask & (gtag - gdiff))) {
556 if (lfs_tag_isdelete(tag)) {
557 return LFS_ERR_NOENT;
558 }
559
560 lfs_size_t diff = lfs_min(lfs_tag_size(tag), gsize);
561 err = lfs_bd_read(lfs,
562 NULL, &lfs->rcache, diff,
563 dir->pair[0], off+sizeof(tag)+goff, gbuffer, diff);
564 if (err) {
565 return err;
566 }
567
568 memset((uint8_t*)gbuffer + diff, 0, gsize - diff);
569
570 return tag + gdiff;
571 }
572 }
573
574 return LFS_ERR_NOENT;
575 }
576
577 static lfs_stag_t lfs_dir_get(lfs_t *lfs, const lfs_mdir_t *dir,
578 lfs_tag_t gmask, lfs_tag_t gtag, void *buffer) {
579 return lfs_dir_getslice(lfs, dir,
580 gmask, gtag,
581 0, buffer, lfs_tag_size(gtag));
582 }
583
584 static int lfs_dir_getread(lfs_t *lfs, const lfs_mdir_t *dir,
585 const lfs_cache_t *pcache, lfs_cache_t *rcache, lfs_size_t hint,
586 lfs_tag_t gmask, lfs_tag_t gtag,
587 lfs_off_t off, void *buffer, lfs_size_t size) {
588 uint8_t *data = buffer;
589 if (off+size > lfs->cfg->block_size) {
590 return LFS_ERR_CORRUPT;
591 }
592
593 while (size > 0) {
594 lfs_size_t diff = size;
595
596 if (pcache && pcache->block == LFS_BLOCK_INLINE &&
597 off < pcache->off + pcache->size) {
598 if (off >= pcache->off) {
599 // is already in pcache?
600 diff = lfs_min(diff, pcache->size - (off-pcache->off));
601 memcpy(data, &pcache->buffer[off-pcache->off], diff);
602
603 data += diff;
604 off += diff;
605 size -= diff;
606 continue;
607 }
608
609 // pcache takes priority
610 diff = lfs_min(diff, pcache->off-off);
611 }
612
613 if (rcache->block == LFS_BLOCK_INLINE &&
614 off < rcache->off + rcache->size) {
615 if (off >= rcache->off) {
616 // is already in rcache?
617 diff = lfs_min(diff, rcache->size - (off-rcache->off));
618 memcpy(data, &rcache->buffer[off-rcache->off], diff);
619
620 data += diff;
621 off += diff;
622 size -= diff;
623 continue;
624 }
625
626 // rcache takes priority
627 diff = lfs_min(diff, rcache->off-off);
628 }
629
630 // load to cache, first condition can no longer fail
631 rcache->block = LFS_BLOCK_INLINE;
632 rcache->off = lfs_aligndown(off, lfs->cfg->read_size);
633 rcache->size = lfs_min(lfs_alignup(off+hint, lfs->cfg->read_size),
634 lfs->cfg->cache_size);
635 int err = lfs_dir_getslice(lfs, dir, gmask, gtag,
636 rcache->off, rcache->buffer, rcache->size);
637 if (err < 0) {
638 return err;
639 }
640 }
641
642 return 0;
643 }
644
645 static int lfs_dir_traverse_filter(void *p,
646 lfs_tag_t tag, const void *buffer) {
647 lfs_tag_t *filtertag = p;
648 (void)buffer;
649
650 // which mask depends on unique bit in tag structure
651 uint32_t mask = (tag & LFS_MKTAG(0x100, 0, 0))
652 ? LFS_MKTAG(0x7ff, 0x3ff, 0)
653 : LFS_MKTAG(0x700, 0x3ff, 0);
654
655 // check for redundancy
656 if ((mask & tag) == (mask & *filtertag) ||
657 lfs_tag_isdelete(*filtertag) ||
658 (LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) == (
659 LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
660 (LFS_MKTAG(0, 0x3ff, 0) & *filtertag))) {
661 return true;
662 }
663
664 // check if we need to adjust for created/deleted tags
665 if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE &&
666 lfs_tag_id(tag) <= lfs_tag_id(*filtertag)) {
667 *filtertag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
668 }
669
670 return false;
671 }
672
673 static int lfs_dir_traverse(lfs_t *lfs,
674 const lfs_mdir_t *dir, lfs_off_t off, lfs_tag_t ptag,
675 const struct lfs_mattr *attrs, int attrcount,
676 lfs_tag_t tmask, lfs_tag_t ttag,
677 uint16_t begin, uint16_t end, int16_t diff,
678 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
679 // iterate over directory and attrs
680 while (true) {
681 lfs_tag_t tag;
682 const void *buffer;
683 struct lfs_diskoff disk;
684 if (off+lfs_tag_dsize(ptag) < dir->off) {
685 off += lfs_tag_dsize(ptag);
686 int err = lfs_bd_read(lfs,
687 NULL, &lfs->rcache, sizeof(tag),
688 dir->pair[0], off, &tag, sizeof(tag));
689 if (err) {
690 return err;
691 }
692
693 tag = (lfs_frombe32(tag) ^ ptag) | 0x80000000;
694 disk.block = dir->pair[0];
695 disk.off = off+sizeof(lfs_tag_t);
696 buffer = &disk;
697 ptag = tag;
698 } else if (attrcount > 0) {
699 tag = attrs[0].tag;
700 buffer = attrs[0].buffer;
701 attrs += 1;
702 attrcount -= 1;
703 } else {
704 return 0;
705 }
706
707 lfs_tag_t mask = LFS_MKTAG(0x7ff, 0, 0);
708 if ((mask & tmask & tag) != (mask & tmask & ttag)) {
709 continue;
710 }
711
712 // do we need to filter? inlining the filtering logic here allows
713 // for some minor optimizations
714 if (lfs_tag_id(tmask) != 0) {
715 // scan for duplicates and update tag based on creates/deletes
716 int filter = lfs_dir_traverse(lfs,
717 dir, off, ptag, attrs, attrcount,
718 0, 0, 0, 0, 0,
719 lfs_dir_traverse_filter, &tag);
720 if (filter < 0) {
721 return filter;
722 }
723
724 if (filter) {
725 continue;
726 }
727
728 // in filter range?
729 if (!(lfs_tag_id(tag) >= begin && lfs_tag_id(tag) < end)) {
730 continue;
731 }
732 }
733
734 // handle special cases for mcu-side operations
735 if (lfs_tag_type3(tag) == LFS_FROM_NOOP) {
736 // do nothing
737 } else if (lfs_tag_type3(tag) == LFS_FROM_MOVE) {
738 uint16_t fromid = lfs_tag_size(tag);
739 uint16_t toid = lfs_tag_id(tag);
740 int err = lfs_dir_traverse(lfs,
741 buffer, 0, 0xffffffff, NULL, 0,
742 LFS_MKTAG(0x600, 0x3ff, 0),
743 LFS_MKTAG(LFS_TYPE_STRUCT, 0, 0),
744 fromid, fromid+1, toid-fromid+diff,
745 cb, data);
746 if (err) {
747 return err;
748 }
749 } else if (lfs_tag_type3(tag) == LFS_FROM_USERATTRS) {
750 for (unsigned i = 0; i < lfs_tag_size(tag); i++) {
751 const struct lfs_attr *a = buffer;
752 int err = cb(data, LFS_MKTAG(LFS_TYPE_USERATTR + a[i].type,
753 lfs_tag_id(tag) + diff, a[i].size), a[i].buffer);
754 if (err) {
755 return err;
756 }
757 }
758 } else {
759 int err = cb(data, tag + LFS_MKTAG(0, diff, 0), buffer);
760 if (err) {
761 return err;
762 }
763 }
764 }
765 }
766
767 static lfs_stag_t lfs_dir_fetchmatch(lfs_t *lfs,
768 lfs_mdir_t *dir, const lfs_block_t pair[2],
769 lfs_tag_t fmask, lfs_tag_t ftag, uint16_t *id,
770 int (*cb)(void *data, lfs_tag_t tag, const void *buffer), void *data) {
771 // we can find tag very efficiently during a fetch, since we're already
772 // scanning the entire directory
773 lfs_stag_t besttag = -1;
774
775 // if either block address is invalid we return LFS_ERR_CORRUPT here,
776 // otherwise later writes to the pair could fail
777 if (pair[0] >= lfs->cfg->block_count || pair[1] >= lfs->cfg->block_count) {
778 return LFS_ERR_CORRUPT;
779 }
780
781 // find the block with the most recent revision
782 uint32_t revs[2] = {0, 0};
783 int r = 0;
784 for (int i = 0; i < 2; i++) {
785 int err = lfs_bd_read(lfs,
786 NULL, &lfs->rcache, sizeof(revs[i]),
787 pair[i], 0, &revs[i], sizeof(revs[i]));
788 revs[i] = lfs_fromle32(revs[i]);
789 if (err && err != LFS_ERR_CORRUPT) {
790 return err;
791 }
792
793 if (err != LFS_ERR_CORRUPT &&
794 lfs_scmp(revs[i], revs[(i+1)%2]) > 0) {
795 r = i;
796 }
797 }
798
799 dir->pair[0] = pair[(r+0)%2];
800 dir->pair[1] = pair[(r+1)%2];
801 dir->rev = revs[(r+0)%2];
802 dir->off = 0; // nonzero = found some commits
803
804 // now scan tags to fetch the actual dir and find possible match
805 for (int i = 0; i < 2; i++) {
806 lfs_off_t off = 0;
807 lfs_tag_t ptag = 0xffffffff;
808
809 uint16_t tempcount = 0;
810 lfs_block_t temptail[2] = {LFS_BLOCK_NULL, LFS_BLOCK_NULL};
811 bool tempsplit = false;
812 lfs_stag_t tempbesttag = besttag;
813
814 dir->rev = lfs_tole32(dir->rev);
815 uint32_t crc = lfs_crc(0xffffffff, &dir->rev, sizeof(dir->rev));
816 dir->rev = lfs_fromle32(dir->rev);
817
818 while (true) {
819 // extract next tag
820 lfs_tag_t tag;
821 off += lfs_tag_dsize(ptag);
822 int err = lfs_bd_read(lfs,
823 NULL, &lfs->rcache, lfs->cfg->block_size,
824 dir->pair[0], off, &tag, sizeof(tag));
825 if (err) {
826 if (err == LFS_ERR_CORRUPT) {
827 // can't continue?
828 dir->erased = false;
829 break;
830 }
831 return err;
832 }
833
834 crc = lfs_crc(crc, &tag, sizeof(tag));
835 tag = lfs_frombe32(tag) ^ ptag;
836
837 // next commit not yet programmed or we're not in valid range
838 if (!lfs_tag_isvalid(tag)) {
839 dir->erased = (lfs_tag_type1(ptag) == LFS_TYPE_CRC &&
840 dir->off % lfs->cfg->prog_size == 0);
841 break;
842 } else if (off + lfs_tag_dsize(tag) > lfs->cfg->block_size) {
843 dir->erased = false;
844 break;
845 }
846
847 ptag = tag;
848
849 if (lfs_tag_type1(tag) == LFS_TYPE_CRC) {
850 // check the crc attr
851 uint32_t dcrc;
852 err = lfs_bd_read(lfs,
853 NULL, &lfs->rcache, lfs->cfg->block_size,
854 dir->pair[0], off+sizeof(tag), &dcrc, sizeof(dcrc));
855 if (err) {
856 if (err == LFS_ERR_CORRUPT) {
857 dir->erased = false;
858 break;
859 }
860 return err;
861 }
862 dcrc = lfs_fromle32(dcrc);
863
864 if (crc != dcrc) {
865 dir->erased = false;
866 break;
867 }
868
869 // reset the next bit if we need to
870 ptag ^= (lfs_tag_t)(lfs_tag_chunk(tag) & 1U) << 31;
871
872 // toss our crc into the filesystem seed for
873 // pseudorandom numbers
874 lfs->seed ^= crc;
875
876 // update with what's found so far
877 besttag = tempbesttag;
878 dir->off = off + lfs_tag_dsize(tag);
879 dir->etag = ptag;
880 dir->count = tempcount;
881 dir->tail[0] = temptail[0];
882 dir->tail[1] = temptail[1];
883 dir->split = tempsplit;
884
885 // reset crc
886 crc = 0xffffffff;
887 continue;
888 }
889
890 // crc the entry first, hopefully leaving it in the cache
891 for (lfs_off_t j = sizeof(tag); j < lfs_tag_dsize(tag); j++) {
892 uint8_t dat;
893 err = lfs_bd_read(lfs,
894 NULL, &lfs->rcache, lfs->cfg->block_size,
895 dir->pair[0], off+j, &dat, 1);
896 if (err) {
897 if (err == LFS_ERR_CORRUPT) {
898 dir->erased = false;
899 break;
900 }
901 return err;
902 }
903
904 crc = lfs_crc(crc, &dat, 1);
905 }
906
907 // directory modification tags?
908 if (lfs_tag_type1(tag) == LFS_TYPE_NAME) {
909 // increase count of files if necessary
910 if (lfs_tag_id(tag) >= tempcount) {
911 tempcount = lfs_tag_id(tag) + 1;
912 }
913 } else if (lfs_tag_type1(tag) == LFS_TYPE_SPLICE) {
914 tempcount += lfs_tag_splice(tag);
915
916 if (tag == (LFS_MKTAG(LFS_TYPE_DELETE, 0, 0) |
917 (LFS_MKTAG(0, 0x3ff, 0) & tempbesttag))) {
918 tempbesttag |= 0x80000000;
919 } else if (tempbesttag != -1 &&
920 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
921 tempbesttag += LFS_MKTAG(0, lfs_tag_splice(tag), 0);
922 }
923 } else if (lfs_tag_type1(tag) == LFS_TYPE_TAIL) {
924 tempsplit = (lfs_tag_chunk(tag) & 1);
925
926 err = lfs_bd_read(lfs,
927 NULL, &lfs->rcache, lfs->cfg->block_size,
928 dir->pair[0], off+sizeof(tag), &temptail, 8);
929 if (err) {
930 if (err == LFS_ERR_CORRUPT) {
931 dir->erased = false;
932 break;
933 }
934 }
935 lfs_pair_fromle32(temptail);
936 }
937
938 // found a match for our fetcher?
939 if ((fmask & tag) == (fmask & ftag)) {
940 int res = cb(data, tag, &(struct lfs_diskoff){
941 dir->pair[0], off+sizeof(tag)});
942 if (res < 0) {
943 if (res == LFS_ERR_CORRUPT) {
944 dir->erased = false;
945 break;
946 }
947 return res;
948 }
949
950 if (res == LFS_CMP_EQ) {
951 // found a match
952 tempbesttag = tag;
953 } else if ((LFS_MKTAG(0x7ff, 0x3ff, 0) & tag) ==
954 (LFS_MKTAG(0x7ff, 0x3ff, 0) & tempbesttag)) {
955 // found an identical tag, but contents didn't match
956 // this must mean that our besttag has been overwritten
957 tempbesttag = -1;
958 } else if (res == LFS_CMP_GT &&
959 lfs_tag_id(tag) <= lfs_tag_id(tempbesttag)) {
960 // found a greater match, keep track to keep things sorted
961 tempbesttag = tag | 0x80000000;
962 }
963 }
964 }
965
966 // consider what we have good enough
967 if (dir->off > 0) {
968 // synthetic move
969 if (lfs_gstate_hasmovehere(&lfs->gdisk, dir->pair)) {
970 if (lfs_tag_id(lfs->gdisk.tag) == lfs_tag_id(besttag)) {
971 besttag |= 0x80000000;
972 } else if (besttag != -1 &&
973 lfs_tag_id(lfs->gdisk.tag) < lfs_tag_id(besttag)) {
974 besttag -= LFS_MKTAG(0, 1, 0);
975 }
976 }
977
978 // found tag? or found best id?
979 if (id) {
980 *id = lfs_min(lfs_tag_id(besttag), dir->count);
981 }
982
983 if (lfs_tag_isvalid(besttag)) {
984 return besttag;
985 } else if (lfs_tag_id(besttag) < dir->count) {
986 return LFS_ERR_NOENT;
987 } else {
988 return 0;
989 }
990 }
991
992 // failed, try the other block?
993 lfs_pair_swap(dir->pair);
994 dir->rev = revs[(r+1)%2];
995 }
996
997 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
998 dir->pair[0], dir->pair[1]);
999 return LFS_ERR_CORRUPT;
1000 }
1001
1002 static int lfs_dir_fetch(lfs_t *lfs,
1003 lfs_mdir_t *dir, const lfs_block_t pair[2]) {
1004 // note, mask=-1, tag=-1 can never match a tag since this
1005 // pattern has the invalid bit set
1006 return (int)lfs_dir_fetchmatch(lfs, dir, pair,
1007 (lfs_tag_t)-1, (lfs_tag_t)-1, NULL, NULL, NULL);
1008 }
1009
1010 static int lfs_dir_getgstate(lfs_t *lfs, const lfs_mdir_t *dir,
1011 lfs_gstate_t *gstate) {
1012 lfs_gstate_t temp;
1013 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x7ff, 0, 0),
1014 LFS_MKTAG(LFS_TYPE_MOVESTATE, 0, sizeof(temp)), &temp);
1015 if (res < 0 && res != LFS_ERR_NOENT) {
1016 return res;
1017 }
1018
1019 if (res != LFS_ERR_NOENT) {
1020 // xor together to find resulting gstate
1021 lfs_gstate_fromle32(&temp);
1022 lfs_gstate_xor(gstate, &temp);
1023 }
1024
1025 return 0;
1026 }
1027
1028 static int lfs_dir_getinfo(lfs_t *lfs, lfs_mdir_t *dir,
1029 uint16_t id, struct lfs_info *info) {
1030 if (id == 0x3ff) {
1031 // special case for root
1032 strcpy(info->name, "/");
1033 info->type = LFS_TYPE_DIR;
1034 return 0;
1035 }
1036
1037 lfs_stag_t tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x780, 0x3ff, 0),
1038 LFS_MKTAG(LFS_TYPE_NAME, id, lfs->name_max+1), info->name);
1039 if (tag < 0) {
1040 return (int)tag;
1041 }
1042
1043 info->type = lfs_tag_type3(tag);
1044
1045 struct lfs_ctz ctz;
1046 tag = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1047 LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
1048 if (tag < 0) {
1049 return (int)tag;
1050 }
1051 lfs_ctz_fromle32(&ctz);
1052
1053 if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
1054 info->size = ctz.size;
1055 } else if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
1056 info->size = lfs_tag_size(tag);
1057 }
1058
1059 return 0;
1060 }
1061
1062 struct lfs_dir_find_match {
1063 lfs_t *lfs;
1064 const void *name;
1065 lfs_size_t size;
1066 };
1067
1068 static int lfs_dir_find_match(void *data,
1069 lfs_tag_t tag, const void *buffer) {
1070 struct lfs_dir_find_match *name = data;
1071 lfs_t *lfs = name->lfs;
1072 const struct lfs_diskoff *disk = buffer;
1073
1074 // compare with disk
1075 lfs_size_t diff = lfs_min(name->size, lfs_tag_size(tag));
1076 int res = lfs_bd_cmp(lfs,
1077 NULL, &lfs->rcache, diff,
1078 disk->block, disk->off, name->name, diff);
1079 if (res != LFS_CMP_EQ) {
1080 return res;
1081 }
1082
1083 // only equal if our size is still the same
1084 if (name->size != lfs_tag_size(tag)) {
1085 return (name->size < lfs_tag_size(tag)) ? LFS_CMP_LT : LFS_CMP_GT;
1086 }
1087
1088 // found a match!
1089 return LFS_CMP_EQ;
1090 }
1091
1092 static lfs_stag_t lfs_dir_find(lfs_t *lfs, lfs_mdir_t *dir,
1093 const char **path, uint16_t *id) {
1094 // we reduce path to a single name if we can find it
1095 const char *name = *path;
1096 if (id) {
1097 *id = 0x3ff;
1098 }
1099
1100 // default to root dir
1101 lfs_stag_t tag = LFS_MKTAG(LFS_TYPE_DIR, 0x3ff, 0);
1102 dir->tail[0] = lfs->root[0];
1103 dir->tail[1] = lfs->root[1];
1104
1105 while (true) {
1106 nextname:
1107 // skip slashes
1108 name += strspn(name, "/");
1109 lfs_size_t namelen = strcspn(name, "/");
1110
1111 // skip '.' and root '..'
1112 if ((namelen == 1 && memcmp(name, ".", 1) == 0) ||
1113 (namelen == 2 && memcmp(name, "..", 2) == 0)) {
1114 name += namelen;
1115 goto nextname;
1116 }
1117
1118 // skip if matched by '..' in name
1119 const char *suffix = name + namelen;
1120 lfs_size_t sufflen;
1121 int depth = 1;
1122 while (true) {
1123 suffix += strspn(suffix, "/");
1124 sufflen = strcspn(suffix, "/");
1125 if (sufflen == 0) {
1126 break;
1127 }
1128
1129 if (sufflen == 2 && memcmp(suffix, "..", 2) == 0) {
1130 depth -= 1;
1131 if (depth == 0) {
1132 name = suffix + sufflen;
1133 goto nextname;
1134 }
1135 } else {
1136 depth += 1;
1137 }
1138
1139 suffix += sufflen;
1140 }
1141
1142 // found path
1143 if (name[0] == '\0') {
1144 return tag;
1145 }
1146
1147 // update what we've found so far
1148 *path = name;
1149
1150 // only continue if we hit a directory
1151 if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
1152 return LFS_ERR_NOTDIR;
1153 }
1154
1155 // grab the entry data
1156 if (lfs_tag_id(tag) != 0x3ff) {
1157 lfs_stag_t res = lfs_dir_get(lfs, dir, LFS_MKTAG(0x700, 0x3ff, 0),
1158 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), dir->tail);
1159 if (res < 0) {
1160 return res;
1161 }
1162 lfs_pair_fromle32(dir->tail);
1163 }
1164
1165 // find entry matching name
1166 while (true) {
1167 tag = lfs_dir_fetchmatch(lfs, dir, dir->tail,
1168 LFS_MKTAG(0x780, 0, 0),
1169 LFS_MKTAG(LFS_TYPE_NAME, 0, namelen),
1170 // are we last name?
1171 (strchr(name, '/') == NULL) ? id : NULL,
1172 lfs_dir_find_match, &(struct lfs_dir_find_match){
1173 lfs, name, namelen});
1174 if (tag < 0) {
1175 return tag;
1176 }
1177
1178 if (tag) {
1179 break;
1180 }
1181
1182 if (!dir->split) {
1183 return LFS_ERR_NOENT;
1184 }
1185 }
1186
1187 // to next name
1188 name += namelen;
1189 }
1190 }
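// Rough walk-through, using a hypothetical path "/usr//bin/./tool":
// repeated slashes and "." components are skipped, a name cancelled by a
// later ".." is dropped, and each remaining name is looked up in the
// current metadata pair (following dir->tail across split directories)
// before descending. The tag of the final name is returned, or
// LFS_ERR_NOENT/LFS_ERR_NOTDIR if the lookup fails along the way.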
1191
1192 // commit logic
1193 struct lfs_commit {
1194 lfs_block_t block;
1195 lfs_off_t off;
1196 lfs_tag_t ptag;
1197 uint32_t crc;
1198
1199 lfs_off_t begin;
1200 lfs_off_t end;
1201 };
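// A commit is a write cursor into one metadata block: block/off track where
// the next attribute will be programmed, ptag holds the previous tag (tags
// are stored xored with their predecessor), crc is the running checksum,
// and begin/end bound the region this commit is allowed to occupy.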
1202
1203 static int lfs_dir_commitprog(lfs_t *lfs, struct lfs_commit *commit,
1204 const void *buffer, lfs_size_t size) {
1205 int err = lfs_bd_prog(lfs,
1206 &lfs->pcache, &lfs->rcache, false,
1207 commit->block, commit->off,
1208 (const uint8_t*)buffer, size);
1209 if (err) {
1210 return err;
1211 }
1212
1213 commit->crc = lfs_crc(commit->crc, buffer, size);
1214 commit->off += size;
1215 return 0;
1216 }
1217
1218 static int lfs_dir_commitattr(lfs_t *lfs, struct lfs_commit *commit,
1219 lfs_tag_t tag, const void *buffer) {
1220 // check if we fit
1221 lfs_size_t dsize = lfs_tag_dsize(tag);
1222 if (commit->off + dsize > commit->end) {
1223 return LFS_ERR_NOSPC;
1224 }
1225
1226 // write out tag
1227 lfs_tag_t ntag = lfs_tobe32((tag & 0x7fffffff) ^ commit->ptag);
1228 int err = lfs_dir_commitprog(lfs, commit, &ntag, sizeof(ntag));
1229 if (err) {
1230 return err;
1231 }
1232
1233 if (!(tag & 0x80000000)) {
1234 // from memory
1235 err = lfs_dir_commitprog(lfs, commit, buffer, dsize-sizeof(tag));
1236 if (err) {
1237 return err;
1238 }
1239 } else {
1240 // from disk
1241 const struct lfs_diskoff *disk = buffer;
1242 for (lfs_off_t i = 0; i < dsize-sizeof(tag); i++) {
1243 // rely on caching to make this efficient
1244 uint8_t dat;
1245 err = lfs_bd_read(lfs,
1246 NULL, &lfs->rcache, dsize-sizeof(tag)-i,
1247 disk->block, disk->off+i, &dat, 1);
1248 if (err) {
1249 return err;
1250 }
1251
1252 err = lfs_dir_commitprog(lfs, commit, &dat, 1);
1253 if (err) {
1254 return err;
1255 }
1256 }
1257 }
1258
1259 commit->ptag = tag & 0x7fffffff;
1260 return 0;
1261 }
1262
1263 static int lfs_dir_commitcrc(lfs_t *lfs, struct lfs_commit *commit) {
1264 const lfs_off_t off1 = commit->off;
1265 const uint32_t crc1 = commit->crc;
1266 // align to program units
1267 const lfs_off_t end = lfs_alignup(off1 + 2*sizeof(uint32_t),
1268 lfs->cfg->prog_size);
1269
1270 // create crc tags to fill up remainder of commit, note that
1271 // padding is not crced, which lets fetches skip padding but
1272 // makes committing a bit more complicated
1273 while (commit->off < end) {
1274 lfs_off_t off = commit->off + sizeof(lfs_tag_t);
1275 lfs_off_t noff = lfs_min(end - off, 0x3fe) + off;
1276 if (noff < end) {
1277 noff = lfs_min(noff, end - 2*sizeof(uint32_t));
1278 }
1279
1280 // read erased state from next program unit
1281 lfs_tag_t tag = 0xffffffff;
1282 int err = lfs_bd_read(lfs,
1283 NULL, &lfs->rcache, sizeof(tag),
1284 commit->block, noff, &tag, sizeof(tag));
1285 if (err && err != LFS_ERR_CORRUPT) {
1286 return err;
1287 }
1288
1289 // build crc tag
1290 bool reset = ~lfs_frombe32(tag) >> 31;
1291 tag = LFS_MKTAG(LFS_TYPE_CRC + reset, 0x3ff, noff - off);
1292
1293 // write out crc
1294 uint32_t footer[2];
1295 footer[0] = lfs_tobe32(tag ^ commit->ptag);
1296 commit->crc = lfs_crc(commit->crc, &footer[0], sizeof(footer[0]));
1297 footer[1] = lfs_tole32(commit->crc);
1298 err = lfs_bd_prog(lfs,
1299 &lfs->pcache, &lfs->rcache, false,
1300 commit->block, commit->off, &footer, sizeof(footer));
1301 if (err) {
1302 return err;
1303 }
1304
1305 commit->off += sizeof(tag)+lfs_tag_size(tag);
1306 commit->ptag = tag ^ ((lfs_tag_t)reset << 31);
1307 commit->crc = 0xffffffff; // reset crc for next "commit"
1308 }
1309
1310 // flush buffers
1311 int err = lfs_bd_sync(lfs, &lfs->pcache, &lfs->rcache, false);
1312 if (err) {
1313 return err;
1314 }
1315
1316 // successful commit, check checksums to make sure
1317 lfs_off_t off = commit->begin;
1318 lfs_off_t noff = off1 + sizeof(uint32_t);
1319 while (off < end) {
1320 uint32_t crc = 0xffffffff;
1321 for (lfs_off_t i = off; i < noff+sizeof(uint32_t); i++) {
1322 // check against written crc, may catch blocks that
1323 // become readonly and match our commit size exactly
1324 if (i == off1 && crc != crc1) {
1325 return LFS_ERR_CORRUPT;
1326 }
1327
1328 // leave it up to caching to make this efficient
1329 uint8_t dat;
1330 err = lfs_bd_read(lfs,
1331 NULL, &lfs->rcache, noff+sizeof(uint32_t)-i,
1332 commit->block, i, &dat, 1);
1333 if (err) {
1334 return err;
1335 }
1336
1337 crc = lfs_crc(crc, &dat, 1);
1338 }
1339
1340 // detected write error?
1341 if (crc != 0) {
1342 return LFS_ERR_CORRUPT;
1343 }
1344
1345 // skip padding
1346 off = lfs_min(end - noff, 0x3fe) + noff;
1347 if (off < end) {
1348 off = lfs_min(off, end - 2*sizeof(uint32_t));
1349 }
1350 noff = off + sizeof(uint32_t);
1351 }
1352
1353 return 0;
1354 }
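// Very roughly, a finished commit inside a metadata block looks like:
//
//   [ rev ][ tag|data ][ tag|data ] ... [ crc tag ][ crc ][ padding ]
//
// where each tag is xored with its predecessor, the crc tag's size covers
// the padding up to the next prog_size boundary (so fetches can skip it),
// and the padding itself is deliberately left out of the checksum.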
1355
1356 static int lfs_dir_alloc(lfs_t *lfs, lfs_mdir_t *dir) {
1357 // allocate pair of dir blocks (backwards, so we write block 1 first)
1358 for (int i = 0; i < 2; i++) {
1359 int err = lfs_alloc(lfs, &dir->pair[(i+1)%2]);
1360 if (err) {
1361 return err;
1362 }
1363 }
1364
1365 // zero for reproducibility in case initial block is unreadable
1366 dir->rev = 0;
1367
1368 // rather than clobbering one of the blocks we just pretend
1369 // the revision may be valid
1370 int err = lfs_bd_read(lfs,
1371 NULL, &lfs->rcache, sizeof(dir->rev),
1372 dir->pair[0], 0, &dir->rev, sizeof(dir->rev));
1373 dir->rev = lfs_fromle32(dir->rev);
1374 if (err && err != LFS_ERR_CORRUPT) {
1375 return err;
1376 }
1377
1378 // make sure we don't immediately evict
1379 dir->rev += dir->rev & 1;
1380
1381 // set defaults
1382 dir->off = sizeof(dir->rev);
1383 dir->etag = 0xffffffff;
1384 dir->count = 0;
1385 dir->tail[0] = LFS_BLOCK_NULL;
1386 dir->tail[1] = LFS_BLOCK_NULL;
1387 dir->erased = false;
1388 dir->split = false;
1389
1390 // don't write out yet, let caller take care of that
1391 return 0;
1392 }
1393
1394 static int lfs_dir_drop(lfs_t *lfs, lfs_mdir_t *dir, lfs_mdir_t *tail) {
1395 // steal state
1396 int err = lfs_dir_getgstate(lfs, tail, &lfs->gdelta);
1397 if (err) {
1398 return err;
1399 }
1400
1401 // steal tail
1402 lfs_pair_tole32(tail->tail);
1403 err = lfs_dir_commit(lfs, dir, LFS_MKATTRS(
1404 {LFS_MKTAG(LFS_TYPE_TAIL + tail->split, 0x3ff, 8), tail->tail}));
1405 lfs_pair_fromle32(tail->tail);
1406 if (err) {
1407 return err;
1408 }
1409
1410 return 0;
1411 }
1412
1413 static int lfs_dir_split(lfs_t *lfs,
1414 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1415 lfs_mdir_t *source, uint16_t split, uint16_t end) {
1416 // create tail directory
1417 lfs_alloc_ack(lfs);
1418 lfs_mdir_t tail;
1419 int err = lfs_dir_alloc(lfs, &tail);
1420 if (err) {
1421 return err;
1422 }
1423
1424 tail.split = dir->split;
1425 tail.tail[0] = dir->tail[0];
1426 tail.tail[1] = dir->tail[1];
1427
1428 err = lfs_dir_compact(lfs, &tail, attrs, attrcount, source, split, end);
1429 if (err) {
1430 return err;
1431 }
1432
1433 dir->tail[0] = tail.pair[0];
1434 dir->tail[1] = tail.pair[1];
1435 dir->split = true;
1436
1437 // update root if needed
1438 if (lfs_pair_cmp(dir->pair, lfs->root) == 0 && split == 0) {
1439 lfs->root[0] = tail.pair[0];
1440 lfs->root[1] = tail.pair[1];
1441 }
1442
1443 return 0;
1444 }
1445
1446 static int lfs_dir_commit_size(void *p, lfs_tag_t tag, const void *buffer) {
1447 lfs_size_t *size = p;
1448 (void)buffer;
1449
1450 *size += lfs_tag_dsize(tag);
1451 return 0;
1452 }
1453
1454 struct lfs_dir_commit_commit {
1455 lfs_t *lfs;
1456 struct lfs_commit *commit;
1457 };
1458
1459 static int lfs_dir_commit_commit(void *p, lfs_tag_t tag, const void *buffer) {
1460 struct lfs_dir_commit_commit *commit = p;
1461 return lfs_dir_commitattr(commit->lfs, commit->commit, tag, buffer);
1462 }
1463
1464 static int lfs_dir_compact(lfs_t *lfs,
1465 lfs_mdir_t *dir, const struct lfs_mattr *attrs, int attrcount,
1466 lfs_mdir_t *source, uint16_t begin, uint16_t end) {
1467 // save some state in case block is bad
1468 const lfs_block_t oldpair[2] = {dir->pair[0], dir->pair[1]};
1469 bool relocated = false;
1470 bool tired = false;
1471
1472 // should we split?
1473 while (end - begin > 1) {
1474 // find size
1475 lfs_size_t size = 0;
1476 int err = lfs_dir_traverse(lfs,
1477 source, 0, 0xffffffff, attrs, attrcount,
1478 LFS_MKTAG(0x400, 0x3ff, 0),
1479 LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
1480 begin, end, -begin,
1481 lfs_dir_commit_size, &size);
1482 if (err) {
1483 return err;
1484 }
1485
1486 // space is complicated, we need room for tail, crc, gstate,
1487 // cleanup delete, and we cap at half a block to give room
1488 // for metadata updates.
1489 if (end - begin < 0xff &&
1490 size <= lfs_min(lfs->cfg->block_size - 36,
1491 lfs_alignup(lfs->cfg->block_size/2,
1492 lfs->cfg->prog_size))) {
1493 break;
1494 }
1495
1496 // can't fit, need to split, we should really be finding the
1497 // largest size that fits with a small binary search, but right now
1498 // it's not worth the code size
1499 uint16_t split = (end - begin) / 2;
1500 err = lfs_dir_split(lfs, dir, attrs, attrcount,
1501 source, begin+split, end);
1502 if (err) {
1503 // if we fail to split, we may be able to overcompact, unless
1504 // we're too big for even the full block, in which case our
1505 // only option is to error
1506 if (err == LFS_ERR_NOSPC && size <= lfs->cfg->block_size - 36) {
1507 break;
1508 }
1509 return err;
1510 }
1511
1512 end = begin + split;
1513 }
1514
1515 // increment revision count
1516 dir->rev += 1;
1517 // If our revision count == n * block_cycles, we should force a relocation,
1518 // this is how littlefs wear-levels at the metadata-pair level. Note that we
1519 // actually use (block_cycles+1)|1, this is to avoid two corner cases:
1520 // 1. block_cycles = 1, which would prevent relocations from terminating
1521 // 2. block_cycles = 2n, which, due to aliasing, would only ever relocate
1522 // one metadata block in the pair, effectively making this useless
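// For example, with block_cycles = 100 the modulus becomes (100+1)|1 = 101,
// so this pair is relocated roughly every 101 revisions; the odd modulus
// ensures both blocks of the pair eventually take a turn being rewritten.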
1523 if (lfs->cfg->block_cycles > 0 &&
1524 (dir->rev % ((lfs->cfg->block_cycles+1)|1) == 0)) {
1525 if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
1526 // oh no! we're writing too much to the superblock,
1527 // should we expand?
1528 lfs_ssize_t res = lfs_fs_size(lfs);
1529 if (res < 0) {
1530 return res;
1531 }
1532
1533 // do we have extra space? littlefs can't reclaim this space
1534 // by itself, so expand cautiously
1535 if ((lfs_size_t)res < lfs->cfg->block_count/2) {
1536 LFS_DEBUG("Expanding superblock at rev %"PRIu32, dir->rev);
1537 int err = lfs_dir_split(lfs, dir, attrs, attrcount,
1538 source, begin, end);
1539 if (err && err != LFS_ERR_NOSPC) {
1540 return err;
1541 }
1542
1543 // welp, we tried, if we ran out of space there's not much
1544 // we can do, we'll error later if we've become frozen
1545 if (!err) {
1546 end = begin;
1547 }
1548 }
1549 #ifdef LFS_MIGRATE
1550 } else if (lfs->lfs1) {
1551 // do not proactively relocate blocks during migrations, this
1552 // can cause a number of failure states such as: clobbering the
1553 // v1 superblock if we relocate root, and invalidating directory
1554 // pointers if we relocate the head of a directory. On top of
1555 // this, relocations increase the overall complexity of
1556 // lfs_migration, which is already a delicate operation.
1557 #endif
1558 } else {
1559 // we're writing too much, time to relocate
1560 tired = true;
1561 goto relocate;
1562 }
1563 }
1564
1565 // begin loop to commit compaction to blocks until a compact sticks
1566 while (true) {
1567 {
1568 // setup commit state
1569 struct lfs_commit commit = {
1570 .block = dir->pair[1],
1571 .off = 0,
1572 .ptag = 0xffffffff,
1573 .crc = 0xffffffff,
1574
1575 .begin = 0,
1576 .end = lfs->cfg->block_size - 8,
1577 };
1578
1579 // erase block to write to
1580 int err = lfs_bd_erase(lfs, dir->pair[1]);
1581 if (err) {
1582 if (err == LFS_ERR_CORRUPT) {
1583 goto relocate;
1584 }
1585 return err;
1586 }
1587
1588 // write out header
1589 dir->rev = lfs_tole32(dir->rev);
1590 err = lfs_dir_commitprog(lfs, &commit,
1591 &dir->rev, sizeof(dir->rev));
1592 dir->rev = lfs_fromle32(dir->rev);
1593 if (err) {
1594 if (err == LFS_ERR_CORRUPT) {
1595 goto relocate;
1596 }
1597 return err;
1598 }
1599
1600 // traverse the directory, this time writing out all unique tags
1601 err = lfs_dir_traverse(lfs,
1602 source, 0, 0xffffffff, attrs, attrcount,
1603 LFS_MKTAG(0x400, 0x3ff, 0),
1604 LFS_MKTAG(LFS_TYPE_NAME, 0, 0),
1605 begin, end, -begin,
1606 lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
1607 lfs, &commit});
1608 if (err) {
1609 if (err == LFS_ERR_CORRUPT) {
1610 goto relocate;
1611 }
1612 return err;
1613 }
1614
1615 // commit tail, which may be new after last size check
1616 if (!lfs_pair_isnull(dir->tail)) {
1617 lfs_pair_tole32(dir->tail);
1618 err = lfs_dir_commitattr(lfs, &commit,
1619 LFS_MKTAG(LFS_TYPE_TAIL + dir->split, 0x3ff, 8),
1620 dir->tail);
1621 lfs_pair_fromle32(dir->tail);
1622 if (err) {
1623 if (err == LFS_ERR_CORRUPT) {
1624 goto relocate;
1625 }
1626 return err;
1627 }
1628 }
1629
1630 // bring over gstate?
1631 lfs_gstate_t delta = {0};
1632 if (!relocated) {
1633 lfs_gstate_xor(&delta, &lfs->gdisk);
1634 lfs_gstate_xor(&delta, &lfs->gstate);
1635 }
1636 lfs_gstate_xor(&delta, &lfs->gdelta);
1637 delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
1638
1639 err = lfs_dir_getgstate(lfs, dir, &delta);
1640 if (err) {
1641 return err;
1642 }
1643
1644 if (!lfs_gstate_iszero(&delta)) {
1645 lfs_gstate_tole32(&delta);
1646 err = lfs_dir_commitattr(lfs, &commit,
1647 LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
1648 sizeof(delta)), &delta);
1649 if (err) {
1650 if (err == LFS_ERR_CORRUPT) {
1651 goto relocate;
1652 }
1653 return err;
1654 }
1655 }
1656
1657 // complete commit with crc
1658 err = lfs_dir_commitcrc(lfs, &commit);
1659 if (err) {
1660 if (err == LFS_ERR_CORRUPT) {
1661 goto relocate;
1662 }
1663 return err;
1664 }
1665
1666 // successful compaction, swap dir pair to indicate most recent
1667 LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
1668 lfs_pair_swap(dir->pair);
1669 dir->count = end - begin;
1670 dir->off = commit.off;
1671 dir->etag = commit.ptag;
1672 // update gstate
1673 lfs->gdelta = (lfs_gstate_t){0};
1674 if (!relocated) {
1675 lfs->gdisk = lfs->gstate;
1676 }
1677 }
1678 break;
1679
1680 relocate:
1681 // commit was corrupted, drop caches and prepare to relocate block
1682 relocated = true;
1683 lfs_cache_drop(lfs, &lfs->pcache);
1684 if (!tired) {
1685 LFS_DEBUG("Bad block at 0x%"PRIx32, dir->pair[1]);
1686 }
1687
1688 // can't relocate superblock, filesystem is now frozen
1689 if (lfs_pair_cmp(dir->pair, (const lfs_block_t[2]){0, 1}) == 0) {
1690 LFS_WARN("Superblock 0x%"PRIx32" has become unwritable",
1691 dir->pair[1]);
1692 return LFS_ERR_NOSPC;
1693 }
1694
1695 // relocate half of pair
1696 int err = lfs_alloc(lfs, &dir->pair[1]);
1697 if (err && (err != LFS_ERR_NOSPC || !tired)) {
1698 return err;
1699 }
1700
1701 tired = false;
1702 continue;
1703 }
1704
1705 if (relocated) {
1706 // update references if we relocated
1707 LFS_DEBUG("Relocating {0x%"PRIx32", 0x%"PRIx32"} "
1708 "-> {0x%"PRIx32", 0x%"PRIx32"}",
1709 oldpair[0], oldpair[1], dir->pair[0], dir->pair[1]);
1710 int err = lfs_fs_relocate(lfs, oldpair, dir->pair);
1711 if (err) {
1712 return err;
1713 }
1714 }
1715
1716 return 0;
1717 }
1718
1719 static int lfs_dir_commit(lfs_t *lfs, lfs_mdir_t *dir,
1720 const struct lfs_mattr *attrs, int attrcount) {
1721 // check for any inline files that aren't RAM backed and
1722 // forcefully evict them, needed for filesystem consistency
1723 for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
1724 if (dir != &f->m && lfs_pair_cmp(f->m.pair, dir->pair) == 0 &&
1725 f->type == LFS_TYPE_REG && (f->flags & LFS_F_INLINE) &&
1726 f->ctz.size > lfs->cfg->cache_size) {
1727 int err = lfs_file_outline(lfs, f);
1728 if (err) {
1729 return err;
1730 }
1731
1732 err = lfs_file_flush(lfs, f);
1733 if (err) {
1734 return err;
1735 }
1736 }
1737 }
1738
1739 // calculate changes to the directory
1740 lfs_mdir_t olddir = *dir;
1741 bool hasdelete = false;
1742 for (int i = 0; i < attrcount; i++) {
1743 if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE) {
1744 dir->count += 1;
1745 } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE) {
1746 LFS_ASSERT(dir->count > 0);
1747 dir->count -= 1;
1748 hasdelete = true;
1749 } else if (lfs_tag_type1(attrs[i].tag) == LFS_TYPE_TAIL) {
1750 dir->tail[0] = ((lfs_block_t*)attrs[i].buffer)[0];
1751 dir->tail[1] = ((lfs_block_t*)attrs[i].buffer)[1];
1752 dir->split = (lfs_tag_chunk(attrs[i].tag) & 1);
1753 lfs_pair_fromle32(dir->tail);
1754 }
1755 }
1756
1757 // should we actually drop the directory block?
1758 if (hasdelete && dir->count == 0) {
1759 lfs_mdir_t pdir;
1760 int err = lfs_fs_pred(lfs, dir->pair, &pdir);
1761 if (err && err != LFS_ERR_NOENT) {
1762 *dir = olddir;
1763 return err;
1764 }
1765
1766 if (err != LFS_ERR_NOENT && pdir.split) {
1767 err = lfs_dir_drop(lfs, &pdir, dir);
1768 if (err) {
1769 *dir = olddir;
1770 return err;
1771 }
1772 }
1773 }
1774
1775 if (dir->erased || dir->count >= 0xff) {
1776 // try to commit
1777 struct lfs_commit commit = {
1778 .block = dir->pair[0],
1779 .off = dir->off,
1780 .ptag = dir->etag,
1781 .crc = 0xffffffff,
1782
1783 .begin = dir->off,
1784 .end = lfs->cfg->block_size - 8,
1785 };
1786
1787 // traverse attrs that need to be written out
1788 lfs_pair_tole32(dir->tail);
1789 int err = lfs_dir_traverse(lfs,
1790 dir, dir->off, dir->etag, attrs, attrcount,
1791 0, 0, 0, 0, 0,
1792 lfs_dir_commit_commit, &(struct lfs_dir_commit_commit){
1793 lfs, &commit});
1794 lfs_pair_fromle32(dir->tail);
1795 if (err) {
1796 if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
1797 goto compact;
1798 }
1799 *dir = olddir;
1800 return err;
1801 }
1802
1803 // commit any global diffs if we have any
1804 lfs_gstate_t delta = {0};
1805 lfs_gstate_xor(&delta, &lfs->gstate);
1806 lfs_gstate_xor(&delta, &lfs->gdisk);
1807 lfs_gstate_xor(&delta, &lfs->gdelta);
1808 delta.tag &= ~LFS_MKTAG(0, 0, 0x3ff);
1809 if (!lfs_gstate_iszero(&delta)) {
1810 err = lfs_dir_getgstate(lfs, dir, &delta);
1811 if (err) {
1812 *dir = olddir;
1813 return err;
1814 }
1815
1816 lfs_gstate_tole32(&delta);
1817 err = lfs_dir_commitattr(lfs, &commit,
1818 LFS_MKTAG(LFS_TYPE_MOVESTATE, 0x3ff,
1819 sizeof(delta)), &delta);
1820 if (err) {
1821 if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
1822 goto compact;
1823 }
1824 *dir = olddir;
1825 return err;
1826 }
1827 }
1828
1829 // finalize commit with the crc
1830 err = lfs_dir_commitcrc(lfs, &commit);
1831 if (err) {
1832 if (err == LFS_ERR_NOSPC || err == LFS_ERR_CORRUPT) {
1833 goto compact;
1834 }
1835 *dir = olddir;
1836 return err;
1837 }
1838
1839 // successful commit, update dir
1840 LFS_ASSERT(commit.off % lfs->cfg->prog_size == 0);
1841 dir->off = commit.off;
1842 dir->etag = commit.ptag;
1843 // and update gstate
1844 lfs->gdisk = lfs->gstate;
1845 lfs->gdelta = (lfs_gstate_t){0};
1846 } else {
1847 compact:
1848 // fall back to compaction
1849 lfs_cache_drop(lfs, &lfs->pcache);
1850
1851 int err = lfs_dir_compact(lfs, dir, attrs, attrcount,
1852 dir, 0, dir->count);
1853 if (err) {
1854 *dir = olddir;
1855 return err;
1856 }
1857 }
1858
1859 // this complicated bit of logic is for fixing up any active
1860 // metadata-pairs that we may have affected
1861 //
1862     // note we have to make two passes since the mdir passed to
1863     // lfs_dir_commit could also be in this list, and even then
1864     // we need to copy the pair so it doesn't get clobbered if we refetch
1865     // our mdir.
1866 for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
1867 if (&d->m != dir && lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
1868 d->m = *dir;
1869 for (int i = 0; i < attrcount; i++) {
1870 if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
1871 d->id == lfs_tag_id(attrs[i].tag)) {
1872 d->m.pair[0] = LFS_BLOCK_NULL;
1873 d->m.pair[1] = LFS_BLOCK_NULL;
1874 } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_DELETE &&
1875 d->id > lfs_tag_id(attrs[i].tag)) {
1876 d->id -= 1;
1877 if (d->type == LFS_TYPE_DIR) {
1878 ((lfs_dir_t*)d)->pos -= 1;
1879 }
1880 } else if (lfs_tag_type3(attrs[i].tag) == LFS_TYPE_CREATE &&
1881 d->id >= lfs_tag_id(attrs[i].tag)) {
1882 d->id += 1;
1883 if (d->type == LFS_TYPE_DIR) {
1884 ((lfs_dir_t*)d)->pos += 1;
1885 }
1886 }
1887 }
1888 }
1889 }
1890
1891 for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
1892 if (lfs_pair_cmp(d->m.pair, olddir.pair) == 0) {
1893 while (d->id >= d->m.count && d->m.split) {
1894 // we split and id is on tail now
1895 d->id -= d->m.count;
1896 int err = lfs_dir_fetch(lfs, &d->m, d->m.tail);
1897 if (err) {
1898 return err;
1899 }
1900 }
1901 }
1902 }
1903
1904 return 0;
1905 }
1906
1907
1908 /// Top level directory operations ///
1909 int lfs_mkdir(lfs_t *lfs, const char *path) {
1910 LFS_TRACE("lfs_mkdir(%p, \"%s\")", (void*)lfs, path);
1911 // deorphan if we haven't yet, needed at most once after poweron
1912 int err = lfs_fs_forceconsistency(lfs);
1913 if (err) {
1914 LFS_TRACE("lfs_mkdir -> %d", err);
1915 return err;
1916 }
1917
1918 struct lfs_mlist cwd;
1919 cwd.next = lfs->mlist;
1920 uint16_t id;
1921 err = lfs_dir_find(lfs, &cwd.m, &path, &id);
1922 if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
1923 LFS_TRACE("lfs_mkdir -> %d", (err < 0) ? err : LFS_ERR_EXIST);
1924 return (err < 0) ? err : LFS_ERR_EXIST;
1925 }
1926
1927 // check that name fits
1928 lfs_size_t nlen = strlen(path);
1929 if (nlen > lfs->name_max) {
1930 LFS_TRACE("lfs_mkdir -> %d", LFS_ERR_NAMETOOLONG);
1931 return LFS_ERR_NAMETOOLONG;
1932 }
1933
1934 // build up new directory
1935 lfs_alloc_ack(lfs);
1936 lfs_mdir_t dir;
1937 err = lfs_dir_alloc(lfs, &dir);
1938 if (err) {
1939 LFS_TRACE("lfs_mkdir -> %d", err);
1940 return err;
1941 }
1942
1943 // find end of list
1944 lfs_mdir_t pred = cwd.m;
1945 while (pred.split) {
1946 err = lfs_dir_fetch(lfs, &pred, pred.tail);
1947 if (err) {
1948 LFS_TRACE("lfs_mkdir -> %d", err);
1949 return err;
1950 }
1951 }
1952
1953 // setup dir
1954 lfs_pair_tole32(pred.tail);
1955 err = lfs_dir_commit(lfs, &dir, LFS_MKATTRS(
1956 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pred.tail}));
1957 lfs_pair_fromle32(pred.tail);
1958 if (err) {
1959 LFS_TRACE("lfs_mkdir -> %d", err);
1960 return err;
1961 }
1962
1963     // is the current block the end of the list?
1964 if (cwd.m.split) {
1965 // update tails, this creates a desync
1966 lfs_fs_preporphans(lfs, +1);
1967
1968         // it's possible our predecessor has to be relocated, and if
1969         // our parent is our predecessor's predecessor, this could cause
1970         // our parent to go out of date; fortunately we can hook
1971         // ourselves into littlefs to catch this
1972 cwd.type = 0;
1973 cwd.id = 0;
1974 lfs->mlist = &cwd;
1975
1976 lfs_pair_tole32(dir.pair);
1977 err = lfs_dir_commit(lfs, &pred, LFS_MKATTRS(
1978 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
1979 lfs_pair_fromle32(dir.pair);
1980 if (err) {
1981 lfs->mlist = cwd.next;
1982 LFS_TRACE("lfs_mkdir -> %d", err);
1983 return err;
1984 }
1985
1986 lfs->mlist = cwd.next;
1987 lfs_fs_preporphans(lfs, -1);
1988 }
1989
1990 // now insert into our parent block
1991 lfs_pair_tole32(dir.pair);
1992 err = lfs_dir_commit(lfs, &cwd.m, LFS_MKATTRS(
1993 {LFS_MKTAG(LFS_TYPE_CREATE, id, 0), NULL},
1994 {LFS_MKTAG(LFS_TYPE_DIR, id, nlen), path},
1995 {LFS_MKTAG(LFS_TYPE_DIRSTRUCT, id, 8), dir.pair},
1996 {LFS_MKTAG_IF(!cwd.m.split,
1997 LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir.pair}));
1998 lfs_pair_fromle32(dir.pair);
1999 if (err) {
2000 LFS_TRACE("lfs_mkdir -> %d", err);
2001 return err;
2002 }
2003
2004 LFS_TRACE("lfs_mkdir -> %d", 0);
2005 return 0;
2006 }
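/*
 * Usage sketch (editorial illustration, not upstream code): creating a
 * directory from application code. Assumes `lfs` is an lfs_t that was
 * successfully mounted and that the parent path already exists.
 *
 *     int err = lfs_mkdir(&lfs, "logs");
 *     if (err && err != LFS_ERR_EXIST) {
 *         // real failure: LFS_ERR_NOENT (missing parent),
 *         // LFS_ERR_NAMETOOLONG, LFS_ERR_NOSPC, ...
 *         return err;
 *     }
 */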
2007
2008 int lfs_dir_open(lfs_t *lfs, lfs_dir_t *dir, const char *path) {
2009 LFS_TRACE("lfs_dir_open(%p, %p, \"%s\")", (void*)lfs, (void*)dir, path);
2010 lfs_stag_t tag = lfs_dir_find(lfs, &dir->m, &path, NULL);
2011 if (tag < 0) {
2012 LFS_TRACE("lfs_dir_open -> %"PRId32, tag);
2013 return tag;
2014 }
2015
2016 if (lfs_tag_type3(tag) != LFS_TYPE_DIR) {
2017 LFS_TRACE("lfs_dir_open -> %d", LFS_ERR_NOTDIR);
2018 return LFS_ERR_NOTDIR;
2019 }
2020
2021 lfs_block_t pair[2];
2022 if (lfs_tag_id(tag) == 0x3ff) {
2023 // handle root dir separately
2024 pair[0] = lfs->root[0];
2025 pair[1] = lfs->root[1];
2026 } else {
2027 // get dir pair from parent
2028 lfs_stag_t res = lfs_dir_get(lfs, &dir->m, LFS_MKTAG(0x700, 0x3ff, 0),
2029 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
2030 if (res < 0) {
2031 LFS_TRACE("lfs_dir_open -> %"PRId32, res);
2032 return res;
2033 }
2034 lfs_pair_fromle32(pair);
2035 }
2036
2037 // fetch first pair
2038 int err = lfs_dir_fetch(lfs, &dir->m, pair);
2039 if (err) {
2040 LFS_TRACE("lfs_dir_open -> %d", err);
2041 return err;
2042 }
2043
2044 // setup entry
2045 dir->head[0] = dir->m.pair[0];
2046 dir->head[1] = dir->m.pair[1];
2047 dir->id = 0;
2048 dir->pos = 0;
2049
2050 // add to list of mdirs
2051 dir->type = LFS_TYPE_DIR;
2052 dir->next = (lfs_dir_t*)lfs->mlist;
2053 lfs->mlist = (struct lfs_mlist*)dir;
2054
2055 LFS_TRACE("lfs_dir_open -> %d", 0);
2056 return 0;
2057 }
2058
2059 int lfs_dir_close(lfs_t *lfs, lfs_dir_t *dir) {
2060 LFS_TRACE("lfs_dir_close(%p, %p)", (void*)lfs, (void*)dir);
2061 // remove from list of mdirs
2062 for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) {
2063 if (*p == (struct lfs_mlist*)dir) {
2064 *p = (*p)->next;
2065 break;
2066 }
2067 }
2068
2069 LFS_TRACE("lfs_dir_close -> %d", 0);
2070 return 0;
2071 }
2072
2073 int lfs_dir_read(lfs_t *lfs, lfs_dir_t *dir, struct lfs_info *info) {
2074 LFS_TRACE("lfs_dir_read(%p, %p, %p)",
2075 (void*)lfs, (void*)dir, (void*)info);
2076 memset(info, 0, sizeof(*info));
2077
2078 // special offset for '.' and '..'
2079 if (dir->pos == 0) {
2080 info->type = LFS_TYPE_DIR;
2081 strcpy(info->name, ".");
2082 dir->pos += 1;
2083 LFS_TRACE("lfs_dir_read -> %d", true);
2084 return true;
2085 } else if (dir->pos == 1) {
2086 info->type = LFS_TYPE_DIR;
2087 strcpy(info->name, "..");
2088 dir->pos += 1;
2089 LFS_TRACE("lfs_dir_read -> %d", true);
2090 return true;
2091 }
2092
2093 while (true) {
2094 if (dir->id == dir->m.count) {
2095 if (!dir->m.split) {
2096 LFS_TRACE("lfs_dir_read -> %d", false);
2097 return false;
2098 }
2099
2100 int err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
2101 if (err) {
2102 LFS_TRACE("lfs_dir_read -> %d", err);
2103 return err;
2104 }
2105
2106 dir->id = 0;
2107 }
2108
2109 int err = lfs_dir_getinfo(lfs, &dir->m, dir->id, info);
2110 if (err && err != LFS_ERR_NOENT) {
2111 LFS_TRACE("lfs_dir_read -> %d", err);
2112 return err;
2113 }
2114
2115 dir->id += 1;
2116 if (err != LFS_ERR_NOENT) {
2117 break;
2118 }
2119 }
2120
2121 dir->pos += 1;
2122 LFS_TRACE("lfs_dir_read -> %d", true);
2123 return true;
2124 }
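/*
 * Usage sketch (editorial illustration, not upstream code): iterating a
 * directory with the lfs_dir_* API above. Assumes `lfs` is mounted.
 * lfs_dir_read returns a positive value for each entry, 0 at the end of
 * the directory, and a negative error code on failure; the first two
 * entries are always "." and "..".
 *
 *     lfs_dir_t dir;
 *     struct lfs_info info;
 *     int err = lfs_dir_open(&lfs, &dir, "/");
 *     if (err) {
 *         return err;
 *     }
 *     int res;
 *     while ((res = lfs_dir_read(&lfs, &dir, &info)) > 0) {
 *         // info.name, info.type (LFS_TYPE_REG/LFS_TYPE_DIR), info.size
 *     }
 *     lfs_dir_close(&lfs, &dir);
 *     return res;  // 0 on clean end-of-directory, negative on error
 */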
2125
2126 int lfs_dir_seek(lfs_t *lfs, lfs_dir_t *dir, lfs_off_t off) {
2127 LFS_TRACE("lfs_dir_seek(%p, %p, %"PRIu32")",
2128 (void*)lfs, (void*)dir, off);
2129 // simply walk from head dir
2130 int err = lfs_dir_rewind(lfs, dir);
2131 if (err) {
2132 LFS_TRACE("lfs_dir_seek -> %d", err);
2133 return err;
2134 }
2135
2136 // first two for ./..
2137 dir->pos = lfs_min(2, off);
2138 off -= dir->pos;
2139
2140 // skip superblock entry
2141 dir->id = (off > 0 && lfs_pair_cmp(dir->head, lfs->root) == 0);
2142
2143 while (off > 0) {
2144 int diff = lfs_min(dir->m.count - dir->id, off);
2145 dir->id += diff;
2146 dir->pos += diff;
2147 off -= diff;
2148
2149 if (dir->id == dir->m.count) {
2150 if (!dir->m.split) {
2151 LFS_TRACE("lfs_dir_seek -> %d", LFS_ERR_INVAL);
2152 return LFS_ERR_INVAL;
2153 }
2154
2155 err = lfs_dir_fetch(lfs, &dir->m, dir->m.tail);
2156 if (err) {
2157 LFS_TRACE("lfs_dir_seek -> %d", err);
2158 return err;
2159 }
2160
2161 dir->id = 0;
2162 }
2163 }
2164
2165 LFS_TRACE("lfs_dir_seek -> %d", 0);
2166 return 0;
2167 }
2168
2169 lfs_soff_t lfs_dir_tell(lfs_t *lfs, lfs_dir_t *dir) {
2170 LFS_TRACE("lfs_dir_tell(%p, %p)", (void*)lfs, (void*)dir);
2171 (void)lfs;
2172 LFS_TRACE("lfs_dir_tell -> %"PRId32, dir->pos);
2173 return dir->pos;
2174 }
2175
2176 int lfs_dir_rewind(lfs_t *lfs, lfs_dir_t *dir) {
2177 LFS_TRACE("lfs_dir_rewind(%p, %p)", (void*)lfs, (void*)dir);
2178 // reload the head dir
2179 int err = lfs_dir_fetch(lfs, &dir->m, dir->head);
2180 if (err) {
2181 LFS_TRACE("lfs_dir_rewind -> %d", err);
2182 return err;
2183 }
2184
2185 dir->id = 0;
2186 dir->pos = 0;
2187 LFS_TRACE("lfs_dir_rewind -> %d", 0);
2188 return 0;
2189 }
2190
2191
2192 /// File index list operations ///
2193 static int lfs_ctz_index(lfs_t *lfs, lfs_off_t *off) {
2194 lfs_off_t size = *off;
2195 lfs_off_t b = lfs->cfg->block_size - 2*4;
2196 lfs_off_t i = size / b;
2197 if (i == 0) {
2198 return 0;
2199 }
2200
2201 i = (size - 4*(lfs_popc(i-1)+2)) / b;
2202 *off = size - b*i - 4*lfs_popc(i);
2203 return i;
2204 }
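/*
 * Worked example (editorial note): lfs_ctz_index maps a file offset to a
 * (block index, in-block offset) pair in the CTZ skip-list. Block 0 holds
 * only data; every later block i starts with ctz(i)+1 little-endian 4-byte
 * pointers back to blocks i-2^0, i-2^1, ..., i-2^ctz(i), so its data
 * capacity is block_size - 4*(ctz(i)+1). With block_size = 128 (so b = 120):
 *
 *     lfs_off_t off = 250;
 *     int i = lfs_ctz_index(lfs, &off);
 *     // i == 1, off == 126:
 *     //   block 0 covers file offsets 0..127,
 *     //   block 1 begins with one 4-byte pointer, so file offset 250
 *     //   lands 4 + (250 - 128) = 126 bytes into block 1
 */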
2205
2206 static int lfs_ctz_find(lfs_t *lfs,
2207 const lfs_cache_t *pcache, lfs_cache_t *rcache,
2208 lfs_block_t head, lfs_size_t size,
2209 lfs_size_t pos, lfs_block_t *block, lfs_off_t *off) {
2210 if (size == 0) {
2211 *block = LFS_BLOCK_NULL;
2212 *off = 0;
2213 return 0;
2214 }
2215
2216 lfs_off_t current = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
2217 lfs_off_t target = lfs_ctz_index(lfs, &pos);
2218
2219 while (current > target) {
2220 lfs_size_t skip = lfs_min(
2221 lfs_npw2(current-target+1) - 1,
2222 lfs_ctz(current));
2223
2224 int err = lfs_bd_read(lfs,
2225 pcache, rcache, sizeof(head),
2226 head, 4*skip, &head, sizeof(head));
2227 head = lfs_fromle32(head);
2228 if (err) {
2229 return err;
2230 }
2231
2232 current -= 1 << skip;
2233 }
2234
2235 *block = head;
2236 *off = pos;
2237 return 0;
2238 }
2239
2240 static int lfs_ctz_extend(lfs_t *lfs,
2241 lfs_cache_t *pcache, lfs_cache_t *rcache,
2242 lfs_block_t head, lfs_size_t size,
2243 lfs_block_t *block, lfs_off_t *off) {
2244 while (true) {
2245 // go ahead and grab a block
2246 lfs_block_t nblock;
2247 int err = lfs_alloc(lfs, &nblock);
2248 if (err) {
2249 return err;
2250 }
2251
2252 {
2253 err = lfs_bd_erase(lfs, nblock);
2254 if (err) {
2255 if (err == LFS_ERR_CORRUPT) {
2256 goto relocate;
2257 }
2258 return err;
2259 }
2260
2261 if (size == 0) {
2262 *block = nblock;
2263 *off = 0;
2264 return 0;
2265 }
2266
2267 lfs_size_t noff = size - 1;
2268 lfs_off_t index = lfs_ctz_index(lfs, &noff);
2269 noff = noff + 1;
2270
2271 // just copy out the last block if it is incomplete
2272 if (noff != lfs->cfg->block_size) {
2273 for (lfs_off_t i = 0; i < noff; i++) {
2274 uint8_t data;
2275 err = lfs_bd_read(lfs,
2276 NULL, rcache, noff-i,
2277 head, i, &data, 1);
2278 if (err) {
2279 return err;
2280 }
2281
2282 err = lfs_bd_prog(lfs,
2283 pcache, rcache, true,
2284 nblock, i, &data, 1);
2285 if (err) {
2286 if (err == LFS_ERR_CORRUPT) {
2287 goto relocate;
2288 }
2289 return err;
2290 }
2291 }
2292
2293 *block = nblock;
2294 *off = noff;
2295 return 0;
2296 }
2297
2298 // append block
2299 index += 1;
2300 lfs_size_t skips = lfs_ctz(index) + 1;
2301 lfs_block_t nhead = head;
2302 for (lfs_off_t i = 0; i < skips; i++) {
2303 nhead = lfs_tole32(nhead);
2304 err = lfs_bd_prog(lfs, pcache, rcache, true,
2305 nblock, 4*i, &nhead, 4);
2306 nhead = lfs_fromle32(nhead);
2307 if (err) {
2308 if (err == LFS_ERR_CORRUPT) {
2309 goto relocate;
2310 }
2311 return err;
2312 }
2313
2314 if (i != skips-1) {
2315 err = lfs_bd_read(lfs,
2316 NULL, rcache, sizeof(nhead),
2317 nhead, 4*i, &nhead, sizeof(nhead));
2318 nhead = lfs_fromle32(nhead);
2319 if (err) {
2320 return err;
2321 }
2322 }
2323 }
2324
2325 *block = nblock;
2326 *off = 4*skips;
2327 return 0;
2328 }
2329
2330 relocate:
2331 LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);
2332
2333 // just clear cache and try a new block
2334 lfs_cache_drop(lfs, pcache);
2335 }
2336 }
2337
2338 static int lfs_ctz_traverse(lfs_t *lfs,
2339 const lfs_cache_t *pcache, lfs_cache_t *rcache,
2340 lfs_block_t head, lfs_size_t size,
2341 int (*cb)(void*, lfs_block_t), void *data) {
2342 if (size == 0) {
2343 return 0;
2344 }
2345
2346 lfs_off_t index = lfs_ctz_index(lfs, &(lfs_off_t){size-1});
2347
2348 while (true) {
2349 int err = cb(data, head);
2350 if (err) {
2351 return err;
2352 }
2353
2354 if (index == 0) {
2355 return 0;
2356 }
2357
2358 lfs_block_t heads[2];
2359 int count = 2 - (index & 1);
2360 err = lfs_bd_read(lfs,
2361 pcache, rcache, count*sizeof(head),
2362 head, 0, &heads, count*sizeof(head));
2363 heads[0] = lfs_fromle32(heads[0]);
2364 heads[1] = lfs_fromle32(heads[1]);
2365 if (err) {
2366 return err;
2367 }
2368
2369 for (int i = 0; i < count-1; i++) {
2370 err = cb(data, heads[i]);
2371 if (err) {
2372 return err;
2373 }
2374 }
2375
2376 head = heads[count-1];
2377 index -= count;
2378 }
2379 }
2380
2381
2382 /// Top level file operations ///
2383 int lfs_file_opencfg(lfs_t *lfs, lfs_file_t *file,
2384 const char *path, int flags,
2385 const struct lfs_file_config *cfg) {
2386 LFS_TRACE("lfs_file_opencfg(%p, %p, \"%s\", %x, %p {"
2387 ".buffer=%p, .attrs=%p, .attr_count=%"PRIu32"})",
2388 (void*)lfs, (void*)file, path, flags,
2389 (void*)cfg, cfg->buffer, (void*)cfg->attrs, cfg->attr_count);
2390
2391 // deorphan if we haven't yet, needed at most once after poweron
2392 if ((flags & 3) != LFS_O_RDONLY) {
2393 int err = lfs_fs_forceconsistency(lfs);
2394 if (err) {
2395 LFS_TRACE("lfs_file_opencfg -> %d", err);
2396 return err;
2397 }
2398 }
2399
2400 // setup simple file details
2401 int err;
2402 file->cfg = cfg;
2403 file->flags = flags | LFS_F_OPENED;
2404 file->pos = 0;
2405 file->off = 0;
2406 file->cache.buffer = NULL;
2407
2408 // allocate entry for file if it doesn't exist
2409 lfs_stag_t tag = lfs_dir_find(lfs, &file->m, &path, &file->id);
2410 if (tag < 0 && !(tag == LFS_ERR_NOENT && file->id != 0x3ff)) {
2411 err = tag;
2412 goto cleanup;
2413 }
2414
2415 // get id, add to list of mdirs to catch update changes
2416 file->type = LFS_TYPE_REG;
2417 file->next = (lfs_file_t*)lfs->mlist;
2418 lfs->mlist = (struct lfs_mlist*)file;
2419
2420 if (tag == LFS_ERR_NOENT) {
2421 if (!(flags & LFS_O_CREAT)) {
2422 err = LFS_ERR_NOENT;
2423 goto cleanup;
2424 }
2425
2426 // check that name fits
2427 lfs_size_t nlen = strlen(path);
2428 if (nlen > lfs->name_max) {
2429 err = LFS_ERR_NAMETOOLONG;
2430 goto cleanup;
2431 }
2432
2433 // get next slot and create entry to remember name
2434 err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
2435 {LFS_MKTAG(LFS_TYPE_CREATE, file->id, 0)},
2436 {LFS_MKTAG(LFS_TYPE_REG, file->id, nlen), path},
2437 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0)}));
2438 if (err) {
2439 err = LFS_ERR_NAMETOOLONG;
2440 goto cleanup;
2441 }
2442
2443 tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, 0);
2444 } else if (flags & LFS_O_EXCL) {
2445 err = LFS_ERR_EXIST;
2446 goto cleanup;
2447 } else if (lfs_tag_type3(tag) != LFS_TYPE_REG) {
2448 err = LFS_ERR_ISDIR;
2449 goto cleanup;
2450 } else if (flags & LFS_O_TRUNC) {
2451 // truncate if requested
2452 tag = LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0);
2453 file->flags |= LFS_F_DIRTY;
2454 } else {
2455 // try to load what's on disk, if it's inlined we'll fix it later
2456 tag = lfs_dir_get(lfs, &file->m, LFS_MKTAG(0x700, 0x3ff, 0),
2457 LFS_MKTAG(LFS_TYPE_STRUCT, file->id, 8), &file->ctz);
2458 if (tag < 0) {
2459 err = tag;
2460 goto cleanup;
2461 }
2462 lfs_ctz_fromle32(&file->ctz);
2463 }
2464
2465 // fetch attrs
2466 for (unsigned i = 0; i < file->cfg->attr_count; i++) {
2467 if ((file->flags & 3) != LFS_O_WRONLY) {
2468 lfs_stag_t res = lfs_dir_get(lfs, &file->m,
2469 LFS_MKTAG(0x7ff, 0x3ff, 0),
2470 LFS_MKTAG(LFS_TYPE_USERATTR + file->cfg->attrs[i].type,
2471 file->id, file->cfg->attrs[i].size),
2472 file->cfg->attrs[i].buffer);
2473 if (res < 0 && res != LFS_ERR_NOENT) {
2474 err = res;
2475 goto cleanup;
2476 }
2477 }
2478
2479 if ((file->flags & 3) != LFS_O_RDONLY) {
2480 if (file->cfg->attrs[i].size > lfs->attr_max) {
2481 err = LFS_ERR_NOSPC;
2482 goto cleanup;
2483 }
2484
2485 file->flags |= LFS_F_DIRTY;
2486 }
2487 }
2488
2489 // allocate buffer if needed
2490 if (file->cfg->buffer) {
2491 file->cache.buffer = file->cfg->buffer;
2492 } else {
2493 file->cache.buffer = lfs_malloc(lfs->cfg->cache_size);
2494 if (!file->cache.buffer) {
2495 err = LFS_ERR_NOMEM;
2496 goto cleanup;
2497 }
2498 }
2499
2500 // zero to avoid information leak
2501 lfs_cache_zero(lfs, &file->cache);
2502
2503 if (lfs_tag_type3(tag) == LFS_TYPE_INLINESTRUCT) {
2504 // load inline files
2505 file->ctz.head = LFS_BLOCK_INLINE;
2506 file->ctz.size = lfs_tag_size(tag);
2507 file->flags |= LFS_F_INLINE;
2508 file->cache.block = file->ctz.head;
2509 file->cache.off = 0;
2510 file->cache.size = lfs->cfg->cache_size;
2511
2512 // don't always read (may be new/trunc file)
2513 if (file->ctz.size > 0) {
2514 lfs_stag_t res = lfs_dir_get(lfs, &file->m,
2515 LFS_MKTAG(0x700, 0x3ff, 0),
2516 LFS_MKTAG(LFS_TYPE_STRUCT, file->id,
2517 lfs_min(file->cache.size, 0x3fe)),
2518 file->cache.buffer);
2519 if (res < 0) {
2520 err = res;
2521 goto cleanup;
2522 }
2523 }
2524 }
2525
2526 LFS_TRACE("lfs_file_opencfg -> %d", 0);
2527 return 0;
2528
2529 cleanup:
2530 // clean up lingering resources
2531 file->flags |= LFS_F_ERRED;
2532 lfs_file_close(lfs, file);
2533 LFS_TRACE("lfs_file_opencfg -> %d", err);
2534 return err;
2535 }
2536
2537 int lfs_file_open(lfs_t *lfs, lfs_file_t *file,
2538 const char *path, int flags) {
2539 LFS_TRACE("lfs_file_open(%p, %p, \"%s\", %x)",
2540 (void*)lfs, (void*)file, path, flags);
2541 static const struct lfs_file_config defaults = {0};
2542 int err = lfs_file_opencfg(lfs, file, path, flags, &defaults);
2543 LFS_TRACE("lfs_file_open -> %d", err);
2544 return err;
2545 }
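/*
 * Usage sketch (editorial illustration, not upstream code; error checks on
 * the read/rewind/write calls are omitted for brevity): the classic
 * boot-counter pattern using the file API above. Assumes `lfs` is mounted.
 *
 *     lfs_file_t file;
 *     uint32_t boot_count = 0;
 *     int err = lfs_file_open(&lfs, &file, "boot_count",
 *             LFS_O_RDWR | LFS_O_CREAT);
 *     if (err) {
 *         return err;
 *     }
 *     lfs_file_read(&lfs, &file, &boot_count, sizeof(boot_count));
 *     boot_count += 1;
 *     lfs_file_rewind(&lfs, &file);
 *     lfs_file_write(&lfs, &file, &boot_count, sizeof(boot_count));
 *     // nothing is persistent until lfs_file_close (or lfs_file_sync)
 *     return lfs_file_close(&lfs, &file);
 */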
2546
2547 int lfs_file_close(lfs_t *lfs, lfs_file_t *file) {
2548 LFS_TRACE("lfs_file_close(%p, %p)", (void*)lfs, (void*)file);
2549 LFS_ASSERT(file->flags & LFS_F_OPENED);
2550
2551 int err = lfs_file_sync(lfs, file);
2552
2553 // remove from list of mdirs
2554 for (struct lfs_mlist **p = &lfs->mlist; *p; p = &(*p)->next) {
2555 if (*p == (struct lfs_mlist*)file) {
2556 *p = (*p)->next;
2557 break;
2558 }
2559 }
2560
2561 // clean up memory
2562 if (!file->cfg->buffer) {
2563 lfs_free(file->cache.buffer);
2564 }
2565
2566 file->flags &= ~LFS_F_OPENED;
2567 LFS_TRACE("lfs_file_close -> %d", err);
2568 return err;
2569 }
2570
2571 static int lfs_file_relocate(lfs_t *lfs, lfs_file_t *file) {
2572 LFS_ASSERT(file->flags & LFS_F_OPENED);
2573
2574 while (true) {
2575 // just relocate what exists into new block
2576 lfs_block_t nblock;
2577 int err = lfs_alloc(lfs, &nblock);
2578 if (err) {
2579 return err;
2580 }
2581
2582 err = lfs_bd_erase(lfs, nblock);
2583 if (err) {
2584 if (err == LFS_ERR_CORRUPT) {
2585 goto relocate;
2586 }
2587 return err;
2588 }
2589
2590 // either read from dirty cache or disk
2591 for (lfs_off_t i = 0; i < file->off; i++) {
2592 uint8_t data;
2593 if (file->flags & LFS_F_INLINE) {
2594 err = lfs_dir_getread(lfs, &file->m,
2595 // note we evict inline files before they can be dirty
2596 NULL, &file->cache, file->off-i,
2597 LFS_MKTAG(0xfff, 0x1ff, 0),
2598 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
2599 i, &data, 1);
2600 if (err) {
2601 return err;
2602 }
2603 } else {
2604 err = lfs_bd_read(lfs,
2605 &file->cache, &lfs->rcache, file->off-i,
2606 file->block, i, &data, 1);
2607 if (err) {
2608 return err;
2609 }
2610 }
2611
2612 err = lfs_bd_prog(lfs,
2613 &lfs->pcache, &lfs->rcache, true,
2614 nblock, i, &data, 1);
2615 if (err) {
2616 if (err == LFS_ERR_CORRUPT) {
2617 goto relocate;
2618 }
2619 return err;
2620 }
2621 }
2622
2623 // copy over new state of file
2624 memcpy(file->cache.buffer, lfs->pcache.buffer, lfs->cfg->cache_size);
2625 file->cache.block = lfs->pcache.block;
2626 file->cache.off = lfs->pcache.off;
2627 file->cache.size = lfs->pcache.size;
2628 lfs_cache_zero(lfs, &lfs->pcache);
2629
2630 file->block = nblock;
2631 file->flags |= LFS_F_WRITING;
2632 return 0;
2633
2634 relocate:
2635 LFS_DEBUG("Bad block at 0x%"PRIx32, nblock);
2636
2637 // just clear cache and try a new block
2638 lfs_cache_drop(lfs, &lfs->pcache);
2639 }
2640 }
2641
2642 static int lfs_file_outline(lfs_t *lfs, lfs_file_t *file) {
2643 file->off = file->pos;
2644 lfs_alloc_ack(lfs);
2645 int err = lfs_file_relocate(lfs, file);
2646 if (err) {
2647 return err;
2648 }
2649
2650 file->flags &= ~LFS_F_INLINE;
2651 return 0;
2652 }
2653
2654 static int lfs_file_flush(lfs_t *lfs, lfs_file_t *file) {
2655 LFS_ASSERT(file->flags & LFS_F_OPENED);
2656
2657 if (file->flags & LFS_F_READING) {
2658 if (!(file->flags & LFS_F_INLINE)) {
2659 lfs_cache_drop(lfs, &file->cache);
2660 }
2661 file->flags &= ~LFS_F_READING;
2662 }
2663
2664 if (file->flags & LFS_F_WRITING) {
2665 lfs_off_t pos = file->pos;
2666
2667 if (!(file->flags & LFS_F_INLINE)) {
2668 // copy over anything after current branch
2669 lfs_file_t orig = {
2670 .ctz.head = file->ctz.head,
2671 .ctz.size = file->ctz.size,
2672 .flags = LFS_O_RDONLY | LFS_F_OPENED,
2673 .pos = file->pos,
2674 .cache = lfs->rcache,
2675 };
2676 lfs_cache_drop(lfs, &lfs->rcache);
2677
2678 while (file->pos < file->ctz.size) {
2679 // copy over a byte at a time, leave it up to caching
2680 // to make this efficient
2681 uint8_t data;
2682 lfs_ssize_t res = lfs_file_read(lfs, &orig, &data, 1);
2683 if (res < 0) {
2684 return res;
2685 }
2686
2687 res = lfs_file_write(lfs, file, &data, 1);
2688 if (res < 0) {
2689 return res;
2690 }
2691
2692 // keep our reference to the rcache in sync
2693 if (lfs->rcache.block != LFS_BLOCK_NULL) {
2694 lfs_cache_drop(lfs, &orig.cache);
2695 lfs_cache_drop(lfs, &lfs->rcache);
2696 }
2697 }
2698
2699 // write out what we have
2700 while (true) {
2701 int err = lfs_bd_flush(lfs, &file->cache, &lfs->rcache, true);
2702 if (err) {
2703 if (err == LFS_ERR_CORRUPT) {
2704 goto relocate;
2705 }
2706 return err;
2707 }
2708
2709 break;
2710
2711 relocate:
2712 LFS_DEBUG("Bad block at 0x%"PRIx32, file->block);
2713 err = lfs_file_relocate(lfs, file);
2714 if (err) {
2715 return err;
2716 }
2717 }
2718 } else {
2719 file->pos = lfs_max(file->pos, file->ctz.size);
2720 }
2721
2722 // actual file updates
2723 file->ctz.head = file->block;
2724 file->ctz.size = file->pos;
2725 file->flags &= ~LFS_F_WRITING;
2726 file->flags |= LFS_F_DIRTY;
2727
2728 file->pos = pos;
2729 }
2730
2731 return 0;
2732 }
2733
2734 int lfs_file_sync(lfs_t *lfs, lfs_file_t *file) {
2735 LFS_TRACE("lfs_file_sync(%p, %p)", (void*)lfs, (void*)file);
2736 LFS_ASSERT(file->flags & LFS_F_OPENED);
2737
2738 if (file->flags & LFS_F_ERRED) {
2739 // it's not safe to do anything if our file errored
2740 LFS_TRACE("lfs_file_sync -> %d", 0);
2741 return 0;
2742 }
2743
2744 int err = lfs_file_flush(lfs, file);
2745 if (err) {
2746 file->flags |= LFS_F_ERRED;
2747 LFS_TRACE("lfs_file_sync -> %d", err);
2748 return err;
2749 }
2750
2751 if ((file->flags & LFS_F_DIRTY) &&
2752 !lfs_pair_isnull(file->m.pair)) {
2753 // update dir entry
2754 uint16_t type;
2755 const void *buffer;
2756 lfs_size_t size;
2757 struct lfs_ctz ctz;
2758 if (file->flags & LFS_F_INLINE) {
2759 // inline the whole file
2760 type = LFS_TYPE_INLINESTRUCT;
2761 buffer = file->cache.buffer;
2762 size = file->ctz.size;
2763 } else {
2764 // update the ctz reference
2765 type = LFS_TYPE_CTZSTRUCT;
2766 // copy ctz so alloc will work during a relocate
2767 ctz = file->ctz;
2768 lfs_ctz_tole32(&ctz);
2769 buffer = &ctz;
2770 size = sizeof(ctz);
2771 }
2772
2773 // commit file data and attributes
2774 err = lfs_dir_commit(lfs, &file->m, LFS_MKATTRS(
2775 {LFS_MKTAG(type, file->id, size), buffer},
2776 {LFS_MKTAG(LFS_FROM_USERATTRS, file->id,
2777 file->cfg->attr_count), file->cfg->attrs}));
2778 if (err) {
2779 file->flags |= LFS_F_ERRED;
2780 LFS_TRACE("lfs_file_sync -> %d", err);
2781 return err;
2782 }
2783
2784 file->flags &= ~LFS_F_DIRTY;
2785 }
2786
2787 LFS_TRACE("lfs_file_sync -> %d", 0);
2788 return 0;
2789 }
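/*
 * Note (editorial): lfs_file_sync commits both the file data and its
 * directory entry while keeping the file open, so it can be used as a
 * checkpoint between writes. A minimal sketch, assuming `file` is open for
 * writing and `buf`/`len` are the application's data:
 *
 *     lfs_ssize_t res = lfs_file_write(&lfs, &file, buf, len);
 *     if (res < 0) {
 *         return (int)res;
 *     }
 *     int err = lfs_file_sync(&lfs, &file);
 *     // on success the update is committed to disk
 */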
2790
2791 lfs_ssize_t lfs_file_read(lfs_t *lfs, lfs_file_t *file,
2792 void *buffer, lfs_size_t size) {
2793 LFS_TRACE("lfs_file_read(%p, %p, %p, %"PRIu32")",
2794 (void*)lfs, (void*)file, buffer, size);
2795 LFS_ASSERT(file->flags & LFS_F_OPENED);
2796 LFS_ASSERT((file->flags & 3) != LFS_O_WRONLY);
2797
2798 uint8_t *data = buffer;
2799 lfs_size_t nsize = size;
2800
2801 if (file->flags & LFS_F_WRITING) {
2802 // flush out any writes
2803 int err = lfs_file_flush(lfs, file);
2804 if (err) {
2805 LFS_TRACE("lfs_file_read -> %d", err);
2806 return err;
2807 }
2808 }
2809
2810 if (file->pos >= file->ctz.size) {
2811 // eof if past end
2812 LFS_TRACE("lfs_file_read -> %d", 0);
2813 return 0;
2814 }
2815
2816 size = lfs_min(size, file->ctz.size - file->pos);
2817 nsize = size;
2818
2819 while (nsize > 0) {
2820 // check if we need a new block
2821 if (!(file->flags & LFS_F_READING) ||
2822 file->off == lfs->cfg->block_size) {
2823 if (!(file->flags & LFS_F_INLINE)) {
2824 int err = lfs_ctz_find(lfs, NULL, &file->cache,
2825 file->ctz.head, file->ctz.size,
2826 file->pos, &file->block, &file->off);
2827 if (err) {
2828 LFS_TRACE("lfs_file_read -> %d", err);
2829 return err;
2830 }
2831 } else {
2832 file->block = LFS_BLOCK_INLINE;
2833 file->off = file->pos;
2834 }
2835
2836 file->flags |= LFS_F_READING;
2837 }
2838
2839 // read as much as we can in current block
2840 lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
2841 if (file->flags & LFS_F_INLINE) {
2842 int err = lfs_dir_getread(lfs, &file->m,
2843 NULL, &file->cache, lfs->cfg->block_size,
2844 LFS_MKTAG(0xfff, 0x1ff, 0),
2845 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, file->id, 0),
2846 file->off, data, diff);
2847 if (err) {
2848 LFS_TRACE("lfs_file_read -> %d", err);
2849 return err;
2850 }
2851 } else {
2852 int err = lfs_bd_read(lfs,
2853 NULL, &file->cache, lfs->cfg->block_size,
2854 file->block, file->off, data, diff);
2855 if (err) {
2856 LFS_TRACE("lfs_file_read -> %d", err);
2857 return err;
2858 }
2859 }
2860
2861 file->pos += diff;
2862 file->off += diff;
2863 data += diff;
2864 nsize -= diff;
2865 }
2866
2867 LFS_TRACE("lfs_file_read -> %"PRId32, size);
2868 return size;
2869 }
2870
2871 lfs_ssize_t lfs_file_write(lfs_t *lfs, lfs_file_t *file,
2872 const void *buffer, lfs_size_t size) {
2873 LFS_TRACE("lfs_file_write(%p, %p, %p, %"PRIu32")",
2874 (void*)lfs, (void*)file, buffer, size);
2875 LFS_ASSERT(file->flags & LFS_F_OPENED);
2876 LFS_ASSERT((file->flags & 3) != LFS_O_RDONLY);
2877
2878 const uint8_t *data = buffer;
2879 lfs_size_t nsize = size;
2880
2881 if (file->flags & LFS_F_READING) {
2882 // drop any reads
2883 int err = lfs_file_flush(lfs, file);
2884 if (err) {
2885 LFS_TRACE("lfs_file_write -> %d", err);
2886 return err;
2887 }
2888 }
2889
2890 if ((file->flags & LFS_O_APPEND) && file->pos < file->ctz.size) {
2891 file->pos = file->ctz.size;
2892 }
2893
2894 if (file->pos + size > lfs->file_max) {
2895 // Larger than file limit?
2896 LFS_TRACE("lfs_file_write -> %d", LFS_ERR_FBIG);
2897 return LFS_ERR_FBIG;
2898 }
2899
2900 if (!(file->flags & LFS_F_WRITING) && file->pos > file->ctz.size) {
2901 // fill with zeros
2902 lfs_off_t pos = file->pos;
2903 file->pos = file->ctz.size;
2904
2905 while (file->pos < pos) {
2906 lfs_ssize_t res = lfs_file_write(lfs, file, &(uint8_t){0}, 1);
2907 if (res < 0) {
2908 LFS_TRACE("lfs_file_write -> %"PRId32, res);
2909 return res;
2910 }
2911 }
2912 }
2913
2914 if ((file->flags & LFS_F_INLINE) &&
2915 lfs_max(file->pos+nsize, file->ctz.size) >
2916 lfs_min(0x3fe, lfs_min(
2917 lfs->cfg->cache_size, lfs->cfg->block_size/8))) {
2918 // inline file doesn't fit anymore
2919 int err = lfs_file_outline(lfs, file);
2920 if (err) {
2921 file->flags |= LFS_F_ERRED;
2922 LFS_TRACE("lfs_file_write -> %d", err);
2923 return err;
2924 }
2925 }
2926
2927 while (nsize > 0) {
2928 // check if we need a new block
2929 if (!(file->flags & LFS_F_WRITING) ||
2930 file->off == lfs->cfg->block_size) {
2931 if (!(file->flags & LFS_F_INLINE)) {
2932 if (!(file->flags & LFS_F_WRITING) && file->pos > 0) {
2933 // find out which block we're extending from
2934 int err = lfs_ctz_find(lfs, NULL, &file->cache,
2935 file->ctz.head, file->ctz.size,
2936 file->pos-1, &file->block, &file->off);
2937 if (err) {
2938 file->flags |= LFS_F_ERRED;
2939 LFS_TRACE("lfs_file_write -> %d", err);
2940 return err;
2941 }
2942
2943 // mark cache as dirty since we may have read data into it
2944 lfs_cache_zero(lfs, &file->cache);
2945 }
2946
2947 // extend file with new blocks
2948 lfs_alloc_ack(lfs);
2949 int err = lfs_ctz_extend(lfs, &file->cache, &lfs->rcache,
2950 file->block, file->pos,
2951 &file->block, &file->off);
2952 if (err) {
2953 file->flags |= LFS_F_ERRED;
2954 LFS_TRACE("lfs_file_write -> %d", err);
2955 return err;
2956 }
2957 } else {
2958 file->block = LFS_BLOCK_INLINE;
2959 file->off = file->pos;
2960 }
2961
2962 file->flags |= LFS_F_WRITING;
2963 }
2964
2965 // program as much as we can in current block
2966 lfs_size_t diff = lfs_min(nsize, lfs->cfg->block_size - file->off);
2967 while (true) {
2968 int err = lfs_bd_prog(lfs, &file->cache, &lfs->rcache, true,
2969 file->block, file->off, data, diff);
2970 if (err) {
2971 if (err == LFS_ERR_CORRUPT) {
2972 goto relocate;
2973 }
2974 file->flags |= LFS_F_ERRED;
2975 LFS_TRACE("lfs_file_write -> %d", err);
2976 return err;
2977 }
2978
2979 break;
2980 relocate:
2981 err = lfs_file_relocate(lfs, file);
2982 if (err) {
2983 file->flags |= LFS_F_ERRED;
2984 LFS_TRACE("lfs_file_write -> %d", err);
2985 return err;
2986 }
2987 }
2988
2989 file->pos += diff;
2990 file->off += diff;
2991 data += diff;
2992 nsize -= diff;
2993
2994 lfs_alloc_ack(lfs);
2995 }
2996
2997 file->flags &= ~LFS_F_ERRED;
2998 LFS_TRACE("lfs_file_write -> %"PRId32, size);
2999 return size;
3000 }
3001
3002 lfs_soff_t lfs_file_seek(lfs_t *lfs, lfs_file_t *file,
3003 lfs_soff_t off, int whence) {
3004 LFS_TRACE("lfs_file_seek(%p, %p, %"PRId32", %d)",
3005 (void*)lfs, (void*)file, off, whence);
3006 LFS_ASSERT(file->flags & LFS_F_OPENED);
3007
3008 // write out everything beforehand, may be noop if rdonly
3009 int err = lfs_file_flush(lfs, file);
3010 if (err) {
3011 LFS_TRACE("lfs_file_seek -> %d", err);
3012 return err;
3013 }
3014
3015 // find new pos
3016 lfs_off_t npos = file->pos;
3017 if (whence == LFS_SEEK_SET) {
3018 npos = off;
3019 } else if (whence == LFS_SEEK_CUR) {
3020 npos = file->pos + off;
3021 } else if (whence == LFS_SEEK_END) {
3022 npos = file->ctz.size + off;
3023 }
3024
3025 if (npos > lfs->file_max) {
3026 // file position out of range
3027 LFS_TRACE("lfs_file_seek -> %d", LFS_ERR_INVAL);
3028 return LFS_ERR_INVAL;
3029 }
3030
3031 // update pos
3032 file->pos = npos;
3033 LFS_TRACE("lfs_file_seek -> %"PRId32, npos);
3034 return npos;
3035 }
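/*
 * Usage sketch (editorial illustration, not upstream code): random access
 * with the seek/tell API. Seeking past the end of the file is allowed; the
 * gap is zero-filled by the next write (see lfs_file_write above).
 *
 *     uint8_t buf[16];
 *     lfs_soff_t old = lfs_file_tell(&lfs, &file);
 *     lfs_soff_t pos = lfs_file_seek(&lfs, &file, 64, LFS_SEEK_SET);
 *     if (pos < 0) {
 *         return (int)pos;
 *     }
 *     lfs_ssize_t res = lfs_file_read(&lfs, &file, buf, sizeof(buf));
 *     lfs_file_seek(&lfs, &file, old, LFS_SEEK_SET);  // restore position
 */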
3036
3037 int lfs_file_truncate(lfs_t *lfs, lfs_file_t *file, lfs_off_t size) {
3038 LFS_TRACE("lfs_file_truncate(%p, %p, %"PRIu32")",
3039 (void*)lfs, (void*)file, size);
3040 LFS_ASSERT(file->flags & LFS_F_OPENED);
3041 LFS_ASSERT((file->flags & 3) != LFS_O_RDONLY);
3042
3043 if (size > LFS_FILE_MAX) {
3044 LFS_TRACE("lfs_file_truncate -> %d", LFS_ERR_INVAL);
3045 return LFS_ERR_INVAL;
3046 }
3047
3048 lfs_off_t pos = file->pos;
3049 lfs_off_t oldsize = lfs_file_size(lfs, file);
3050 if (size < oldsize) {
3051 // need to flush since directly changing metadata
3052 int err = lfs_file_flush(lfs, file);
3053 if (err) {
3054 LFS_TRACE("lfs_file_truncate -> %d", err);
3055 return err;
3056 }
3057
3058 // lookup new head in ctz skip list
3059 err = lfs_ctz_find(lfs, NULL, &file->cache,
3060 file->ctz.head, file->ctz.size,
3061 size, &file->block, &file->off);
3062 if (err) {
3063 LFS_TRACE("lfs_file_truncate -> %d", err);
3064 return err;
3065 }
3066
3067 file->ctz.head = file->block;
3068 file->ctz.size = size;
3069 file->flags |= LFS_F_DIRTY | LFS_F_READING;
3070 } else if (size > oldsize) {
3071 // flush+seek if not already at end
3072 if (file->pos != oldsize) {
3073 lfs_soff_t res = lfs_file_seek(lfs, file, 0, LFS_SEEK_END);
3074 if (res < 0) {
3075 LFS_TRACE("lfs_file_truncate -> %"PRId32, res);
3076 return (int)res;
3077 }
3078 }
3079
3080 // fill with zeros
3081 while (file->pos < size) {
3082 lfs_ssize_t res = lfs_file_write(lfs, file, &(uint8_t){0}, 1);
3083 if (res < 0) {
3084 LFS_TRACE("lfs_file_truncate -> %"PRId32, res);
3085 return (int)res;
3086 }
3087 }
3088 }
3089
3090 // restore pos
3091 lfs_soff_t res = lfs_file_seek(lfs, file, pos, LFS_SEEK_SET);
3092 if (res < 0) {
3093 LFS_TRACE("lfs_file_truncate -> %"PRId32, res);
3094 return (int)res;
3095 }
3096
3097 LFS_TRACE("lfs_file_truncate -> %d", 0);
3098 return 0;
3099 }
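/*
 * Usage sketch (editorial illustration): shrinking a file in place. The
 * file position is preserved across the call; growing a file instead
 * zero-fills the new region.
 *
 *     int err = lfs_file_truncate(&lfs, &file, 1024);
 *     if (err) {
 *         return err;
 *     }
 */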
3100
3101 lfs_soff_t lfs_file_tell(lfs_t *lfs, lfs_file_t *file) {
3102 LFS_TRACE("lfs_file_tell(%p, %p)", (void*)lfs, (void*)file);
3103 LFS_ASSERT(file->flags & LFS_F_OPENED);
3104 (void)lfs;
3105 LFS_TRACE("lfs_file_tell -> %"PRId32, file->pos);
3106 return file->pos;
3107 }
3108
3109 int lfs_file_rewind(lfs_t *lfs, lfs_file_t *file) {
3110 LFS_TRACE("lfs_file_rewind(%p, %p)", (void*)lfs, (void*)file);
3111 lfs_soff_t res = lfs_file_seek(lfs, file, 0, LFS_SEEK_SET);
3112 if (res < 0) {
3113 LFS_TRACE("lfs_file_rewind -> %"PRId32, res);
3114 return (int)res;
3115 }
3116
3117 LFS_TRACE("lfs_file_rewind -> %d", 0);
3118 return 0;
3119 }
3120
3121 lfs_soff_t lfs_file_size(lfs_t *lfs, lfs_file_t *file) {
3122 LFS_TRACE("lfs_file_size(%p, %p)", (void*)lfs, (void*)file);
3123 LFS_ASSERT(file->flags & LFS_F_OPENED);
3124 (void)lfs;
3125 if (file->flags & LFS_F_WRITING) {
3126 LFS_TRACE("lfs_file_size -> %"PRId32,
3127 lfs_max(file->pos, file->ctz.size));
3128 return lfs_max(file->pos, file->ctz.size);
3129 } else {
3130 LFS_TRACE("lfs_file_size -> %"PRId32, file->ctz.size);
3131 return file->ctz.size;
3132 }
3133 }
3134
3135
3136 /// General fs operations ///
3137 int lfs_stat(lfs_t *lfs, const char *path, struct lfs_info *info) {
3138 LFS_TRACE("lfs_stat(%p, \"%s\", %p)", (void*)lfs, path, (void*)info);
3139 lfs_mdir_t cwd;
3140 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3141 if (tag < 0) {
3142 LFS_TRACE("lfs_stat -> %"PRId32, tag);
3143 return (int)tag;
3144 }
3145
3146 int err = lfs_dir_getinfo(lfs, &cwd, lfs_tag_id(tag), info);
3147 LFS_TRACE("lfs_stat -> %d", err);
3148 return err;
3149 }
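/*
 * Usage sketch (editorial illustration): checking whether a path exists
 * and what it is. Assumes `lfs` is mounted.
 *
 *     struct lfs_info info;
 *     int err = lfs_stat(&lfs, "logs/boot_count", &info);
 *     if (err == LFS_ERR_NOENT) {
 *         // path does not exist
 *     } else if (err) {
 *         return err;
 *     } else {
 *         // info.type is LFS_TYPE_REG or LFS_TYPE_DIR;
 *         // info.size is only meaningful for regular files
 *     }
 */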
3150
3151 int lfs_remove(lfs_t *lfs, const char *path) {
3152 LFS_TRACE("lfs_remove(%p, \"%s\")", (void*)lfs, path);
3153 // deorphan if we haven't yet, needed at most once after poweron
3154 int err = lfs_fs_forceconsistency(lfs);
3155 if (err) {
3156 LFS_TRACE("lfs_remove -> %d", err);
3157 return err;
3158 }
3159
3160 lfs_mdir_t cwd;
3161 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3162 if (tag < 0 || lfs_tag_id(tag) == 0x3ff) {
3163 LFS_TRACE("lfs_remove -> %"PRId32, (tag < 0) ? tag : LFS_ERR_INVAL);
3164 return (tag < 0) ? (int)tag : LFS_ERR_INVAL;
3165 }
3166
3167 struct lfs_mlist dir;
3168 dir.next = lfs->mlist;
3169 if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
3170 // must be empty before removal
3171 lfs_block_t pair[2];
3172 lfs_stag_t res = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x700, 0x3ff, 0),
3173 LFS_MKTAG(LFS_TYPE_STRUCT, lfs_tag_id(tag), 8), pair);
3174 if (res < 0) {
3175 LFS_TRACE("lfs_remove -> %"PRId32, res);
3176 return (int)res;
3177 }
3178 lfs_pair_fromle32(pair);
3179
3180 err = lfs_dir_fetch(lfs, &dir.m, pair);
3181 if (err) {
3182 LFS_TRACE("lfs_remove -> %d", err);
3183 return err;
3184 }
3185
3186 if (dir.m.count > 0 || dir.m.split) {
3187 LFS_TRACE("lfs_remove -> %d", LFS_ERR_NOTEMPTY);
3188 return LFS_ERR_NOTEMPTY;
3189 }
3190
3191 // mark fs as orphaned
3192 lfs_fs_preporphans(lfs, +1);
3193
3194 // I know it's crazy but yes, dir can be changed by our parent's
3195 // commit (if predecessor is child)
3196 dir.type = 0;
3197 dir.id = 0;
3198 lfs->mlist = &dir;
3199 }
3200
3201 // delete the entry
3202 err = lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
3203 {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(tag), 0)}));
3204 if (err) {
3205 lfs->mlist = dir.next;
3206 LFS_TRACE("lfs_remove -> %d", err);
3207 return err;
3208 }
3209
3210 lfs->mlist = dir.next;
3211 if (lfs_tag_type3(tag) == LFS_TYPE_DIR) {
3212 // fix orphan
3213 lfs_fs_preporphans(lfs, -1);
3214
3215 err = lfs_fs_pred(lfs, dir.m.pair, &cwd);
3216 if (err) {
3217 LFS_TRACE("lfs_remove -> %d", err);
3218 return err;
3219 }
3220
3221 err = lfs_dir_drop(lfs, &cwd, &dir.m);
3222 if (err) {
3223 LFS_TRACE("lfs_remove -> %d", err);
3224 return err;
3225 }
3226 }
3227
3228 LFS_TRACE("lfs_remove -> %d", 0);
3229 return 0;
3230 }
3231
3232 int lfs_rename(lfs_t *lfs, const char *oldpath, const char *newpath) {
3233 LFS_TRACE("lfs_rename(%p, \"%s\", \"%s\")", (void*)lfs, oldpath, newpath);
3234
3235 // deorphan if we haven't yet, needed at most once after poweron
3236 int err = lfs_fs_forceconsistency(lfs);
3237 if (err) {
3238 LFS_TRACE("lfs_rename -> %d", err);
3239 return err;
3240 }
3241
3242 // find old entry
3243 lfs_mdir_t oldcwd;
3244 lfs_stag_t oldtag = lfs_dir_find(lfs, &oldcwd, &oldpath, NULL);
3245 if (oldtag < 0 || lfs_tag_id(oldtag) == 0x3ff) {
3246 LFS_TRACE("lfs_rename -> %"PRId32,
3247 (oldtag < 0) ? oldtag : LFS_ERR_INVAL);
3248 return (oldtag < 0) ? (int)oldtag : LFS_ERR_INVAL;
3249 }
3250
3251 // find new entry
3252 lfs_mdir_t newcwd;
3253 uint16_t newid;
3254 lfs_stag_t prevtag = lfs_dir_find(lfs, &newcwd, &newpath, &newid);
3255 if ((prevtag < 0 || lfs_tag_id(prevtag) == 0x3ff) &&
3256 !(prevtag == LFS_ERR_NOENT && newid != 0x3ff)) {
3257 LFS_TRACE("lfs_rename -> %"PRId32,
3258 (prevtag < 0) ? prevtag : LFS_ERR_INVAL);
3259 return (prevtag < 0) ? (int)prevtag : LFS_ERR_INVAL;
3260 }
3261
3262 // if we're in the same pair there's a few special cases...
3263 bool samepair = (lfs_pair_cmp(oldcwd.pair, newcwd.pair) == 0);
3264 uint16_t newoldid = lfs_tag_id(oldtag);
3265
3266 struct lfs_mlist prevdir;
3267 prevdir.next = lfs->mlist;
3268 if (prevtag == LFS_ERR_NOENT) {
3269 // check that name fits
3270 lfs_size_t nlen = strlen(newpath);
3271 if (nlen > lfs->name_max) {
3272 LFS_TRACE("lfs_rename -> %d", LFS_ERR_NAMETOOLONG);
3273 return LFS_ERR_NAMETOOLONG;
3274 }
3275
3276         // there is a small chance we are being renamed in the same
3277         // directory to an id less than our old id; the global update
3278         // to handle this is a bit messy
3279 if (samepair && newid <= newoldid) {
3280 newoldid += 1;
3281 }
3282 } else if (lfs_tag_type3(prevtag) != lfs_tag_type3(oldtag)) {
3283 LFS_TRACE("lfs_rename -> %d", LFS_ERR_ISDIR);
3284 return LFS_ERR_ISDIR;
3285 } else if (samepair && newid == newoldid) {
3286 // we're renaming to ourselves??
3287 LFS_TRACE("lfs_rename -> %d", 0);
3288 return 0;
3289 } else if (lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
3290 // must be empty before removal
3291 lfs_block_t prevpair[2];
3292 lfs_stag_t res = lfs_dir_get(lfs, &newcwd, LFS_MKTAG(0x700, 0x3ff, 0),
3293 LFS_MKTAG(LFS_TYPE_STRUCT, newid, 8), prevpair);
3294 if (res < 0) {
3295 LFS_TRACE("lfs_rename -> %"PRId32, res);
3296 return (int)res;
3297 }
3298 lfs_pair_fromle32(prevpair);
3299
3300 // must be empty before removal
3301 err = lfs_dir_fetch(lfs, &prevdir.m, prevpair);
3302 if (err) {
3303 LFS_TRACE("lfs_rename -> %d", err);
3304 return err;
3305 }
3306
3307 if (prevdir.m.count > 0 || prevdir.m.split) {
3308 LFS_TRACE("lfs_rename -> %d", LFS_ERR_NOTEMPTY);
3309 return LFS_ERR_NOTEMPTY;
3310 }
3311
3312 // mark fs as orphaned
3313 lfs_fs_preporphans(lfs, +1);
3314
3315 // I know it's crazy but yes, dir can be changed by our parent's
3316 // commit (if predecessor is child)
3317 prevdir.type = 0;
3318 prevdir.id = 0;
3319 lfs->mlist = &prevdir;
3320 }
3321
3322 if (!samepair) {
3323 lfs_fs_prepmove(lfs, newoldid, oldcwd.pair);
3324 }
3325
3326 // move over all attributes
3327 err = lfs_dir_commit(lfs, &newcwd, LFS_MKATTRS(
3328 {LFS_MKTAG_IF(prevtag != LFS_ERR_NOENT,
3329 LFS_TYPE_DELETE, newid, 0)},
3330 {LFS_MKTAG(LFS_TYPE_CREATE, newid, 0)},
3331 {LFS_MKTAG(lfs_tag_type3(oldtag), newid, strlen(newpath)), newpath},
3332 {LFS_MKTAG(LFS_FROM_MOVE, newid, lfs_tag_id(oldtag)), &oldcwd},
3333 {LFS_MKTAG_IF(samepair,
3334 LFS_TYPE_DELETE, newoldid, 0)}));
3335 if (err) {
3336 lfs->mlist = prevdir.next;
3337 LFS_TRACE("lfs_rename -> %d", err);
3338 return err;
3339 }
3340
3341 // let commit clean up after move (if we're different! otherwise move
3342 // logic already fixed it for us)
3343 if (!samepair && lfs_gstate_hasmove(&lfs->gstate)) {
3344 // prep gstate and delete move id
3345 lfs_fs_prepmove(lfs, 0x3ff, NULL);
3346 err = lfs_dir_commit(lfs, &oldcwd, LFS_MKATTRS(
3347 {LFS_MKTAG(LFS_TYPE_DELETE, lfs_tag_id(oldtag), 0)}));
3348 if (err) {
3349 lfs->mlist = prevdir.next;
3350 LFS_TRACE("lfs_rename -> %d", err);
3351 return err;
3352 }
3353 }
3354
3355 lfs->mlist = prevdir.next;
3356 if (prevtag != LFS_ERR_NOENT && lfs_tag_type3(prevtag) == LFS_TYPE_DIR) {
3357 // fix orphan
3358 lfs_fs_preporphans(lfs, -1);
3359
3360 err = lfs_fs_pred(lfs, prevdir.m.pair, &newcwd);
3361 if (err) {
3362 LFS_TRACE("lfs_rename -> %d", err);
3363 return err;
3364 }
3365
3366 err = lfs_dir_drop(lfs, &newcwd, &prevdir.m);
3367 if (err) {
3368 LFS_TRACE("lfs_rename -> %d", err);
3369 return err;
3370 }
3371 }
3372
3373 LFS_TRACE("lfs_rename -> %d", 0);
3374 return 0;
3375 }
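/*
 * Usage sketch (editorial illustration): atomically replacing a file by
 * renaming over it. The destination may already exist as long as it has
 * the same type (and, for directories, is empty); renames across metadata
 * pairs are kept power-loss safe via the gstate "move" prepared above.
 *
 *     int err = lfs_rename(&lfs, "config.new", "config");
 *     if (err) {
 *         return err;
 *     }
 */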
3376
3377 lfs_ssize_t lfs_getattr(lfs_t *lfs, const char *path,
3378 uint8_t type, void *buffer, lfs_size_t size) {
3379 LFS_TRACE("lfs_getattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
3380 (void*)lfs, path, type, buffer, size);
3381 lfs_mdir_t cwd;
3382 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3383 if (tag < 0) {
3384 LFS_TRACE("lfs_getattr -> %"PRId32, tag);
3385 return tag;
3386 }
3387
3388 uint16_t id = lfs_tag_id(tag);
3389 if (id == 0x3ff) {
3390 // special case for root
3391 id = 0;
3392 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
3393 if (err) {
3394 LFS_TRACE("lfs_getattr -> %d", err);
3395 return err;
3396 }
3397 }
3398
3399 tag = lfs_dir_get(lfs, &cwd, LFS_MKTAG(0x7ff, 0x3ff, 0),
3400 LFS_MKTAG(LFS_TYPE_USERATTR + type,
3401 id, lfs_min(size, lfs->attr_max)),
3402 buffer);
3403 if (tag < 0) {
3404 if (tag == LFS_ERR_NOENT) {
3405 LFS_TRACE("lfs_getattr -> %d", LFS_ERR_NOATTR);
3406 return LFS_ERR_NOATTR;
3407 }
3408
3409 LFS_TRACE("lfs_getattr -> %"PRId32, tag);
3410 return tag;
3411 }
3412
3413 size = lfs_tag_size(tag);
3414 LFS_TRACE("lfs_getattr -> %"PRId32, size);
3415 return size;
3416 }
3417
3418 static int lfs_commitattr(lfs_t *lfs, const char *path,
3419 uint8_t type, const void *buffer, lfs_size_t size) {
3420 lfs_mdir_t cwd;
3421 lfs_stag_t tag = lfs_dir_find(lfs, &cwd, &path, NULL);
3422 if (tag < 0) {
3423 return tag;
3424 }
3425
3426 uint16_t id = lfs_tag_id(tag);
3427 if (id == 0x3ff) {
3428 // special case for root
3429 id = 0;
3430 int err = lfs_dir_fetch(lfs, &cwd, lfs->root);
3431 if (err) {
3432 return err;
3433 }
3434 }
3435
3436 return lfs_dir_commit(lfs, &cwd, LFS_MKATTRS(
3437 {LFS_MKTAG(LFS_TYPE_USERATTR + type, id, size), buffer}));
3438 }
3439
3440 int lfs_setattr(lfs_t *lfs, const char *path,
3441 uint8_t type, const void *buffer, lfs_size_t size) {
3442 LFS_TRACE("lfs_setattr(%p, \"%s\", %"PRIu8", %p, %"PRIu32")",
3443 (void*)lfs, path, type, buffer, size);
3444 if (size > lfs->attr_max) {
3445 LFS_TRACE("lfs_setattr -> %d", LFS_ERR_NOSPC);
3446 return LFS_ERR_NOSPC;
3447 }
3448
3449 int err = lfs_commitattr(lfs, path, type, buffer, size);
3450 LFS_TRACE("lfs_setattr -> %d", err);
3451 return err;
3452 }
3453
3454 int lfs_removeattr(lfs_t *lfs, const char *path, uint8_t type) {
3455 LFS_TRACE("lfs_removeattr(%p, \"%s\", %"PRIu8")", (void*)lfs, path, type);
3456 int err = lfs_commitattr(lfs, path, type, NULL, 0x3ff);
3457 LFS_TRACE("lfs_removeattr -> %d", err);
3458 return err;
3459 }
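/*
 * Usage sketch (editorial illustration): custom attributes. The attribute
 * type byte (0x74 here) is an arbitrary, application-chosen id; the value
 * size is limited by attr_max.
 *
 *     uint32_t timestamp = 1584279428;
 *     int err = lfs_setattr(&lfs, "config", 0x74,
 *             &timestamp, sizeof(timestamp));
 *     if (err) {
 *         return err;
 *     }
 *
 *     uint32_t readback = 0;
 *     lfs_ssize_t res = lfs_getattr(&lfs, "config", 0x74,
 *             &readback, sizeof(readback));
 *     if (res == LFS_ERR_NOATTR) {
 *         // the attribute was never written
 *     } else if (res < 0) {
 *         return (int)res;
 *     }
 */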
3460
3461
3462 /// Filesystem operations ///
3463 static int lfs_init(lfs_t *lfs, const struct lfs_config *cfg) {
3464 lfs->cfg = cfg;
3465 int err = 0;
3466
3467     // validate that the lfs-cfg sizes were initialized properly before
3468     // performing any arithmetic with them
3469 LFS_ASSERT(lfs->cfg->read_size != 0);
3470 LFS_ASSERT(lfs->cfg->prog_size != 0);
3471 LFS_ASSERT(lfs->cfg->cache_size != 0);
3472
3473     // check that the block size is a multiple of the cache size, which
3474     // in turn must be a multiple of the prog and read sizes
3475 LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->read_size == 0);
3476 LFS_ASSERT(lfs->cfg->cache_size % lfs->cfg->prog_size == 0);
3477 LFS_ASSERT(lfs->cfg->block_size % lfs->cfg->cache_size == 0);
3478
3479 // check that the block size is large enough to fit ctz pointers
3480 LFS_ASSERT(4*lfs_npw2(0xffffffff / (lfs->cfg->block_size-2*4))
3481 <= lfs->cfg->block_size);
3482
3483 // block_cycles = 0 is no longer supported.
3484 //
3485 // block_cycles is the number of erase cycles before littlefs evicts
3486 // metadata logs as a part of wear leveling. Suggested values are in the
3487 // range of 100-1000, or set block_cycles to -1 to disable block-level
3488 // wear-leveling.
3489 LFS_ASSERT(lfs->cfg->block_cycles != 0);
3490
3491
3492 // setup read cache
3493 if (lfs->cfg->read_buffer) {
3494 lfs->rcache.buffer = lfs->cfg->read_buffer;
3495 } else {
3496 lfs->rcache.buffer = lfs_malloc(lfs->cfg->cache_size);
3497 if (!lfs->rcache.buffer) {
3498 err = LFS_ERR_NOMEM;
3499 goto cleanup;
3500 }
3501 }
3502
3503 // setup program cache
3504 if (lfs->cfg->prog_buffer) {
3505 lfs->pcache.buffer = lfs->cfg->prog_buffer;
3506 } else {
3507 lfs->pcache.buffer = lfs_malloc(lfs->cfg->cache_size);
3508 if (!lfs->pcache.buffer) {
3509 err = LFS_ERR_NOMEM;
3510 goto cleanup;
3511 }
3512 }
3513
3514 // zero to avoid information leaks
3515 lfs_cache_zero(lfs, &lfs->rcache);
3516 lfs_cache_zero(lfs, &lfs->pcache);
3517
3518     // setup lookahead buffer; size must be a multiple of 64 bits, 32-bit aligned
3519 LFS_ASSERT(lfs->cfg->lookahead_size > 0);
3520 LFS_ASSERT(lfs->cfg->lookahead_size % 8 == 0 &&
3521 (uintptr_t)lfs->cfg->lookahead_buffer % 4 == 0);
3522 if (lfs->cfg->lookahead_buffer) {
3523 lfs->free.buffer = lfs->cfg->lookahead_buffer;
3524 } else {
3525 lfs->free.buffer = lfs_malloc(lfs->cfg->lookahead_size);
3526 if (!lfs->free.buffer) {
3527 err = LFS_ERR_NOMEM;
3528 goto cleanup;
3529 }
3530 }
3531
3532 // check that the size limits are sane
3533 LFS_ASSERT(lfs->cfg->name_max <= LFS_NAME_MAX);
3534 lfs->name_max = lfs->cfg->name_max;
3535 if (!lfs->name_max) {
3536 lfs->name_max = LFS_NAME_MAX;
3537 }
3538
3539 LFS_ASSERT(lfs->cfg->file_max <= LFS_FILE_MAX);
3540 lfs->file_max = lfs->cfg->file_max;
3541 if (!lfs->file_max) {
3542 lfs->file_max = LFS_FILE_MAX;
3543 }
3544
3545 LFS_ASSERT(lfs->cfg->attr_max <= LFS_ATTR_MAX);
3546 lfs->attr_max = lfs->cfg->attr_max;
3547 if (!lfs->attr_max) {
3548 lfs->attr_max = LFS_ATTR_MAX;
3549 }
3550
3551 // setup default state
3552 lfs->root[0] = LFS_BLOCK_NULL;
3553 lfs->root[1] = LFS_BLOCK_NULL;
3554 lfs->mlist = NULL;
3555 lfs->seed = 0;
3556 lfs->gdisk = (lfs_gstate_t){0};
3557 lfs->gstate = (lfs_gstate_t){0};
3558 lfs->gdelta = (lfs_gstate_t){0};
3559 #ifdef LFS_MIGRATE
3560 lfs->lfs1 = NULL;
3561 #endif
3562
3563 return 0;
3564
3565 cleanup:
3566 lfs_deinit(lfs);
3567 return err;
3568 }
3569
3570 static int lfs_deinit(lfs_t *lfs) {
3571 // free allocated memory
3572 if (!lfs->cfg->read_buffer) {
3573 lfs_free(lfs->rcache.buffer);
3574 }
3575
3576 if (!lfs->cfg->prog_buffer) {
3577 lfs_free(lfs->pcache.buffer);
3578 }
3579
3580 if (!lfs->cfg->lookahead_buffer) {
3581 lfs_free(lfs->free.buffer);
3582 }
3583
3584 return 0;
3585 }
3586
3587 int lfs_format(lfs_t *lfs, const struct lfs_config *cfg) {
3588 LFS_TRACE("lfs_format(%p, %p {.context=%p, "
3589 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
3590 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
3591 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
3592 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
3593 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
3594 ".prog_buffer=%p, .lookahead_buffer=%p, "
3595 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
3596 ".attr_max=%"PRIu32"})",
3597 (void*)lfs, (void*)cfg, cfg->context,
3598 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
3599 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
3600 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
3601 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
3602 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
3603 cfg->name_max, cfg->file_max, cfg->attr_max);
3604 int err = 0;
3605 {
3606 err = lfs_init(lfs, cfg);
3607 if (err) {
3608 LFS_TRACE("lfs_format -> %d", err);
3609 return err;
3610 }
3611
3612 // create free lookahead
3613 memset(lfs->free.buffer, 0, lfs->cfg->lookahead_size);
3614 lfs->free.off = 0;
3615 lfs->free.size = lfs_min(8*lfs->cfg->lookahead_size,
3616 lfs->cfg->block_count);
3617 lfs->free.i = 0;
3618 lfs_alloc_ack(lfs);
3619
3620 // create root dir
3621 lfs_mdir_t root;
3622 err = lfs_dir_alloc(lfs, &root);
3623 if (err) {
3624 goto cleanup;
3625 }
3626
3627 // write one superblock
3628 lfs_superblock_t superblock = {
3629 .version = LFS_DISK_VERSION,
3630 .block_size = lfs->cfg->block_size,
3631 .block_count = lfs->cfg->block_count,
3632 .name_max = lfs->name_max,
3633 .file_max = lfs->file_max,
3634 .attr_max = lfs->attr_max,
3635 };
3636
3637 lfs_superblock_tole32(&superblock);
3638 err = lfs_dir_commit(lfs, &root, LFS_MKATTRS(
3639 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0)},
3640 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
3641 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
3642 &superblock}));
3643 if (err) {
3644 goto cleanup;
3645 }
3646
3647 // sanity check that fetch works
3648 err = lfs_dir_fetch(lfs, &root, (const lfs_block_t[2]){0, 1});
3649 if (err) {
3650 goto cleanup;
3651 }
3652
3653 // force compaction to prevent accidentally mounting any
3654 // older version of littlefs that may live on disk
3655 root.erased = false;
3656 err = lfs_dir_commit(lfs, &root, NULL, 0);
3657 if (err) {
3658 goto cleanup;
3659 }
3660 }
3661
3662 cleanup:
3663 lfs_deinit(lfs);
3664 LFS_TRACE("lfs_format -> %d", err);
3665 return err;
3666 }
3667
3668 int lfs_mount(lfs_t *lfs, const struct lfs_config *cfg) {
3669 LFS_TRACE("lfs_mount(%p, %p {.context=%p, "
3670 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
3671 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
3672 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
3673 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
3674 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
3675 ".prog_buffer=%p, .lookahead_buffer=%p, "
3676 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
3677 ".attr_max=%"PRIu32"})",
3678 (void*)lfs, (void*)cfg, cfg->context,
3679 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
3680 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
3681 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
3682 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
3683 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
3684 cfg->name_max, cfg->file_max, cfg->attr_max);
3685 int err = lfs_init(lfs, cfg);
3686 if (err) {
3687 LFS_TRACE("lfs_mount -> %d", err);
3688 return err;
3689 }
3690
3691 // scan directory blocks for superblock and any global updates
3692 lfs_mdir_t dir = {.tail = {0, 1}};
3693 lfs_block_t cycle = 0;
3694 while (!lfs_pair_isnull(dir.tail)) {
3695 if (cycle >= lfs->cfg->block_count/2) {
3696 // loop detected
3697 err = LFS_ERR_CORRUPT;
3698 goto cleanup;
3699 }
3700 cycle += 1;
3701
3702 // fetch next block in tail list
3703 lfs_stag_t tag = lfs_dir_fetchmatch(lfs, &dir, dir.tail,
3704 LFS_MKTAG(0x7ff, 0x3ff, 0),
3705 LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8),
3706 NULL,
3707 lfs_dir_find_match, &(struct lfs_dir_find_match){
3708 lfs, "littlefs", 8});
3709 if (tag < 0) {
3710 err = tag;
3711 goto cleanup;
3712 }
3713
3714 // has superblock?
3715 if (tag && !lfs_tag_isdelete(tag)) {
3716 // update root
3717 lfs->root[0] = dir.pair[0];
3718 lfs->root[1] = dir.pair[1];
3719
3720 // grab superblock
3721 lfs_superblock_t superblock;
3722 tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x7ff, 0x3ff, 0),
3723 LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
3724 &superblock);
3725 if (tag < 0) {
3726 err = tag;
3727 goto cleanup;
3728 }
3729 lfs_superblock_fromle32(&superblock);
3730
3731 // check version
3732 uint16_t major_version = (0xffff & (superblock.version >> 16));
3733 uint16_t minor_version = (0xffff & (superblock.version >> 0));
3734 if ((major_version != LFS_DISK_VERSION_MAJOR ||
3735 minor_version > LFS_DISK_VERSION_MINOR)) {
3736 LFS_ERROR("Invalid version v%"PRIu16".%"PRIu16,
3737 major_version, minor_version);
3738 err = LFS_ERR_INVAL;
3739 goto cleanup;
3740 }
3741
3742 // check superblock configuration
3743 if (superblock.name_max) {
3744 if (superblock.name_max > lfs->name_max) {
3745 LFS_ERROR("Unsupported name_max (%"PRIu32" > %"PRIu32")",
3746 superblock.name_max, lfs->name_max);
3747 err = LFS_ERR_INVAL;
3748 goto cleanup;
3749 }
3750
3751 lfs->name_max = superblock.name_max;
3752 }
3753
3754 if (superblock.file_max) {
3755 if (superblock.file_max > lfs->file_max) {
3756 LFS_ERROR("Unsupported file_max (%"PRIu32" > %"PRIu32")",
3757 superblock.file_max, lfs->file_max);
3758 err = LFS_ERR_INVAL;
3759 goto cleanup;
3760 }
3761
3762 lfs->file_max = superblock.file_max;
3763 }
3764
3765 if (superblock.attr_max) {
3766 if (superblock.attr_max > lfs->attr_max) {
3767 LFS_ERROR("Unsupported attr_max (%"PRIu32" > %"PRIu32")",
3768 superblock.attr_max, lfs->attr_max);
3769 err = LFS_ERR_INVAL;
3770 goto cleanup;
3771 }
3772
3773 lfs->attr_max = superblock.attr_max;
3774 }
3775 }
3776
3777 // has gstate?
3778 err = lfs_dir_getgstate(lfs, &dir, &lfs->gstate);
3779 if (err) {
3780 goto cleanup;
3781 }
3782 }
3783
3784 // found superblock?
3785 if (lfs_pair_isnull(lfs->root)) {
3786 err = LFS_ERR_INVAL;
3787 goto cleanup;
3788 }
3789
3790 // update littlefs with gstate
3791 if (!lfs_gstate_iszero(&lfs->gstate)) {
3792 LFS_DEBUG("Found pending gstate 0x%08"PRIx32"%08"PRIx32"%08"PRIx32,
3793 lfs->gstate.tag,
3794 lfs->gstate.pair[0],
3795 lfs->gstate.pair[1]);
3796 }
3797 lfs->gstate.tag += !lfs_tag_isvalid(lfs->gstate.tag);
3798 lfs->gdisk = lfs->gstate;
3799
3800 // setup free lookahead
3801 lfs_alloc_reset(lfs);
3802
3803 LFS_TRACE("lfs_mount -> %d", 0);
3804 return 0;
3805
3806 cleanup:
3807 lfs_unmount(lfs);
3808 LFS_TRACE("lfs_mount -> %d", err);
3809 return err;
3810 }
3811
3812 int lfs_unmount(lfs_t *lfs) {
3813 LFS_TRACE("lfs_unmount(%p)", (void*)lfs);
3814 int err = lfs_deinit(lfs);
3815 LFS_TRACE("lfs_unmount -> %d", err);
3816 return err;
3817 }
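
// Example usage (a minimal sketch, not part of littlefs itself): mounting
// with a caller-provided, fully-populated struct lfs_config, falling back to
// lfs_format on what is presumably the first boot. Compiled out by default.
#if 0
static int example_mount_or_format(lfs_t *lfs, const struct lfs_config *cfg) {
    // try to mount an existing filesystem first
    int err = lfs_mount(lfs, cfg);
    if (err) {
        // no valid filesystem found, format and mount again
        err = lfs_format(lfs, cfg);
        if (err) {
            return err;
        }
        err = lfs_mount(lfs, cfg);
    }
    return err;
}
#endif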
3818
3819
3820 /// Filesystem operations ///
3821 int lfs_fs_traverseraw(lfs_t *lfs,
3822 int (*cb)(void *data, lfs_block_t block), void *data,
3823 bool includeorphans) {
3824 // iterate over metadata pairs
3825 lfs_mdir_t dir = {.tail = {0, 1}};
3826
3827 #ifdef LFS_MIGRATE
3828 // also consider v1 blocks during migration
3829 if (lfs->lfs1) {
3830 int err = lfs1_traverse(lfs, cb, data);
3831 if (err) {
3832 return err;
3833 }
3834
3835 dir.tail[0] = lfs->root[0];
3836 dir.tail[1] = lfs->root[1];
3837 }
3838 #endif
3839
3840 lfs_block_t cycle = 0;
3841 while (!lfs_pair_isnull(dir.tail)) {
3842 if (cycle >= lfs->cfg->block_count/2) {
3843 // loop detected
3844 return LFS_ERR_CORRUPT;
3845 }
3846 cycle += 1;
3847
3848 for (int i = 0; i < 2; i++) {
3849 int err = cb(data, dir.tail[i]);
3850 if (err) {
3851 return err;
3852 }
3853 }
3854
3855 // iterate through ids in directory
3856 int err = lfs_dir_fetch(lfs, &dir, dir.tail);
3857 if (err) {
3858 return err;
3859 }
3860
3861 for (uint16_t id = 0; id < dir.count; id++) {
3862 struct lfs_ctz ctz;
3863 lfs_stag_t tag = lfs_dir_get(lfs, &dir, LFS_MKTAG(0x700, 0x3ff, 0),
3864 LFS_MKTAG(LFS_TYPE_STRUCT, id, sizeof(ctz)), &ctz);
3865 if (tag < 0) {
3866 if (tag == LFS_ERR_NOENT) {
3867 continue;
3868 }
3869 return tag;
3870 }
3871 lfs_ctz_fromle32(&ctz);
3872
3873 if (lfs_tag_type3(tag) == LFS_TYPE_CTZSTRUCT) {
3874 err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
3875 ctz.head, ctz.size, cb, data);
3876 if (err) {
3877 return err;
3878 }
3879 } else if (includeorphans &&
3880 lfs_tag_type3(tag) == LFS_TYPE_DIRSTRUCT) {
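                // for dirs the struct stored on disk is a block pair, so
                // reinterpret ctz.head as the first of the two dir blocks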
3881 for (int i = 0; i < 2; i++) {
3882 err = cb(data, (&ctz.head)[i]);
3883 if (err) {
3884 return err;
3885 }
3886 }
3887 }
3888 }
3889 }
3890
3891 // iterate over any open files
3892 for (lfs_file_t *f = (lfs_file_t*)lfs->mlist; f; f = f->next) {
3893 if (f->type != LFS_TYPE_REG) {
3894 continue;
3895 }
3896
3897 if ((f->flags & LFS_F_DIRTY) && !(f->flags & LFS_F_INLINE)) {
3898 int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
3899 f->ctz.head, f->ctz.size, cb, data);
3900 if (err) {
3901 return err;
3902 }
3903 }
3904
3905 if ((f->flags & LFS_F_WRITING) && !(f->flags & LFS_F_INLINE)) {
3906 int err = lfs_ctz_traverse(lfs, &f->cache, &lfs->rcache,
3907 f->block, f->pos, cb, data);
3908 if (err) {
3909 return err;
3910 }
3911 }
3912 }
3913
3914 return 0;
3915 }
3916
3917 int lfs_fs_traverse(lfs_t *lfs,
3918 int (*cb)(void *data, lfs_block_t block), void *data) {
3919 LFS_TRACE("lfs_fs_traverse(%p, %p, %p)",
3920 (void*)lfs, (void*)(uintptr_t)cb, data);
3921 int err = lfs_fs_traverseraw(lfs, cb, data, true);
3922     LFS_TRACE("lfs_fs_traverse -> %d", err);
3923 return err;
3924 }
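
// Example usage (a minimal sketch): building an in-use block bitmap with
// lfs_fs_traverse. The bitmap is assumed to be caller-allocated with at
// least one bit per block and zeroed beforehand; the callback may see the
// same block more than once, which is harmless here. Compiled out by default.
#if 0
static int example_bitmap_cb(void *data, lfs_block_t block) {
    uint8_t *bitmap = data;
    bitmap[block / 8] |= (uint8_t)(1 << (block % 8));
    return 0;
}

static int example_fill_bitmap(lfs_t *lfs, uint8_t *bitmap) {
    return lfs_fs_traverse(lfs, example_bitmap_cb, bitmap);
}
#endif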
3925
3926 static int lfs_fs_pred(lfs_t *lfs,
3927 const lfs_block_t pair[2], lfs_mdir_t *pdir) {
3928     // iterate over all directory entries
3929 pdir->tail[0] = 0;
3930 pdir->tail[1] = 1;
3931 lfs_block_t cycle = 0;
3932 while (!lfs_pair_isnull(pdir->tail)) {
3933 if (cycle >= lfs->cfg->block_count/2) {
3934 // loop detected
3935 return LFS_ERR_CORRUPT;
3936 }
3937 cycle += 1;
3938
3939 if (lfs_pair_cmp(pdir->tail, pair) == 0) {
3940 return 0;
3941 }
3942
3943 int err = lfs_dir_fetch(lfs, pdir, pdir->tail);
3944 if (err) {
3945 return err;
3946 }
3947 }
3948
3949 return LFS_ERR_NOENT;
3950 }
3951
3952 struct lfs_fs_parent_match {
3953 lfs_t *lfs;
3954 const lfs_block_t pair[2];
3955 };
3956
3957 static int lfs_fs_parent_match(void *data,
3958 lfs_tag_t tag, const void *buffer) {
3959 struct lfs_fs_parent_match *find = data;
3960 lfs_t *lfs = find->lfs;
3961 const struct lfs_diskoff *disk = buffer;
3962 (void)tag;
3963
3964 lfs_block_t child[2];
3965 int err = lfs_bd_read(lfs,
3966 &lfs->pcache, &lfs->rcache, lfs->cfg->block_size,
3967 disk->block, disk->off, &child, sizeof(child));
3968 if (err) {
3969 return err;
3970 }
3971
3972 lfs_pair_fromle32(child);
3973 return (lfs_pair_cmp(child, find->pair) == 0) ? LFS_CMP_EQ : LFS_CMP_LT;
3974 }
3975
3976 static lfs_stag_t lfs_fs_parent(lfs_t *lfs, const lfs_block_t pair[2],
3977 lfs_mdir_t *parent) {
3978 // use fetchmatch with callback to find pairs
3979 parent->tail[0] = 0;
3980 parent->tail[1] = 1;
3981 lfs_block_t cycle = 0;
3982 while (!lfs_pair_isnull(parent->tail)) {
3983 if (cycle >= lfs->cfg->block_count/2) {
3984 // loop detected
3985 return LFS_ERR_CORRUPT;
3986 }
3987 cycle += 1;
3988
3989 lfs_stag_t tag = lfs_dir_fetchmatch(lfs, parent, parent->tail,
3990 LFS_MKTAG(0x7ff, 0, 0x3ff),
3991 LFS_MKTAG(LFS_TYPE_DIRSTRUCT, 0, 8),
3992 NULL,
3993 lfs_fs_parent_match, &(struct lfs_fs_parent_match){
3994 lfs, {pair[0], pair[1]}});
3995 if (tag && tag != LFS_ERR_NOENT) {
3996 return tag;
3997 }
3998 }
3999
4000 return LFS_ERR_NOENT;
4001 }
4002
4003 static int lfs_fs_relocate(lfs_t *lfs,
4004 const lfs_block_t oldpair[2], lfs_block_t newpair[2]) {
4005 // update internal root
4006 if (lfs_pair_cmp(oldpair, lfs->root) == 0) {
4007 lfs->root[0] = newpair[0];
4008 lfs->root[1] = newpair[1];
4009 }
4010
4011 // update internally tracked dirs
4012 for (struct lfs_mlist *d = lfs->mlist; d; d = d->next) {
4013 if (lfs_pair_cmp(oldpair, d->m.pair) == 0) {
4014 d->m.pair[0] = newpair[0];
4015 d->m.pair[1] = newpair[1];
4016 }
4017
4018 if (d->type == LFS_TYPE_DIR &&
4019 lfs_pair_cmp(oldpair, ((lfs_dir_t*)d)->head) == 0) {
4020 ((lfs_dir_t*)d)->head[0] = newpair[0];
4021 ((lfs_dir_t*)d)->head[1] = newpair[1];
4022 }
4023 }
4024
4025 // find parent
4026 lfs_mdir_t parent;
4027 lfs_stag_t tag = lfs_fs_parent(lfs, oldpair, &parent);
4028 if (tag < 0 && tag != LFS_ERR_NOENT) {
4029 return tag;
4030 }
4031
4032 if (tag != LFS_ERR_NOENT) {
4033 // update disk, this creates a desync
4034 lfs_fs_preporphans(lfs, +1);
4035
4036 // fix pending move in this pair? this looks like an optimization but
4037 // is in fact _required_ since relocating may outdate the move.
4038 uint16_t moveid = 0x3ff;
4039 if (lfs_gstate_hasmovehere(&lfs->gstate, parent.pair)) {
4040 moveid = lfs_tag_id(lfs->gstate.tag);
4041 LFS_DEBUG("Fixing move while relocating "
4042                     "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16",
4043 parent.pair[0], parent.pair[1], moveid);
4044 lfs_fs_prepmove(lfs, 0x3ff, NULL);
4045 if (moveid < lfs_tag_id(tag)) {
4046 tag -= LFS_MKTAG(0, 1, 0);
4047 }
4048 }
4049
4050 lfs_pair_tole32(newpair);
4051 int err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
4052 {LFS_MKTAG_IF(moveid != 0x3ff,
4053 LFS_TYPE_DELETE, moveid, 0)},
4054 {tag, newpair}));
4055 lfs_pair_fromle32(newpair);
4056 if (err) {
4057 return err;
4058 }
4059
4060 // next step, clean up orphans
4061 lfs_fs_preporphans(lfs, -1);
4062 }
4063
4064 // find pred
4065 int err = lfs_fs_pred(lfs, oldpair, &parent);
4066 if (err && err != LFS_ERR_NOENT) {
4067 return err;
4068 }
4069
4070 // if we can't find dir, it must be new
4071 if (err != LFS_ERR_NOENT) {
4072 // fix pending move in this pair? this looks like an optimization but
4073 // is in fact _required_ since relocating may outdate the move.
4074 uint16_t moveid = 0x3ff;
4075 if (lfs_gstate_hasmovehere(&lfs->gstate, parent.pair)) {
4076 moveid = lfs_tag_id(lfs->gstate.tag);
4077 LFS_DEBUG("Fixing move while relocating "
4078                     "{0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16",
4079 parent.pair[0], parent.pair[1], moveid);
4080 lfs_fs_prepmove(lfs, 0x3ff, NULL);
4081 }
4082
4083         // replace bad pair, either we clean up desync, or no desync occurred
4084 lfs_pair_tole32(newpair);
4085 err = lfs_dir_commit(lfs, &parent, LFS_MKATTRS(
4086 {LFS_MKTAG_IF(moveid != 0x3ff,
4087 LFS_TYPE_DELETE, moveid, 0)},
4088 {LFS_MKTAG(LFS_TYPE_TAIL + parent.split, 0x3ff, 8), newpair}));
4089 lfs_pair_fromle32(newpair);
4090 if (err) {
4091 return err;
4092 }
4093 }
4094
4095 return 0;
4096 }
4097
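// The gstate "prep" helpers below stage pending state (an orphan count or an
// outstanding move) in lfs->gstate; the delta is carried along with the next
// commit so that lfs_fs_demove/lfs_fs_deorphan can repair the filesystem if
// power is lost partway through a rename or relocation.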
4098 static void lfs_fs_preporphans(lfs_t *lfs, int8_t orphans) {
4099 LFS_ASSERT(lfs_tag_size(lfs->gstate.tag) > 0 || orphans >= 0);
4100 lfs->gstate.tag += orphans;
4101 lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x800, 0, 0)) |
4102 ((uint32_t)lfs_gstate_hasorphans(&lfs->gstate) << 31));
4103 }
4104
4105 static void lfs_fs_prepmove(lfs_t *lfs,
4106 uint16_t id, const lfs_block_t pair[2]) {
4107 lfs->gstate.tag = ((lfs->gstate.tag & ~LFS_MKTAG(0x7ff, 0x3ff, 0)) |
4108 ((id != 0x3ff) ? LFS_MKTAG(LFS_TYPE_DELETE, id, 0) : 0));
4109 lfs->gstate.pair[0] = (id != 0x3ff) ? pair[0] : 0;
4110 lfs->gstate.pair[1] = (id != 0x3ff) ? pair[1] : 0;
4111 }
4112
4113 static int lfs_fs_demove(lfs_t *lfs) {
4114 if (!lfs_gstate_hasmove(&lfs->gdisk)) {
4115 return 0;
4116 }
4117
4118 // Fix bad moves
4119 LFS_DEBUG("Fixing move {0x%"PRIx32", 0x%"PRIx32"} 0x%"PRIx16,
4120 lfs->gdisk.pair[0],
4121 lfs->gdisk.pair[1],
4122 lfs_tag_id(lfs->gdisk.tag));
4123
4124 // fetch and delete the moved entry
4125 lfs_mdir_t movedir;
4126 int err = lfs_dir_fetch(lfs, &movedir, lfs->gdisk.pair);
4127 if (err) {
4128 return err;
4129 }
4130
4131 // prep gstate and delete move id
4132 uint16_t moveid = lfs_tag_id(lfs->gdisk.tag);
4133 lfs_fs_prepmove(lfs, 0x3ff, NULL);
4134 err = lfs_dir_commit(lfs, &movedir, LFS_MKATTRS(
4135 {LFS_MKTAG(LFS_TYPE_DELETE, moveid, 0)}));
4136 if (err) {
4137 return err;
4138 }
4139
4140 return 0;
4141 }
4142
4143 static int lfs_fs_deorphan(lfs_t *lfs) {
4144 if (!lfs_gstate_hasorphans(&lfs->gstate)) {
4145 return 0;
4146 }
4147
4148 // Fix any orphans
4149 lfs_mdir_t pdir = {.split = true, .tail = {0, 1}};
4150 lfs_mdir_t dir;
4151
4152     // iterate over all directory entries
4153 while (!lfs_pair_isnull(pdir.tail)) {
4154 int err = lfs_dir_fetch(lfs, &dir, pdir.tail);
4155 if (err) {
4156 return err;
4157 }
4158
4159 // check head blocks for orphans
4160 if (!pdir.split) {
4161 // check if we have a parent
4162 lfs_mdir_t parent;
4163 lfs_stag_t tag = lfs_fs_parent(lfs, pdir.tail, &parent);
4164 if (tag < 0 && tag != LFS_ERR_NOENT) {
4165 return tag;
4166 }
4167
4168 if (tag == LFS_ERR_NOENT) {
4169 // we are an orphan
4170 LFS_DEBUG("Fixing orphan {0x%"PRIx32", 0x%"PRIx32"}",
4171 pdir.tail[0], pdir.tail[1]);
4172
4173 err = lfs_dir_drop(lfs, &pdir, &dir);
4174 if (err) {
4175 return err;
4176 }
4177
4178 // refetch tail
4179 continue;
4180 }
4181
4182 lfs_block_t pair[2];
4183 lfs_stag_t res = lfs_dir_get(lfs, &parent,
4184 LFS_MKTAG(0x7ff, 0x3ff, 0), tag, pair);
4185 if (res < 0) {
4186 return res;
4187 }
4188 lfs_pair_fromle32(pair);
4189
4190 if (!lfs_pair_sync(pair, pdir.tail)) {
4191 // we have desynced
4192 LFS_DEBUG("Fixing half-orphan {0x%"PRIx32", 0x%"PRIx32"} "
4193 "-> {0x%"PRIx32", 0x%"PRIx32"}",
4194 pdir.tail[0], pdir.tail[1], pair[0], pair[1]);
4195
4196 lfs_pair_tole32(pair);
4197 err = lfs_dir_commit(lfs, &pdir, LFS_MKATTRS(
4198 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), pair}));
4199 lfs_pair_fromle32(pair);
4200 if (err) {
4201 return err;
4202 }
4203
4204 // refetch tail
4205 continue;
4206 }
4207 }
4208
4209 pdir = dir;
4210 }
4211
4212 // mark orphans as fixed
4213 lfs_fs_preporphans(lfs, -lfs_gstate_getorphans(&lfs->gstate));
4214 return 0;
4215 }
4216
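// Repair any pending move and any orphaned metadata pairs left over from a
// power loss; mutating operations are expected to call this before making
// changes so they start from a consistent on-disk state.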
4217 static int lfs_fs_forceconsistency(lfs_t *lfs) {
4218 int err = lfs_fs_demove(lfs);
4219 if (err) {
4220 return err;
4221 }
4222
4223 err = lfs_fs_deorphan(lfs);
4224 if (err) {
4225 return err;
4226 }
4227
4228 return 0;
4229 }
4230
4231 static int lfs_fs_size_count(void *p, lfs_block_t block) {
4232 (void)block;
4233 lfs_size_t *size = p;
4234 *size += 1;
4235 return 0;
4236 }
4237
4238 lfs_ssize_t lfs_fs_size(lfs_t *lfs) {
4239 LFS_TRACE("lfs_fs_size(%p)", (void*)lfs);
4240 lfs_size_t size = 0;
4241 int err = lfs_fs_traverseraw(lfs, lfs_fs_size_count, &size, false);
4242 if (err) {
4243 LFS_TRACE("lfs_fs_size -> %d", err);
4244 return err;
4245 }
4246
4247 LFS_TRACE("lfs_fs_size -> %d", err);
4248 return size;
4249 }
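
// Example usage (a minimal sketch): converting the block count returned by
// lfs_fs_size into bytes. The result is best-effort and may over-count,
// since blocks referenced by open files are also included. Compiled out by
// default.
#if 0
static lfs_ssize_t example_fs_size_bytes(lfs_t *lfs) {
    lfs_ssize_t blocks = lfs_fs_size(lfs);
    if (blocks < 0) {
        // propagate the error code
        return blocks;
    }
    return blocks * (lfs_ssize_t)lfs->cfg->block_size;
}
#endif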
4250
4251 #ifdef LFS_MIGRATE
4252 ////// Migration from littlefs v1 below this //////
4253
4254 /// Version info ///
4255
4256 // Software library version
4257 // Major (top 16 bits), incremented on backwards incompatible changes
4258 // Minor (bottom 16 bits), incremented on feature additions
4259 #define LFS1_VERSION 0x00010007
4260 #define LFS1_VERSION_MAJOR (0xffff & (LFS1_VERSION >> 16))
4261 #define LFS1_VERSION_MINOR (0xffff & (LFS1_VERSION >> 0))
4262
4263 // Version of on-disk data structures
4264 // Major (top 16 bits), incremented on backwards incompatible changes
4265 // Minor (bottom 16 bits), incremented on feature additions
4266 #define LFS1_DISK_VERSION 0x00010001
4267 #define LFS1_DISK_VERSION_MAJOR (0xffff & (LFS1_DISK_VERSION >> 16))
4268 #define LFS1_DISK_VERSION_MINOR (0xffff & (LFS1_DISK_VERSION >> 0))
4269
4270
4271 /// v1 Definitions ///
4272
4273 // File types
4274 enum lfs1_type {
4275 LFS1_TYPE_REG = 0x11,
4276 LFS1_TYPE_DIR = 0x22,
4277 LFS1_TYPE_SUPERBLOCK = 0x2e,
4278 };
4279
4280 typedef struct lfs1 {
4281 lfs_block_t root[2];
4282 } lfs1_t;
4283
4284 typedef struct lfs1_entry {
4285 lfs_off_t off;
4286
4287 struct lfs1_disk_entry {
4288 uint8_t type;
4289 uint8_t elen;
4290 uint8_t alen;
4291 uint8_t nlen;
4292 union {
4293 struct {
4294 lfs_block_t head;
4295 lfs_size_t size;
4296 } file;
4297 lfs_block_t dir[2];
4298 } u;
4299 } d;
4300 } lfs1_entry_t;
4301
4302 typedef struct lfs1_dir {
4303 struct lfs1_dir *next;
4304 lfs_block_t pair[2];
4305 lfs_off_t off;
4306
4307 lfs_block_t head[2];
4308 lfs_off_t pos;
4309
4310 struct lfs1_disk_dir {
4311 uint32_t rev;
4312 lfs_size_t size;
4313 lfs_block_t tail[2];
4314 } d;
4315 } lfs1_dir_t;
4316
4317 typedef struct lfs1_superblock {
4318 lfs_off_t off;
4319
4320 struct lfs1_disk_superblock {
4321 uint8_t type;
4322 uint8_t elen;
4323 uint8_t alen;
4324 uint8_t nlen;
4325 lfs_block_t root[2];
4326 uint32_t block_size;
4327 uint32_t block_count;
4328 uint32_t version;
4329 char magic[8];
4330 } d;
4331 } lfs1_superblock_t;
4332
4333
4334 /// Low-level wrappers v1->v2 ///
4335 static void lfs1_crc(uint32_t *crc, const void *buffer, size_t size) {
4336 *crc = lfs_crc(*crc, buffer, size);
4337 }
4338
4339 static int lfs1_bd_read(lfs_t *lfs, lfs_block_t block,
4340 lfs_off_t off, void *buffer, lfs_size_t size) {
4341 // if we ever do more than writes to alternating pairs,
4342 // this may need to consider pcache
4343 return lfs_bd_read(lfs, &lfs->pcache, &lfs->rcache, size,
4344 block, off, buffer, size);
4345 }
4346
4347 static int lfs1_bd_crc(lfs_t *lfs, lfs_block_t block,
4348 lfs_off_t off, lfs_size_t size, uint32_t *crc) {
4349 for (lfs_off_t i = 0; i < size; i++) {
4350 uint8_t c;
4351 int err = lfs1_bd_read(lfs, block, off+i, &c, 1);
4352 if (err) {
4353 return err;
4354 }
4355
4356 lfs1_crc(crc, &c, 1);
4357 }
4358
4359 return 0;
4360 }
4361
4362
4363 /// Endian swapping functions ///
4364 static void lfs1_dir_fromle32(struct lfs1_disk_dir *d) {
4365 d->rev = lfs_fromle32(d->rev);
4366 d->size = lfs_fromle32(d->size);
4367 d->tail[0] = lfs_fromle32(d->tail[0]);
4368 d->tail[1] = lfs_fromle32(d->tail[1]);
4369 }
4370
4371 static void lfs1_dir_tole32(struct lfs1_disk_dir *d) {
4372 d->rev = lfs_tole32(d->rev);
4373 d->size = lfs_tole32(d->size);
4374 d->tail[0] = lfs_tole32(d->tail[0]);
4375 d->tail[1] = lfs_tole32(d->tail[1]);
4376 }
4377
4378 static void lfs1_entry_fromle32(struct lfs1_disk_entry *d) {
4379 d->u.dir[0] = lfs_fromle32(d->u.dir[0]);
4380 d->u.dir[1] = lfs_fromle32(d->u.dir[1]);
4381 }
4382
4383 static void lfs1_entry_tole32(struct lfs1_disk_entry *d) {
4384 d->u.dir[0] = lfs_tole32(d->u.dir[0]);
4385 d->u.dir[1] = lfs_tole32(d->u.dir[1]);
4386 }
4387
4388 static void lfs1_superblock_fromle32(struct lfs1_disk_superblock *d) {
4389 d->root[0] = lfs_fromle32(d->root[0]);
4390 d->root[1] = lfs_fromle32(d->root[1]);
4391 d->block_size = lfs_fromle32(d->block_size);
4392 d->block_count = lfs_fromle32(d->block_count);
4393 d->version = lfs_fromle32(d->version);
4394 }
4395
4396
4397 /// Metadata pair and directory operations ///
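// a v1 directory entry on disk is a 4-byte header (type, elen, alen, nlen)
// followed by elen bytes of entry data, alen bytes of attributes, and nlen
// bytes of name, which is what lfs1_entry_size sums up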
4398 static inline lfs_size_t lfs1_entry_size(const lfs1_entry_t *entry) {
4399 return 4 + entry->d.elen + entry->d.alen + entry->d.nlen;
4400 }
4401
4402 static int lfs1_dir_fetch(lfs_t *lfs,
4403 lfs1_dir_t *dir, const lfs_block_t pair[2]) {
4404 // copy out pair, otherwise may be aliasing dir
4405 const lfs_block_t tpair[2] = {pair[0], pair[1]};
4406 bool valid = false;
4407
4408 // check both blocks for the most recent revision
4409 for (int i = 0; i < 2; i++) {
4410 struct lfs1_disk_dir test;
4411 int err = lfs1_bd_read(lfs, tpair[i], 0, &test, sizeof(test));
4412 lfs1_dir_fromle32(&test);
4413 if (err) {
4414 if (err == LFS_ERR_CORRUPT) {
4415 continue;
4416 }
4417 return err;
4418 }
4419
4420 if (valid && lfs_scmp(test.rev, dir->d.rev) < 0) {
4421 continue;
4422 }
4423
4424 if ((0x7fffffff & test.size) < sizeof(test)+4 ||
4425 (0x7fffffff & test.size) > lfs->cfg->block_size) {
4426 continue;
4427 }
4428
4429 uint32_t crc = 0xffffffff;
4430 lfs1_dir_tole32(&test);
4431 lfs1_crc(&crc, &test, sizeof(test));
4432 lfs1_dir_fromle32(&test);
4433 err = lfs1_bd_crc(lfs, tpair[i], sizeof(test),
4434 (0x7fffffff & test.size) - sizeof(test), &crc);
4435 if (err) {
4436 if (err == LFS_ERR_CORRUPT) {
4437 continue;
4438 }
4439 return err;
4440 }
4441
4442 if (crc != 0) {
4443 continue;
4444 }
4445
4446 valid = true;
4447
4448 // setup dir in case it's valid
4449 dir->pair[0] = tpair[(i+0) % 2];
4450 dir->pair[1] = tpair[(i+1) % 2];
4451 dir->off = sizeof(dir->d);
4452 dir->d = test;
4453 }
4454
4455 if (!valid) {
4456 LFS_ERROR("Corrupted dir pair at {0x%"PRIx32", 0x%"PRIx32"}",
4457 tpair[0], tpair[1]);
4458 return LFS_ERR_CORRUPT;
4459 }
4460
4461 return 0;
4462 }
4463
4464 static int lfs1_dir_next(lfs_t *lfs, lfs1_dir_t *dir, lfs1_entry_t *entry) {
4465 while (dir->off + sizeof(entry->d) > (0x7fffffff & dir->d.size)-4) {
4466 if (!(0x80000000 & dir->d.size)) {
4467 entry->off = dir->off;
4468 return LFS_ERR_NOENT;
4469 }
4470
4471 int err = lfs1_dir_fetch(lfs, dir, dir->d.tail);
4472 if (err) {
4473 return err;
4474 }
4475
4476 dir->off = sizeof(dir->d);
4477 dir->pos += sizeof(dir->d) + 4;
4478 }
4479
4480 int err = lfs1_bd_read(lfs, dir->pair[0], dir->off,
4481 &entry->d, sizeof(entry->d));
4482 lfs1_entry_fromle32(&entry->d);
4483 if (err) {
4484 return err;
4485 }
4486
4487 entry->off = dir->off;
4488 dir->off += lfs1_entry_size(entry);
4489 dir->pos += lfs1_entry_size(entry);
4490 return 0;
4491 }
4492
4493 /// littlefs v1 specific operations ///
4494 int lfs1_traverse(lfs_t *lfs, int (*cb)(void*, lfs_block_t), void *data) {
4495 if (lfs_pair_isnull(lfs->lfs1->root)) {
4496 return 0;
4497 }
4498
4499 // iterate over metadata pairs
4500 lfs1_dir_t dir;
4501 lfs1_entry_t entry;
4502 lfs_block_t cwd[2] = {0, 1};
4503
4504 while (true) {
4505 for (int i = 0; i < 2; i++) {
4506 int err = cb(data, cwd[i]);
4507 if (err) {
4508 return err;
4509 }
4510 }
4511
4512 int err = lfs1_dir_fetch(lfs, &dir, cwd);
4513 if (err) {
4514 return err;
4515 }
4516
4517 // iterate over contents
4518 while (dir.off + sizeof(entry.d) <= (0x7fffffff & dir.d.size)-4) {
4519 err = lfs1_bd_read(lfs, dir.pair[0], dir.off,
4520 &entry.d, sizeof(entry.d));
4521 lfs1_entry_fromle32(&entry.d);
4522 if (err) {
4523 return err;
4524 }
4525
4526 dir.off += lfs1_entry_size(&entry);
4527 if ((0x70 & entry.d.type) == (0x70 & LFS1_TYPE_REG)) {
4528 err = lfs_ctz_traverse(lfs, NULL, &lfs->rcache,
4529 entry.d.u.file.head, entry.d.u.file.size, cb, data);
4530 if (err) {
4531 return err;
4532 }
4533 }
4534 }
4535
4536 // we also need to check if we contain a threaded v2 directory
4537 lfs_mdir_t dir2 = {.split=true, .tail={cwd[0], cwd[1]}};
4538 while (dir2.split) {
4539 err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
4540 if (err) {
4541 break;
4542 }
4543
4544 for (int i = 0; i < 2; i++) {
4545 err = cb(data, dir2.pair[i]);
4546 if (err) {
4547 return err;
4548 }
4549 }
4550 }
4551
4552 cwd[0] = dir.d.tail[0];
4553 cwd[1] = dir.d.tail[1];
4554
4555 if (lfs_pair_isnull(cwd)) {
4556 break;
4557 }
4558 }
4559
4560 return 0;
4561 }
4562
4563 static int lfs1_moved(lfs_t *lfs, const void *e) {
4564 if (lfs_pair_isnull(lfs->lfs1->root)) {
4565 return 0;
4566 }
4567
4568 // skip superblock
4569 lfs1_dir_t cwd;
4570 int err = lfs1_dir_fetch(lfs, &cwd, (const lfs_block_t[2]){0, 1});
4571 if (err) {
4572 return err;
4573 }
4574
4575     // iterate over all directory entries
4576 lfs1_entry_t entry;
4577 while (!lfs_pair_isnull(cwd.d.tail)) {
4578 err = lfs1_dir_fetch(lfs, &cwd, cwd.d.tail);
4579 if (err) {
4580 return err;
4581 }
4582
4583 while (true) {
4584 err = lfs1_dir_next(lfs, &cwd, &entry);
4585 if (err && err != LFS_ERR_NOENT) {
4586 return err;
4587 }
4588
4589 if (err == LFS_ERR_NOENT) {
4590 break;
4591 }
4592
4593 if (!(0x80 & entry.d.type) &&
4594 memcmp(&entry.d.u, e, sizeof(entry.d.u)) == 0) {
4595 return true;
4596 }
4597 }
4598 }
4599
4600 return false;
4601 }
4602
4603 /// Filesystem operations ///
4604 static int lfs1_mount(lfs_t *lfs, struct lfs1 *lfs1,
4605 const struct lfs_config *cfg) {
4606 int err = 0;
4607 {
4608 err = lfs_init(lfs, cfg);
4609 if (err) {
4610 return err;
4611 }
4612
4613 lfs->lfs1 = lfs1;
4614 lfs->lfs1->root[0] = LFS_BLOCK_NULL;
4615 lfs->lfs1->root[1] = LFS_BLOCK_NULL;
4616
4617 // setup free lookahead
4618 lfs->free.off = 0;
4619 lfs->free.size = 0;
4620 lfs->free.i = 0;
4621 lfs_alloc_ack(lfs);
4622
4623 // load superblock
4624 lfs1_dir_t dir;
4625 lfs1_superblock_t superblock;
4626 err = lfs1_dir_fetch(lfs, &dir, (const lfs_block_t[2]){0, 1});
4627 if (err && err != LFS_ERR_CORRUPT) {
4628 goto cleanup;
4629 }
4630
4631 if (!err) {
4632 err = lfs1_bd_read(lfs, dir.pair[0], sizeof(dir.d),
4633 &superblock.d, sizeof(superblock.d));
4634 lfs1_superblock_fromle32(&superblock.d);
4635 if (err) {
4636 goto cleanup;
4637 }
4638
4639 lfs->lfs1->root[0] = superblock.d.root[0];
4640 lfs->lfs1->root[1] = superblock.d.root[1];
4641 }
4642
4643 if (err || memcmp(superblock.d.magic, "littlefs", 8) != 0) {
4644 LFS_ERROR("Invalid superblock at {0x%"PRIx32", 0x%"PRIx32"}",
4645 0, 1);
4646 err = LFS_ERR_CORRUPT;
4647 goto cleanup;
4648 }
4649
4650 uint16_t major_version = (0xffff & (superblock.d.version >> 16));
4651 uint16_t minor_version = (0xffff & (superblock.d.version >> 0));
4652 if ((major_version != LFS1_DISK_VERSION_MAJOR ||
4653 minor_version > LFS1_DISK_VERSION_MINOR)) {
4654 LFS_ERROR("Invalid version v%d.%d", major_version, minor_version);
4655 err = LFS_ERR_INVAL;
4656 goto cleanup;
4657 }
4658
4659 return 0;
4660 }
4661
4662 cleanup:
4663 lfs_deinit(lfs);
4664 return err;
4665 }
4666
4667 static int lfs1_unmount(lfs_t *lfs) {
4668 return lfs_deinit(lfs);
4669 }
4670
4671 /// v1 migration ///
4672 int lfs_migrate(lfs_t *lfs, const struct lfs_config *cfg) {
4673 LFS_TRACE("lfs_migrate(%p, %p {.context=%p, "
4674 ".read=%p, .prog=%p, .erase=%p, .sync=%p, "
4675 ".read_size=%"PRIu32", .prog_size=%"PRIu32", "
4676 ".block_size=%"PRIu32", .block_count=%"PRIu32", "
4677 ".block_cycles=%"PRIu32", .cache_size=%"PRIu32", "
4678 ".lookahead_size=%"PRIu32", .read_buffer=%p, "
4679 ".prog_buffer=%p, .lookahead_buffer=%p, "
4680 ".name_max=%"PRIu32", .file_max=%"PRIu32", "
4681 ".attr_max=%"PRIu32"})",
4682 (void*)lfs, (void*)cfg, cfg->context,
4683 (void*)(uintptr_t)cfg->read, (void*)(uintptr_t)cfg->prog,
4684 (void*)(uintptr_t)cfg->erase, (void*)(uintptr_t)cfg->sync,
4685 cfg->read_size, cfg->prog_size, cfg->block_size, cfg->block_count,
4686 cfg->block_cycles, cfg->cache_size, cfg->lookahead_size,
4687 cfg->read_buffer, cfg->prog_buffer, cfg->lookahead_buffer,
4688 cfg->name_max, cfg->file_max, cfg->attr_max);
4689 struct lfs1 lfs1;
4690 int err = lfs1_mount(lfs, &lfs1, cfg);
4691 if (err) {
4692 LFS_TRACE("lfs_migrate -> %d", err);
4693 return err;
4694 }
4695
4696 {
4697 // iterate through each directory, copying over entries
4698 // into new directory
4699 lfs1_dir_t dir1;
4700 lfs_mdir_t dir2;
4701 dir1.d.tail[0] = lfs->lfs1->root[0];
4702 dir1.d.tail[1] = lfs->lfs1->root[1];
4703 while (!lfs_pair_isnull(dir1.d.tail)) {
4704 // iterate old dir
4705 err = lfs1_dir_fetch(lfs, &dir1, dir1.d.tail);
4706 if (err) {
4707 goto cleanup;
4708 }
4709
4710 // create new dir and bind as temporary pretend root
4711 err = lfs_dir_alloc(lfs, &dir2);
4712 if (err) {
4713 goto cleanup;
4714 }
4715
4716 dir2.rev = dir1.d.rev;
4717 dir1.head[0] = dir1.pair[0];
4718 dir1.head[1] = dir1.pair[1];
4719 lfs->root[0] = dir2.pair[0];
4720 lfs->root[1] = dir2.pair[1];
4721
4722 err = lfs_dir_commit(lfs, &dir2, NULL, 0);
4723 if (err) {
4724 goto cleanup;
4725 }
4726
4727 while (true) {
4728 lfs1_entry_t entry1;
4729 err = lfs1_dir_next(lfs, &dir1, &entry1);
4730 if (err && err != LFS_ERR_NOENT) {
4731 goto cleanup;
4732 }
4733
4734 if (err == LFS_ERR_NOENT) {
4735 break;
4736 }
4737
4738 // check that entry has not been moved
4739 if (entry1.d.type & 0x80) {
4740 int moved = lfs1_moved(lfs, &entry1.d.u);
4741 if (moved < 0) {
4742 err = moved;
4743 goto cleanup;
4744 }
4745
4746 if (moved) {
4747 continue;
4748 }
4749
4750 entry1.d.type &= ~0x80;
4751 }
4752
4753 // also fetch name
4754 char name[LFS_NAME_MAX+1];
4755 memset(name, 0, sizeof(name));
4756 err = lfs1_bd_read(lfs, dir1.pair[0],
4757 entry1.off + 4+entry1.d.elen+entry1.d.alen,
4758 name, entry1.d.nlen);
4759 if (err) {
4760 goto cleanup;
4761 }
4762
4763 bool isdir = (entry1.d.type == LFS1_TYPE_DIR);
4764
4765 // create entry in new dir
4766 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
4767 if (err) {
4768 goto cleanup;
4769 }
4770
4771 uint16_t id;
4772 err = lfs_dir_find(lfs, &dir2, &(const char*){name}, &id);
4773 if (!(err == LFS_ERR_NOENT && id != 0x3ff)) {
4774 err = (err < 0) ? err : LFS_ERR_EXIST;
4775 goto cleanup;
4776 }
4777
4778 lfs1_entry_tole32(&entry1.d);
4779 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
4780 {LFS_MKTAG(LFS_TYPE_CREATE, id, 0)},
4781 {LFS_MKTAG_IF_ELSE(isdir,
4782 LFS_TYPE_DIR, id, entry1.d.nlen,
4783 LFS_TYPE_REG, id, entry1.d.nlen),
4784 name},
4785 {LFS_MKTAG_IF_ELSE(isdir,
4786 LFS_TYPE_DIRSTRUCT, id, sizeof(entry1.d.u),
4787 LFS_TYPE_CTZSTRUCT, id, sizeof(entry1.d.u)),
4788 &entry1.d.u}));
4789 lfs1_entry_fromle32(&entry1.d);
4790 if (err) {
4791 goto cleanup;
4792 }
4793 }
4794
4795 if (!lfs_pair_isnull(dir1.d.tail)) {
4796 // find last block and update tail to thread into fs
4797 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
4798 if (err) {
4799 goto cleanup;
4800 }
4801
4802 while (dir2.split) {
4803 err = lfs_dir_fetch(lfs, &dir2, dir2.tail);
4804 if (err) {
4805 goto cleanup;
4806 }
4807 }
4808
4809 lfs_pair_tole32(dir2.pair);
4810 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
4811 {LFS_MKTAG(LFS_TYPE_SOFTTAIL, 0x3ff, 8), dir1.d.tail}));
4812 lfs_pair_fromle32(dir2.pair);
4813 if (err) {
4814 goto cleanup;
4815 }
4816 }
4817
4818 // Copy over first block to thread into fs. Unfortunately
4819 // if this fails there is not much we can do.
4820 LFS_DEBUG("Migrating {0x%"PRIx32", 0x%"PRIx32"} "
4821 "-> {0x%"PRIx32", 0x%"PRIx32"}",
4822 lfs->root[0], lfs->root[1], dir1.head[0], dir1.head[1]);
4823
4824 err = lfs_bd_erase(lfs, dir1.head[1]);
4825 if (err) {
4826 goto cleanup;
4827 }
4828
4829 err = lfs_dir_fetch(lfs, &dir2, lfs->root);
4830 if (err) {
4831 goto cleanup;
4832 }
4833
4834 for (lfs_off_t i = 0; i < dir2.off; i++) {
4835 uint8_t dat;
4836 err = lfs_bd_read(lfs,
4837 NULL, &lfs->rcache, dir2.off,
4838 dir2.pair[0], i, &dat, 1);
4839 if (err) {
4840 goto cleanup;
4841 }
4842
4843 err = lfs_bd_prog(lfs,
4844 &lfs->pcache, &lfs->rcache, true,
4845 dir1.head[1], i, &dat, 1);
4846 if (err) {
4847 goto cleanup;
4848 }
4849 }
4850
4851 err = lfs_bd_flush(lfs, &lfs->pcache, &lfs->rcache, true);
4852 if (err) {
4853 goto cleanup;
4854 }
4855 }
4856
4857 // Create new superblock. This marks a successful migration!
4858 err = lfs1_dir_fetch(lfs, &dir1, (const lfs_block_t[2]){0, 1});
4859 if (err) {
4860 goto cleanup;
4861 }
4862
4863 dir2.pair[0] = dir1.pair[0];
4864 dir2.pair[1] = dir1.pair[1];
4865 dir2.rev = dir1.d.rev;
4866 dir2.off = sizeof(dir2.rev);
4867 dir2.etag = 0xffffffff;
4868 dir2.count = 0;
4869 dir2.tail[0] = lfs->lfs1->root[0];
4870 dir2.tail[1] = lfs->lfs1->root[1];
4871 dir2.erased = false;
4872 dir2.split = true;
4873
4874 lfs_superblock_t superblock = {
4875 .version = LFS_DISK_VERSION,
4876 .block_size = lfs->cfg->block_size,
4877 .block_count = lfs->cfg->block_count,
4878 .name_max = lfs->name_max,
4879 .file_max = lfs->file_max,
4880 .attr_max = lfs->attr_max,
4881 };
4882
4883 lfs_superblock_tole32(&superblock);
4884 err = lfs_dir_commit(lfs, &dir2, LFS_MKATTRS(
4885 {LFS_MKTAG(LFS_TYPE_CREATE, 0, 0)},
4886 {LFS_MKTAG(LFS_TYPE_SUPERBLOCK, 0, 8), "littlefs"},
4887 {LFS_MKTAG(LFS_TYPE_INLINESTRUCT, 0, sizeof(superblock)),
4888 &superblock}));
4889 if (err) {
4890 goto cleanup;
4891 }
4892
4893 // sanity check that fetch works
4894 err = lfs_dir_fetch(lfs, &dir2, (const lfs_block_t[2]){0, 1});
4895 if (err) {
4896 goto cleanup;
4897 }
4898
4899 // force compaction to prevent accidentally mounting v1
4900 dir2.erased = false;
4901 err = lfs_dir_commit(lfs, &dir2, NULL, 0);
4902 if (err) {
4903 goto cleanup;
4904 }
4905 }
4906
4907 cleanup:
4908 lfs1_unmount(lfs);
4909 LFS_TRACE("lfs_migrate -> %d", err);
4910 return err;
4911 }
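
// Example usage (a minimal sketch): migrating a littlefs v1 image in place
// and then mounting it as v2. `cfg` is assumed to be a caller-provided
// struct lfs_config; lfs_migrate is only available when LFS_MIGRATE is
// defined. Compiled out by default.
#if 0
static int example_mount_with_migration(lfs_t *lfs,
        const struct lfs_config *cfg) {
    int err = lfs_mount(lfs, cfg);
    if (err) {
        // mount failed, try migrating from v1 and mounting again
        err = lfs_migrate(lfs, cfg);
        if (err) {
            return err;
        }
        err = lfs_mount(lfs, cfg);
    }
    return err;
}
#endif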
4912
4913 #endif
4914