Lines Matching full:fs
11 #include <zephyr/fs/zms.h>
18 static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate);
19 static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry);
20 static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt);
21 static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
23 static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry,
43 static int zms_lookup_cache_rebuild(struct zms_fs *fs) in zms_lookup_cache_rebuild() argument
53 memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache)); in zms_lookup_cache_rebuild()
54 addr = fs->ate_wra; in zms_lookup_cache_rebuild()
59 rc = zms_prev_ate(fs, &addr, &ate); in zms_lookup_cache_rebuild()
65 cache_entry = &fs->lookup_cache[zms_lookup_cache_pos(ate.id)]; in zms_lookup_cache_rebuild()
72 rc = zms_get_sector_cycle(fs, ate_addr, &current_cycle); in zms_lookup_cache_rebuild()
81 if (zms_ate_valid_different_sector(fs, &ate, current_cycle)) { in zms_lookup_cache_rebuild()
87 if (addr == fs->ate_wra) { in zms_lookup_cache_rebuild()
95 static void zms_lookup_cache_invalidate(struct zms_fs *fs, uint32_t sector) in zms_lookup_cache_invalidate() argument
97 uint64_t *cache_entry = fs->lookup_cache; in zms_lookup_cache_invalidate()
98 uint64_t *const cache_end = &fs->lookup_cache[CONFIG_ZMS_LOOKUP_CACHE_SIZE]; in zms_lookup_cache_invalidate()
110 static inline off_t zms_addr_to_offset(struct zms_fs *fs, uint64_t addr) in zms_addr_to_offset() argument
112 return fs->offset + (fs->sector_size * SECTOR_NUM(addr)) + SECTOR_OFFSET(addr); in zms_addr_to_offset()
116 static inline size_t zms_round_down_write_block_size(struct zms_fs *fs, size_t len) in zms_round_down_write_block_size() argument
118 return len & ~(fs->flash_parameters->write_block_size - 1U); in zms_round_down_write_block_size()
122 static inline size_t zms_round_up_write_block_size(struct zms_fs *fs, size_t len) in zms_round_up_write_block_size() argument
124 return (len + (fs->flash_parameters->write_block_size - 1U)) & in zms_round_up_write_block_size()
125 ~(fs->flash_parameters->write_block_size - 1U); in zms_round_up_write_block_size()
128 /* zms_al_size returns size aligned to fs->write_block_size */
129 static inline size_t zms_al_size(struct zms_fs *fs, size_t len) in zms_al_size() argument
131 size_t write_block_size = fs->flash_parameters->write_block_size; in zms_al_size()
137 return zms_round_up_write_block_size(fs, len); in zms_al_size()
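The two rounding helpers above rely on write_block_size being a power of two, which lets them round with bit masks instead of division. A minimal sketch of the arithmetic, with an illustrative write_block_size of 8 (an assumption for the example):

    /* Sketch only: mask-based rounding as used by the helpers above. */
    static size_t round_down_wbs(size_t len, size_t wbs)
    {
        return len & ~(wbs - 1U);              /* 13 -> 8 for wbs = 8  */
    }

    static size_t round_up_wbs(size_t len, size_t wbs)
    {
        return (len + wbs - 1U) & ~(wbs - 1U); /* 13 -> 16 for wbs = 8 */
    }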
141 static inline uint64_t zms_empty_ate_addr(struct zms_fs *fs, uint64_t addr) in zms_empty_ate_addr() argument
143 return (addr & ADDR_SECT_MASK) + fs->sector_size - fs->ate_size; in zms_empty_ate_addr()
147 static inline uint64_t zms_close_ate_addr(struct zms_fs *fs, uint64_t addr) in zms_close_ate_addr() argument
149 return (addr & ADDR_SECT_MASK) + fs->sector_size - 2 * fs->ate_size; in zms_close_ate_addr()
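Together, these two address helpers imply the tail layout of every sector: the last ATE slot holds the empty ATE and the slot just before it the close ATE. With illustrative values sector_size = 4096 and ate_size = 16 (assumptions for the example):

    /* offset 4064  close ATE  (sector_size - 2 * ate_size) */
    /* offset 4080  empty ATE  (sector_size - ate_size)     */
    /* offset 4096  end of sector                           */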
153 static int zms_flash_al_wrt(struct zms_fs *fs, uint64_t addr, const void *data, size_t len) in zms_flash_al_wrt() argument
166 offset = zms_addr_to_offset(fs, addr); in zms_flash_al_wrt()
168 blen = zms_round_down_write_block_size(fs, len); in zms_flash_al_wrt()
170 rc = flash_write(fs->flash_device, offset, data8, blen); in zms_flash_al_wrt()
181 (void)memset(buf + len, fs->flash_parameters->erase_value, in zms_flash_al_wrt()
182 fs->flash_parameters->write_block_size - len); in zms_flash_al_wrt()
184 rc = flash_write(fs->flash_device, offset, buf, in zms_flash_al_wrt()
185 fs->flash_parameters->write_block_size); in zms_flash_al_wrt()
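zms_flash_al_wrt() writes the block-aligned prefix of the buffer directly, then pads the unaligned tail with the flash erase value in a scratch buffer before issuing one final block-sized write. A self-contained sketch of that pattern; the function name, parameters, and the assumption that write_block_size fits in a 16-byte buffer are all illustrative:

    #include <string.h>
    #include <zephyr/drivers/flash.h>

    static int aligned_write_sketch(const struct device *dev, off_t off,
                                    const uint8_t *data, size_t len,
                                    size_t wbs, uint8_t erase_value)
    {
        uint8_t buf[16];                 /* assumes wbs <= 16           */
        size_t blen = len & ~(wbs - 1U); /* block-aligned prefix        */
        int rc = 0;

        if (blen > 0) {
            rc = flash_write(dev, off, data, blen);
            if (rc) {
                return rc;
            }
            off += blen;
            data += blen;
            len -= blen;
        }
        if (len) {                       /* pad and write the tail      */
            memcpy(buf, data, len);
            memset(buf + len, erase_value, wbs - len);
            rc = flash_write(dev, off, buf, wbs);
        }
        return rc;
    }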
193 static int zms_flash_rd(struct zms_fs *fs, uint64_t addr, void *data, size_t len) in zms_flash_rd() argument
197 offset = zms_addr_to_offset(fs, addr); in zms_flash_rd()
199 return flash_read(fs->flash_device, offset, data, len); in zms_flash_rd()
203 static int zms_flash_ate_wrt(struct zms_fs *fs, const struct zms_ate *entry) in zms_flash_ate_wrt() argument
207 rc = zms_flash_al_wrt(fs, fs->ate_wra, entry, sizeof(struct zms_ate)); in zms_flash_ate_wrt()
214 fs->lookup_cache[zms_lookup_cache_pos(entry->id)] = fs->ate_wra; in zms_flash_ate_wrt()
217 fs->ate_wra -= zms_al_size(fs, sizeof(struct zms_ate)); in zms_flash_ate_wrt()
223 static int zms_flash_data_wrt(struct zms_fs *fs, const void *data, size_t len) in zms_flash_data_wrt() argument
227 rc = zms_flash_al_wrt(fs, fs->data_wra, data, len); in zms_flash_data_wrt()
231 fs->data_wra += zms_al_size(fs, len); in zms_flash_data_wrt()
237 static int zms_flash_ate_rd(struct zms_fs *fs, uint64_t addr, struct zms_ate *entry) in zms_flash_ate_rd() argument
239 return zms_flash_rd(fs, addr, entry, sizeof(struct zms_ate)); in zms_flash_ate_rd()
243 * in blocks of size ZMS_BLOCK_SIZE aligned to fs->write_block_size
246 static int zms_flash_block_cmp(struct zms_fs *fs, uint64_t addr, const void *data, size_t len) in zms_flash_block_cmp() argument
254 block_size = zms_round_down_write_block_size(fs, ZMS_BLOCK_SIZE); in zms_flash_block_cmp()
258 rc = zms_flash_rd(fs, addr, buf, bytes_to_cmp); in zms_flash_block_cmp()
277 static int zms_flash_cmp_const(struct zms_fs *fs, uint64_t addr, uint8_t value, size_t len) in zms_flash_cmp_const() argument
284 block_size = zms_round_down_write_block_size(fs, ZMS_BLOCK_SIZE); in zms_flash_cmp_const()
289 rc = zms_flash_block_cmp(fs, addr, cmp, bytes_to_cmp); in zms_flash_cmp_const()
302 static int zms_flash_block_move(struct zms_fs *fs, uint64_t addr, size_t len) in zms_flash_block_move() argument
309 block_size = zms_round_down_write_block_size(fs, ZMS_BLOCK_SIZE); in zms_flash_block_move()
313 rc = zms_flash_rd(fs, addr, buf, bytes_to_copy); in zms_flash_block_move()
317 rc = zms_flash_data_wrt(fs, buf, bytes_to_copy); in zms_flash_block_move()
330 static int zms_flash_erase_sector(struct zms_fs *fs, uint64_t addr) in zms_flash_erase_sector() argument
335 flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT; in zms_flash_erase_sector()
343 offset = zms_addr_to_offset(fs, addr); in zms_flash_erase_sector()
346 fs->sector_size); in zms_flash_erase_sector()
349 zms_lookup_cache_invalidate(fs, SECTOR_NUM(addr)); in zms_flash_erase_sector()
351 rc = flash_erase(fs->flash_device, offset, fs->sector_size); in zms_flash_erase_sector()
357 if (zms_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value, fs->sector_size)) { in zms_flash_erase_sector()
401 static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) in zms_ate_valid() argument
403 return zms_ate_valid_different_sector(fs, entry, fs->sector_cycle); in zms_ate_valid()
412 static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry, in zms_ate_valid_different_sector() argument
422 static inline int zms_get_cycle_on_sector_change(struct zms_fs *fs, uint64_t addr, in zms_get_cycle_on_sector_change() argument
431 rc = zms_get_sector_cycle(fs, addr, cycle_cnt); in zms_get_cycle_on_sector_change()
451 static bool zms_close_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) in zms_close_ate_valid() argument
453 return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) && in zms_close_ate_valid()
454 (entry->id == ZMS_HEAD_ID) && !((fs->sector_size - entry->offset) % fs->ate_size)); in zms_close_ate_valid()
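The last term of the predicate above checks that the close ATE's offset lands on an ATE boundary measured from the sector end. A worked check with illustrative numbers (sector_size = 4096 and ate_size = 16 are assumptions):

    /* entry->offset = 4064: (4096 - 4064) % 16 == 0 -> boundary OK */
    /* entry->offset = 4060: (4096 - 4060) % 16 == 4 -> rejected    */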
463 static bool zms_empty_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) in zms_empty_ate_valid() argument
465 return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && in zms_empty_ate_valid()
476 static bool zms_gc_done_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) in zms_gc_done_ate_valid() argument
478 return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) && in zms_gc_done_ate_valid()
488 static int zms_validate_closed_sector(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate, in zms_validate_closed_sector() argument
494 rc = zms_get_sector_header(fs, addr, empty_ate, close_ate); in zms_validate_closed_sector()
499 if (zms_empty_ate_valid(fs, empty_ate) && zms_close_ate_valid(fs, close_ate) && in zms_validate_closed_sector()
509 static int zms_flash_write_entry(struct zms_fs *fs, uint32_t id, const void *data, size_t len) in zms_flash_write_entry() argument
519 entry.cycle_cnt = fs->sector_cycle; in zms_flash_write_entry()
526 entry.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra); in zms_flash_write_entry()
535 rc = zms_flash_data_wrt(fs, data, len); in zms_flash_write_entry()
541 rc = zms_flash_ate_wrt(fs, &entry); in zms_flash_write_entry()
553 static int zms_recover_last_ate(struct zms_fs *fs, uint64_t *addr, uint64_t *data_wra) in zms_recover_last_ate() argument
563 *addr -= 2 * fs->ate_size; in zms_recover_last_ate()
571 rc = zms_flash_ate_rd(fs, ate_end_addr, &end_ate); in zms_recover_last_ate()
575 if (zms_ate_valid(fs, &end_ate)) { in zms_recover_last_ate()
579 data_end_addr += end_ate.offset + zms_al_size(fs, end_ate.len); in zms_recover_last_ate()
584 ate_end_addr -= fs->ate_size; in zms_recover_last_ate()
591 static int zms_compute_prev_addr(struct zms_fs *fs, uint64_t *addr) in zms_compute_prev_addr() argument
597 *addr += fs->ate_size; in zms_compute_prev_addr()
598 if ((SECTOR_OFFSET(*addr)) != (fs->sector_size - 2 * fs->ate_size)) { in zms_compute_prev_addr()
604 *addr += ((uint64_t)(fs->sector_count - 1) << ADDR_SECT_SHIFT); in zms_compute_prev_addr()
610 sec_closed = zms_validate_closed_sector(fs, *addr, &empty_ate, &close_ate); in zms_compute_prev_addr()
618 *addr = fs->ate_wra; in zms_compute_prev_addr()
632 static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate) in zms_prev_ate() argument
636 rc = zms_flash_ate_rd(fs, *addr, ate); in zms_prev_ate()
641 return zms_compute_prev_addr(fs, addr); in zms_prev_ate()
644 static void zms_sector_advance(struct zms_fs *fs, uint64_t *addr) in zms_sector_advance() argument
647 if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) { in zms_sector_advance()
648 *addr -= ((uint64_t)fs->sector_count << ADDR_SECT_SHIFT); in zms_sector_advance()
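zms_sector_advance() keeps the sector index in the upper bits of the 64-bit address and wraps it at sector_count. The increment step does not match the search term and is elided from the listing above, so it is inferred in this sketch:

    static void sector_advance_sketch(uint64_t *addr, uint32_t sector_count)
    {
        *addr += (1ULL << ADDR_SECT_SHIFT);               /* inferred step */
        if ((*addr >> ADDR_SECT_SHIFT) == sector_count) {
            *addr -= ((uint64_t)sector_count << ADDR_SECT_SHIFT); /* wrap */
        }
        /* With sector_count = 4, the index cycles 0 -> 1 -> 2 -> 3 -> 0. */
    }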
655 static int zms_sector_close(struct zms_fs *fs) in zms_sector_close() argument
663 close_ate.offset = (uint32_t)SECTOR_OFFSET(fs->ate_wra + fs->ate_size); in zms_sector_close()
665 close_ate.cycle_cnt = fs->sector_cycle; in zms_sector_close()
679 memset(&garbage_ate, fs->flash_parameters->erase_value, sizeof(garbage_ate)); in zms_sector_close()
680 while (SECTOR_OFFSET(fs->ate_wra) && (fs->ate_wra >= fs->data_wra)) { in zms_sector_close()
681 rc = zms_flash_ate_wrt(fs, &garbage_ate); in zms_sector_close()
687 fs->ate_wra = zms_close_ate_addr(fs, fs->ate_wra); in zms_sector_close()
691 (void)zms_flash_ate_wrt(fs, &close_ate); in zms_sector_close()
693 zms_sector_advance(fs, &fs->ate_wra); in zms_sector_close()
695 rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); in zms_sector_close()
698 fs->sector_cycle = 0; in zms_sector_close()
704 fs->data_wra = fs->ate_wra & ADDR_SECT_MASK; in zms_sector_close()
709 static int zms_add_gc_done_ate(struct zms_fs *fs) in zms_add_gc_done_ate() argument
713 LOG_DBG("Adding gc done ate at %llx", fs->ate_wra); in zms_add_gc_done_ate()
716 gc_done_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra); in zms_add_gc_done_ate()
718 gc_done_ate.cycle_cnt = fs->sector_cycle; in zms_add_gc_done_ate()
722 return zms_flash_ate_wrt(fs, &gc_done_ate); in zms_add_gc_done_ate()
725 static int zms_add_empty_ate(struct zms_fs *fs, uint64_t addr) in zms_add_empty_ate() argument
734 LOG_DBG("Adding empty ate at %llx", (uint64_t)(addr + fs->sector_size - fs->ate_size)); in zms_add_empty_ate()
741 rc = zms_get_sector_cycle(fs, addr, &cycle_cnt); in zms_add_empty_ate()
754 /* Adding empty ate to this sector changes fs->ate_wra value in zms_add_empty_ate()
757 previous_ate_wra = fs->ate_wra; in zms_add_empty_ate()
758 fs->ate_wra = zms_empty_ate_addr(fs, addr); in zms_add_empty_ate()
759 rc = zms_flash_ate_wrt(fs, &empty_ate); in zms_add_empty_ate()
763 fs->ate_wra = previous_ate_wra; in zms_add_empty_ate()
768 static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt) in zms_get_sector_cycle() argument
774 empty_addr = zms_empty_ate_addr(fs, addr); in zms_get_sector_cycle()
777 rc = zms_flash_ate_rd(fs, empty_addr, &empty_ate); in zms_get_sector_cycle()
783 if (zms_empty_ate_valid(fs, &empty_ate)) { in zms_get_sector_cycle()
792 static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate, in zms_get_sector_header() argument
798 close_addr = zms_close_ate_addr(fs, addr); in zms_get_sector_header()
800 rc = zms_flash_ate_rd(fs, close_addr, close_ate); in zms_get_sector_header()
806 rc = zms_flash_ate_rd(fs, close_addr + fs->ate_size, empty_ate); in zms_get_sector_header()
817 * @param fs Pointer to file system
828 static int zms_find_ate_with_id(struct zms_fs *fs, uint32_t id, uint64_t start_addr, in zms_find_ate_with_id() argument
843 rc = zms_prev_ate(fs, &wlk_addr, &wlk_ate); in zms_find_ate_with_id()
851 rc = zms_get_cycle_on_sector_change(fs, wlk_prev_addr, previous_sector_num, in zms_find_ate_with_id()
856 if (zms_ate_valid_different_sector(fs, &wlk_ate, current_cycle)) { in zms_find_ate_with_id()
874 static int zms_gc(struct zms_fs *fs) in zms_gc() argument
891 rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); in zms_gc()
894 rc = zms_flash_erase_sector(fs, fs->ate_wra); in zms_gc()
899 rc = zms_add_empty_ate(fs, fs->ate_wra); in zms_gc()
906 rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); in zms_gc()
914 previous_cycle = fs->sector_cycle; in zms_gc()
916 sec_addr = (fs->ate_wra & ADDR_SECT_MASK); in zms_gc()
917 zms_sector_advance(fs, &sec_addr); in zms_gc()
918 gc_addr = sec_addr + fs->sector_size - fs->ate_size; in zms_gc()
921 sec_closed = zms_validate_closed_sector(fs, gc_addr, &empty_ate, &close_ate); in zms_gc()
932 fs->sector_cycle = empty_ate.cycle_cnt; in zms_gc()
935 stop_addr = gc_addr - 2 * fs->ate_size; in zms_gc()
944 rc = zms_prev_ate(fs, &gc_addr, &gc_ate); in zms_gc()
949 if (!zms_ate_valid(fs, &gc_ate) || !gc_ate.len) { in zms_gc()
954 wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(gc_ate.id)]; in zms_gc()
957 wlk_addr = fs->ate_wra; in zms_gc()
960 wlk_addr = fs->ate_wra; in zms_gc()
968 rc = zms_find_ate_with_id(fs, gc_ate.id, wlk_addr, fs->ate_wra, &wlk_ate, in zms_gc()
987 gc_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra); in zms_gc()
989 rc = zms_flash_block_move(fs, data_addr, gc_ate.len); in zms_gc()
997 rc = zms_flash_ate_wrt(fs, &gc_ate); in zms_gc()
1007 fs->sector_cycle = previous_cycle; in zms_gc()
1012 rc = zms_add_gc_done_ate(fs); in zms_gc()
1018 rc = zms_flash_erase_sector(fs, sec_addr); in zms_gc()
1024 zms_lookup_cache_invalidate(fs, sec_addr >> ADDR_SECT_SHIFT); in zms_gc()
1026 rc = zms_add_empty_ate(fs, sec_addr); in zms_gc()
1031 int zms_clear(struct zms_fs *fs) in zms_clear() argument
1036 if (!fs->ready) { in zms_clear()
1041 k_mutex_lock(&fs->zms_lock, K_FOREVER); in zms_clear()
1042 for (uint32_t i = 0; i < fs->sector_count; i++) { in zms_clear()
1044 rc = zms_flash_erase_sector(fs, addr); in zms_clear()
1048 rc = zms_add_empty_ate(fs, addr); in zms_clear()
1055 fs->ready = false; in zms_clear()
1058 k_mutex_unlock(&fs->zms_lock); in zms_clear()
1063 static int zms_init(struct zms_fs *fs) in zms_init() argument
1077 k_mutex_lock(&fs->zms_lock, K_FOREVER); in zms_init()
1083 for (i = 0; i < fs->sector_count; i++) { in zms_init()
1084 addr = zms_close_ate_addr(fs, ((uint64_t)i << ADDR_SECT_SHIFT)); in zms_init()
1087 sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate); in zms_init()
1093 fs->sector_cycle = empty_ate.cycle_cnt; in zms_init()
1109 zms_sector_advance(fs, &addr); in zms_init()
1112 sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate); in zms_init()
1118 fs->sector_cycle = empty_ate.cycle_cnt; in zms_init()
1126 /* all sectors are closed and the ZMS magic number was not found. This is not a ZMS fs */ in zms_init()
1127 if ((closed_sectors == fs->sector_count) && !zms_magic_exist) { in zms_init()
1135 if (i == fs->sector_count) { in zms_init()
1141 rc = zms_flash_ate_rd(fs, addr - fs->ate_size, &first_ate); in zms_init()
1145 if (!zms_ate_valid(fs, &first_ate)) { in zms_init()
1146 zms_sector_advance(fs, &addr); in zms_init()
1148 rc = zms_get_sector_header(fs, addr, &empty_ate, &close_ate); in zms_init()
1153 if (zms_empty_ate_valid(fs, &empty_ate)) { in zms_init()
1165 rc = zms_flash_erase_sector(fs, addr); in zms_init()
1169 rc = zms_add_empty_ate(fs, addr); in zms_init()
1174 rc = zms_get_sector_cycle(fs, addr, &fs->sector_cycle); in zms_init()
1177 fs->sector_cycle = 0; in zms_init()
1188 rc = zms_recover_last_ate(fs, &addr, &data_wra); in zms_init()
1196 fs->ate_wra = addr; in zms_init()
1197 fs->data_wra = data_wra; in zms_init()
1199 /* fs->ate_wra should point to the next available entry. This is normally in zms_init()
1204 while (fs->ate_wra >= fs->data_wra) { in zms_init()
1205 rc = zms_flash_ate_rd(fs, fs->ate_wra, &last_ate); in zms_init()
1209 if (!zms_ate_valid(fs, &last_ate)) { in zms_init()
1217 if ((fs->ate_wra == fs->data_wra) && last_ate.len) { in zms_init()
1223 fs->ate_wra -= fs->ate_size; in zms_init()
1235 addr = zms_close_ate_addr(fs, fs->ate_wra); in zms_init()
1236 zms_sector_advance(fs, &addr); in zms_init()
1239 sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate); in zms_init()
1246 /* The sector after fs->ate_wra is closed. in zms_init()
1252 fs->sector_cycle = empty_ate.cycle_cnt; in zms_init()
1253 addr = fs->ate_wra + fs->ate_size; in zms_init()
1254 while (SECTOR_OFFSET(addr) < (fs->sector_size - 2 * fs->ate_size)) { in zms_init()
1255 rc = zms_flash_ate_rd(fs, addr, &gc_done_ate); in zms_init()
1260 if (zms_gc_done_ate_valid(fs, &gc_done_ate)) { in zms_init()
1263 addr += fs->ate_size; in zms_init()
1269 addr = fs->ate_wra & ADDR_SECT_MASK; in zms_init()
1270 zms_sector_advance(fs, &addr); in zms_init()
1271 rc = zms_flash_erase_sector(fs, addr); in zms_init()
1275 rc = zms_add_empty_ate(fs, addr); in zms_init()
1279 rc = zms_flash_erase_sector(fs, fs->ate_wra); in zms_init()
1283 rc = zms_add_empty_ate(fs, fs->ate_wra); in zms_init()
1289 fs->ate_wra &= ADDR_SECT_MASK; in zms_init()
1290 fs->ate_wra += (fs->sector_size - 3 * fs->ate_size); in zms_init()
1291 fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK); in zms_init()
1295 * So, temporarily, we set the lookup cache to the end of the fs. in zms_init()
1299 fs->lookup_cache[i] = fs->ate_wra; in zms_init()
1302 rc = zms_gc(fs); in zms_init()
1309 rc = zms_lookup_cache_rebuild(fs); in zms_init()
1315 if ((!rc) && (SECTOR_OFFSET(fs->ate_wra) == (fs->sector_size - 3 * fs->ate_size))) { in zms_init()
1316 rc = zms_add_gc_done_ate(fs); in zms_init()
1318 k_mutex_unlock(&fs->zms_lock); in zms_init()
1323 int zms_mount(struct zms_fs *fs) in zms_mount() argument
1329 k_mutex_init(&fs->zms_lock); in zms_mount()
1331 fs->flash_parameters = flash_get_parameters(fs->flash_device); in zms_mount()
1332 if (fs->flash_parameters == NULL) { in zms_mount()
1337 fs->ate_size = zms_al_size(fs, sizeof(struct zms_ate)); in zms_mount()
1338 write_block_size = fs->flash_parameters->write_block_size; in zms_mount()
1349 if (flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT) { in zms_mount()
1350 rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info); in zms_mount()
1355 if (!fs->sector_size || fs->sector_size % info.size) { in zms_mount()
1364 if (fs->sector_size < ZMS_MIN_ATE_NUM * fs->ate_size) { in zms_mount()
1366 ZMS_MIN_ATE_NUM * fs->ate_size); in zms_mount()
1370 if (fs->sector_count < 2) { in zms_mount()
1375 rc = zms_init(fs); in zms_mount()
1382 fs->ready = true; in zms_mount()
1384 LOG_INF("%u Sectors of %u bytes", fs->sector_count, fs->sector_size); in zms_mount()
1385 LOG_INF("alloc wra: %llu, %llx", SECTOR_NUM(fs->ate_wra), SECTOR_OFFSET(fs->ate_wra)); in zms_mount()
1386 LOG_INF("data wra: %llu, %llx", SECTOR_NUM(fs->data_wra), SECTOR_OFFSET(fs->data_wra)); in zms_mount()
1391 ssize_t zms_write(struct zms_fs *fs, uint32_t id, const void *data, size_t len) in zms_write() argument
1400 if (!fs->ready) { in zms_write()
1405 data_size = zms_al_size(fs, len); in zms_write()
1412 if ((len > (fs->sector_size - 5 * fs->ate_size)) || (len > UINT16_MAX) || in zms_write()
1419 wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)]; in zms_write()
1425 wlk_addr = fs->ate_wra; in zms_write()
1432 int prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate, &rd_addr); in zms_write()
1461 rc = zms_flash_block_cmp(fs, rd_addr, data, len); in zms_write()
1482 required_space = data_size + fs->ate_size; in zms_write()
1484 required_space = fs->ate_size; in zms_write()
1488 k_mutex_lock(&fs->zms_lock, K_FOREVER); in zms_write()
1492 if (gc_count == fs->sector_count) { in zms_write()
1501 * empty (even for delete ATE). Otherwise, the fs->ate_wra will be decremented in zms_write()
1503 * So the first position of a sector (fs->ate_wra = 0x0) is forbidden for ATEs in zms_write()
1506 if ((SECTOR_OFFSET(fs->ate_wra)) && in zms_write()
1507 (fs->ate_wra >= (fs->data_wra + required_space)) && in zms_write()
1508 (SECTOR_OFFSET(fs->ate_wra - fs->ate_size) || !len)) { in zms_write()
1509 rc = zms_flash_write_entry(fs, id, data, len); in zms_write()
1515 rc = zms_sector_close(fs); in zms_write()
1520 rc = zms_gc(fs); in zms_write()
1529 k_mutex_unlock(&fs->zms_lock); in zms_write()
1533 int zms_delete(struct zms_fs *fs, uint32_t id) in zms_delete() argument
1535 return zms_write(fs, id, NULL, 0); in zms_delete()
1538 ssize_t zms_read_hist(struct zms_fs *fs, uint32_t id, void *data, size_t len, uint32_t cnt) in zms_read_hist() argument
1551 if (!fs->ready) { in zms_read_hist()
1559 wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)]; in zms_read_hist()
1566 wlk_addr = fs->ate_wra; in zms_read_hist()
1572 prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate, in zms_read_hist()
1585 rc = zms_compute_prev_addr(fs, &wlk_prev_addr); in zms_read_hist()
1610 rc = zms_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len)); in zms_read_hist()
1635 ssize_t zms_read(struct zms_fs *fs, uint32_t id, void *data, size_t len) in zms_read() argument
1639 rc = zms_read_hist(fs, id, data, len, 0); in zms_read()
1648 ssize_t zms_get_data_length(struct zms_fs *fs, uint32_t id) in zms_get_data_length() argument
1652 rc = zms_read_hist(fs, id, NULL, 0, 0); in zms_get_data_length()
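As the call above shows, zms_get_data_length() is implemented by passing a zero-length buffer to zms_read_hist(), which then returns the stored entry's length without copying any data. A short usage sketch building on the mount example; the ID and the use of the kernel heap (k_malloc from zephyr/kernel.h) are assumptions:

    ssize_t size = zms_get_data_length(&fs, 1U);

    if (size > 0) {
        uint8_t *buf = k_malloc(size);   /* size the buffer first */

        if (buf != NULL) {
            (void)zms_read(&fs, 1U, buf, size);
            k_free(buf);
        }
    }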
1657 ssize_t zms_calc_free_space(struct zms_fs *fs) in zms_calc_free_space() argument
1674 const uint32_t second_to_last_offset = (2 * fs->ate_size); in zms_calc_free_space()
1676 if (!fs->ready) { in zms_calc_free_space()
1686 free_space = (fs->sector_count - 1) * (fs->sector_size - 4 * fs->ate_size); in zms_calc_free_space()
1688 step_addr = fs->ate_wra; in zms_calc_free_space()
1692 rc = zms_prev_ate(fs, &step_addr, &step_ate); in zms_calc_free_space()
1698 rc = zms_get_cycle_on_sector_change(fs, step_prev_addr, previous_sector_num, in zms_calc_free_space()
1708 if (!zms_ate_valid_different_sector(fs, &step_ate, current_cycle) || in zms_calc_free_space()
1715 prev_found = zms_find_ate_with_id(fs, step_ate.id, wlk_addr, step_addr, &wlk_ate, in zms_calc_free_space()
1726 free_space -= zms_al_size(fs, step_ate.len); in zms_calc_free_space()
1728 free_space -= fs->ate_size; in zms_calc_free_space()
1730 } while (step_addr != fs->ate_wra); in zms_calc_free_space()
1733 current_cycle = fs->sector_cycle; in zms_calc_free_space()
1739 for (int i = 0; i < fs->sector_count; i++) { in zms_calc_free_space()
1740 step_addr = zms_close_ate_addr(fs, ((uint64_t)i << ADDR_SECT_SHIFT)); in zms_calc_free_space()
1743 sec_closed = zms_validate_closed_sector(fs, step_addr, &empty_ate, &close_ate); in zms_calc_free_space()
1753 free_space -= fs->ate_size; in zms_calc_free_space()
1756 fs->sector_cycle = empty_ate.cycle_cnt; in zms_calc_free_space()
1757 rc = zms_recover_last_ate(fs, &step_addr, &data_wra); in zms_calc_free_space()
1762 free_space -= fs->ate_size; in zms_calc_free_space()
1767 fs->sector_cycle = current_cycle; in zms_calc_free_space()
1772 size_t zms_active_sector_free_space(struct zms_fs *fs) in zms_active_sector_free_space() argument
1774 if (!fs->ready) { in zms_active_sector_free_space()
1779 return fs->ate_wra - fs->data_wra - fs->ate_size; in zms_active_sector_free_space()
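The return value above is the gap between the two write pointers minus one ATE slot, since a new entry consumes an ATE in addition to its payload. Worked with illustrative numbers:

    /* ate_wra at offset 4048, data_wra at offset 256, ate_size = 16:
     * 4048 - 256 - 16 = 3776 payload bytes left in the active sector. */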
1782 int zms_sector_use_next(struct zms_fs *fs) in zms_sector_use_next() argument
1786 if (!fs->ready) { in zms_sector_use_next()
1791 k_mutex_lock(&fs->zms_lock, K_FOREVER); in zms_sector_use_next()
1793 ret = zms_sector_close(fs); in zms_sector_use_next()
1798 ret = zms_gc(fs); in zms_sector_use_next()
1801 k_mutex_unlock(&fs->zms_lock); in zms_sector_use_next()