 * SPDX-License-Identifier: Apache-2.0

static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate);
static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt);
static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
				 struct zms_ate *close_ate);

	/* 32-bit integer hash function found by https://github.com/skeeto/hash-prospector. */
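/* For context, a minimal sketch of the kind of hash the comment above refers
 * to: the published "lowbias32" function from hash-prospector. Whether these
 * exact constants are the ones used in this file is an assumption.
 *
 *	static uint32_t lowbias32(uint32_t x)
 *	{
 *		x ^= x >> 16;
 *		x *= 0x7feb352dU;
 *		x ^= x >> 15;
 *		x *= 0x846ca68bU;
 *		x ^= x >> 16;
 *		return x;
 *	}
 *
 * The cache slot would then be lowbias32(id) % CONFIG_ZMS_LOOKUP_CACHE_SIZE.
 */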
	uint64_t addr;

	memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache));
	addr = fs->ate_wra;

		/* Make a copy of 'addr' as it will be advanced by zms_prev_ate() */
		ate_addr = addr;
		rc = zms_prev_ate(fs, &addr, &ate);

			cache_entry = &fs->lookup_cache[zms_lookup_cache_pos(ate.id)];

		if (rc == -ENOENT) {

	if (addr == fs->ate_wra) {
	uint64_t *cache_entry = fs->lookup_cache;
	uint64_t *const cache_end = &fs->lookup_cache[CONFIG_ZMS_LOOKUP_CACHE_SIZE];
static inline off_t zms_addr_to_offset(struct zms_fs *fs, uint64_t addr)
{
	return fs->offset + (fs->sector_size * SECTOR_NUM(addr)) + SECTOR_OFFSET(addr);
}

	return len & ~(fs->flash_parameters->write_block_size - 1U);

	return (len + (fs->flash_parameters->write_block_size - 1U)) &
	       ~(fs->flash_parameters->write_block_size - 1U);
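/* Worked example of the two rounding helpers above (the bit-mask arithmetic
 * they use requires write_block_size to be a power of two): with
 * write_block_size = 8,
 *
 *	round_down(13) = 13 & ~7 = 8
 *	round_up(13)   = (13 + 7) & ~7 = 16
 */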
/* zms_al_size returns size aligned to fs->write_block_size */

	size_t write_block_size = fs->flash_parameters->write_block_size;

static inline uint64_t zms_empty_ate_addr(struct zms_fs *fs, uint64_t addr)
{
	return (addr & ADDR_SECT_MASK) + fs->sector_size - fs->ate_size;
}

static inline uint64_t zms_close_ate_addr(struct zms_fs *fs, uint64_t addr)
{
	return (addr & ADDR_SECT_MASK) + fs->sector_size - 2 * fs->ate_size;
}
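/* Sector tail layout implied by the two helpers above (a sketch; each slot is
 * fs->ate_size bytes wide):
 *
 *	| data ... | ... ATEs ... | close ATE | empty ATE |
 *	^ sector start            ^ sector_size - 2*ate_size
 *	                                      ^ sector_size - ate_size
 */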
static int zms_flash_al_wrt(struct zms_fs *fs, uint64_t addr, const void *data, size_t len)

	offset = zms_addr_to_offset(fs, addr);

		rc = flash_write(fs->flash_device, offset, data8, blen);

		len -= blen;

		(void)memset(buf + len, fs->flash_parameters->erase_value,
			     fs->flash_parameters->write_block_size - len);

		rc = flash_write(fs->flash_device, offset, buf,
				 fs->flash_parameters->write_block_size);
static int zms_flash_rd(struct zms_fs *fs, uint64_t addr, void *data, size_t len)
{
	off_t offset;

	offset = zms_addr_to_offset(fs, addr);

	return flash_read(fs->flash_device, offset, data, len);
}

	rc = zms_flash_al_wrt(fs, fs->ate_wra, entry, sizeof(struct zms_ate));

	/* 0xFFFFFFFF is a special-purpose identifier. Exclude it from the cache */
	if (entry->id != ZMS_HEAD_ID) {
		fs->lookup_cache[zms_lookup_cache_pos(entry->id)] = fs->ate_wra;
	}

	fs->ate_wra -= zms_al_size(fs, sizeof(struct zms_ate));
	rc = zms_flash_al_wrt(fs, fs->data_wra, data, len);

	fs->data_wra += zms_al_size(fs, len);
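/* Note how the two write pointers converge: zms_flash_ate_wrt() decrements
 * fs->ate_wra downward from the sector end, while zms_flash_data_wrt()
 * increments fs->data_wra upward from the sector start. The free space in the
 * active sector is the gap between them (see zms_active_sector_free_space()
 * near the end of this file).
 */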
static int zms_flash_ate_rd(struct zms_fs *fs, uint64_t addr, struct zms_ate *entry)
{
	return zms_flash_rd(fs, addr, entry, sizeof(struct zms_ate));
}

/* zms_flash_block_cmp compares the data in flash at addr to data
 * in blocks of size ZMS_BLOCK_SIZE aligned to fs->write_block_size
 */
static int zms_flash_block_cmp(struct zms_fs *fs, uint64_t addr, const void *data, size_t len)

		rc = zms_flash_rd(fs, addr, buf, bytes_to_cmp);

		len -= bytes_to_cmp;
		addr += bytes_to_cmp;
/* zms_flash_cmp_const compares the data in flash at addr to a constant
 * value
 */
static int zms_flash_cmp_const(struct zms_fs *fs, uint64_t addr, uint8_t value, size_t len)

		rc = zms_flash_block_cmp(fs, addr, cmp, bytes_to_cmp);

		len -= bytes_to_cmp;
		addr += bytes_to_cmp;
/* flash block move: move a block at addr to the current data write location
 * (fs->data_wra)
 */
static int zms_flash_block_move(struct zms_fs *fs, uint64_t addr, size_t len)

		rc = zms_flash_rd(fs, addr, buf, bytes_to_copy);

		len -= bytes_to_copy;
		addr += bytes_to_copy;
static int zms_flash_erase_sector(struct zms_fs *fs, uint64_t addr)

		flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT;

	addr &= ADDR_SECT_MASK;
	offset = zms_addr_to_offset(fs, addr);

	LOG_DBG("Erasing flash at offset 0x%lx ( 0x%llx ), len %u", (long)offset, addr,
		fs->sector_size);

	zms_lookup_cache_invalidate(fs, SECTOR_NUM(addr));

	rc = flash_erase(fs->flash_device, offset, fs->sector_size);

	if (zms_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value, fs->sector_size)) {
		rc = -ENXIO;
			  sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8));
	entry->crc8 = crc8;

			  sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8));
	if (crc8 == entry->crc8) {
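/* A sketch of the full call the two fragments above belong to, assuming crc8
 * is the last member of struct zms_ate so the CRC covers every byte before it
 * (the 0xff seed is an assumption; crc8_ccitt() is Zephyr's CRC-8-CCITT
 * helper from <zephyr/sys/crc.h>):
 *
 *	uint8_t crc8 = crc8_ccitt(0xff, (uint8_t *)entry,
 *				  sizeof(struct zms_ate) -
 *					  SIZEOF_FIELD(struct zms_ate, crc8));
 */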
	return zms_ate_valid_different_sector(fs, entry, fs->sector_cycle);

	if ((cycle_cnt != entry->cycle_cnt) || zms_ate_crc8_check(entry)) {

static inline int zms_get_cycle_on_sector_change(struct zms_fs *fs, uint64_t addr,

	if (SECTOR_NUM(addr) != previous_sector_num) {
		rc = zms_get_sector_cycle(fs, addr, cycle_cnt);
		if (rc == -ENOENT) {
 * - a valid ate
 * - with len = 0 and id = ZMS_HEAD_ID
 * - and an offset that is a whole number of ATEs back from the sector end

	return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) &&
		(entry->id == ZMS_HEAD_ID) && !((fs->sector_size - entry->offset) % fs->ate_size));
 * - a valid ate
 * - with len = 0xffff and id = 0xffffffff

	return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) &&
		(entry->len == 0xffff) && (entry->id == ZMS_HEAD_ID));

 * - valid ate
 * - len = 0
 * - id = 0xffffffff

	return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) &&
		(entry->id == ZMS_HEAD_ID));
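/* Summary of the three special header ATEs validated above (all carry
 * id == ZMS_HEAD_ID, i.e. 0xffffffff, and a valid CRC):
 *
 *	close ATE:   len == 0,      offset a whole number of ATEs from sector end
 *	empty ATE:   len == 0xffff
 *	gc done ATE: len == 0
 */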
/* Read the empty and close ATEs of the sector that contains address "addr" and
 * validate that the sector is closed
 */
static int zms_validate_closed_sector(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
				      struct zms_ate *close_ate)

	rc = zms_get_sector_header(fs, addr, empty_ate, close_ate);

	    (empty_ate->cycle_cnt == close_ate->cycle_cnt)) {

	entry.cycle_cnt = fs->sector_cycle;

	entry.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra);
static int zms_recover_last_ate(struct zms_fs *fs, uint64_t *addr, uint64_t *data_wra)

	LOG_DBG("Recovering last ate from sector %llu", SECTOR_NUM(*addr));

	*addr -= 2 * fs->ate_size;

	ate_end_addr = *addr;
	data_end_addr = *addr & ADDR_SECT_MASK;

			/* found a valid ate, update data_end_addr and *addr */

			*addr = ate_end_addr;

		ate_end_addr -= fs->ate_size;
/* compute the address of the previous ATE */
static int zms_compute_prev_addr(struct zms_fs *fs, uint64_t *addr)

	*addr += fs->ate_size;
	if ((SECTOR_OFFSET(*addr)) != (fs->sector_size - 2 * fs->ate_size)) {

	if (SECTOR_NUM(*addr) == 0U) {
		*addr += ((uint64_t)(fs->sector_count - 1) << ADDR_SECT_SHIFT);
	} else {
		*addr -= (1ULL << ADDR_SECT_SHIFT);
	}

	sec_closed = zms_validate_closed_sector(fs, *addr, &empty_ate, &close_ate);

		*addr = fs->ate_wra;

	(*addr) &= ADDR_SECT_MASK;
	(*addr) += close_ate.offset;
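/* Worked example of the wrap-around above: with sector_count = 4, stepping
 * back from the first ATE slot of sector 0 lands in sector 3. If sector 3 is
 * closed, *addr becomes (3ULL << ADDR_SECT_SHIFT) + close_ate.offset, i.e.
 * the last ATE recorded by that sector's close ATE; otherwise the walk
 * restarts at fs->ate_wra.
 */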
/* read the ATE at *addr, then set *addr to the previous ATE */
static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate)

	rc = zms_flash_ate_rd(fs, *addr, ate);

	return zms_compute_prev_addr(fs, addr);

static void zms_sector_advance(struct zms_fs *fs, uint64_t *addr)
{
	*addr += (1ULL << ADDR_SECT_SHIFT);
	if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) {
		*addr -= ((uint64_t)fs->sector_count << ADDR_SECT_SHIFT);
	}
}
	close_ate.offset = (uint32_t)SECTOR_OFFSET(fs->ate_wra + fs->ate_size);

	close_ate.cycle_cnt = fs->sector_cycle;

	/* Example:
	 * - An ATE with cycle_cnt == 0 is written as the last ATE of the sector
	 * - This ATE is never overwritten in the next 255 cycles because of
	 *   large data size
	 * - On the 256th cycle the leading cycle_cnt is 0 again, so this stale
	 *   ATE becomes valid
	 */
	memset(&garbage_ate, fs->flash_parameters->erase_value, sizeof(garbage_ate));
	while (SECTOR_OFFSET(fs->ate_wra) && (fs->ate_wra >= fs->data_wra)) {

	fs->ate_wra = zms_close_ate_addr(fs, fs->ate_wra);

	zms_sector_advance(fs, &fs->ate_wra);

	rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle);
	if (rc == -ENOENT) {
		fs->sector_cycle = 0;

	fs->data_wra = fs->ate_wra & ADDR_SECT_MASK;
	LOG_DBG("Adding gc done ate at %llx", fs->ate_wra);

	gc_done_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra);

	gc_done_ate.cycle_cnt = fs->sector_cycle;
static int zms_add_empty_ate(struct zms_fs *fs, uint64_t addr)

	addr &= ADDR_SECT_MASK;

	LOG_DBG("Adding empty ate at %llx", (uint64_t)(addr + fs->sector_size - fs->ate_size));

	rc = zms_get_sector_cycle(fs, addr, &cycle_cnt);
	if (rc == -ENOENT) {

	/* Adding the empty ATE to this sector changes fs->ate_wra,
	 * so save it here and restore it after the write
	 */
	previous_ate_wra = fs->ate_wra;
	fs->ate_wra = zms_empty_ate_addr(fs, addr);

	fs->ate_wra = previous_ate_wra;
static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt)

	empty_addr = zms_empty_ate_addr(fs, addr);

	return -ENOENT;

static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
				 struct zms_ate *close_ate)

	close_addr = zms_close_ate_addr(fs, addr);

	rc = zms_flash_ate_rd(fs, close_addr + fs->ate_size, empty_ate);
/* garbage collection: the address ate_wra has been updated to the new sector
 * that has just been started. The data to gc is in the sector after this new
 * sector.
 */

	rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle);
	if (rc == -ENOENT) {
		rc = zms_flash_erase_sector(fs, fs->ate_wra);

		rc = zms_add_empty_ate(fs, fs->ate_wra);

		rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle);

	previous_cycle = fs->sector_cycle;

	sec_addr = (fs->ate_wra & ADDR_SECT_MASK);

	gc_addr = sec_addr + fs->sector_size - fs->ate_size;

	fs->sector_cycle = empty_ate.cycle_cnt;

	stop_addr = gc_addr - 2 * fs->ate_size;

		wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(gc_ate.id)];

			wlk_addr = fs->ate_wra;

		wlk_addr = fs->ate_wra;

		rc = zms_find_ate_with_id(fs, gc_ate.id, wlk_addr, fs->ate_wra, &wlk_ate,

		/* if walk_addr has reached the same address as gc_addr, a copy is
		 * needed unless it is a deleted item
		 */

			gc_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra);

	fs->sector_cycle = previous_cycle;
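/* Recap of the GC loop above (a sketch of the visible flow, not a verbatim
 * description of elided lines): gc_addr walks the closed sector's ATEs
 * downward from its empty ATE toward stop_addr. For each entry,
 * zms_find_ate_with_id() (seeded from the lookup cache when available)
 * checks whether a newer ATE with the same id exists; only entries that are
 * still current, and not deleted, get their data copied to fs->data_wra in
 * the new sector.
 */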
	uint64_t addr;

	if (!fs->ready) {
		return -EACCES;
	}

	k_mutex_lock(&fs->zms_lock, K_FOREVER);
	for (uint32_t i = 0; i < fs->sector_count; i++) {
		addr = (uint64_t)i << ADDR_SECT_SHIFT;
		rc = zms_flash_erase_sector(fs, addr);

		rc = zms_add_empty_ate(fs, addr);

	fs->ready = false;

	k_mutex_unlock(&fs->zms_lock);
	uint64_t addr = 0U;

	k_mutex_lock(&fs->zms_lock, K_FOREVER);

	for (i = 0; i < fs->sector_count; i++) {
		addr = zms_close_ate_addr(fs, ((uint64_t)i << ADDR_SECT_SHIFT));

		sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate);

			fs->sector_cycle = empty_ate.cycle_cnt;

		rc = -ENOEXEC;

	zms_sector_advance(fs, &addr);
	/* addr is pointing to the close ATE */

	sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate);

		fs->sector_cycle = empty_ate.cycle_cnt;
	/* all sectors are closed and the ZMS magic number was not found; this is not a ZMS fs */
	if ((closed_sectors == fs->sector_count) && !zms_magic_exist) {
		rc = -EDEADLK;

	/* TODO: add a recovery mechanism here if the ZMS magic number exists but all
	 * sectors are closed
	 */

	if (i == fs->sector_count) {
		/* Let's check if the last sector has valid ATEs, otherwise set
		 * the open sector to the first one
		 */
		rc = zms_flash_ate_rd(fs, addr - fs->ate_size, &first_ate);

		zms_sector_advance(fs, &addr);

		rc = zms_get_sector_header(fs, addr, &empty_ate, &close_ate);

		rc = -ENOEXEC;

		rc = zms_flash_erase_sector(fs, addr);

		rc = zms_add_empty_ate(fs, addr);

	rc = zms_get_sector_cycle(fs, addr, &fs->sector_cycle);
	if (rc == -ENOENT) {
		fs->sector_cycle = 0;
	/* addr contains the address of the close ATE in the most recent sector;
	 * recover the last valid ATE and data write address from it
	 */
	rc = zms_recover_last_ate(fs, &addr, &data_wra);

	/* addr now contains the address of the last valid ATE in the most recent
	 * sector and data_wra the corresponding data write address
	 */
	fs->ate_wra = addr;
	fs->data_wra = data_wra;

	/* fs->ate_wra should point to the next available entry. This is normally
	 * the position just below the last valid ATE; step down until a free
	 * slot is found
	 */
	while (fs->ate_wra >= fs->data_wra) {
		rc = zms_flash_ate_rd(fs, fs->ate_wra, &last_ate);

		if ((fs->ate_wra == fs->data_wra) && last_ate.len) {
			rc = -ESPIPE;

		fs->ate_wra -= fs->ate_size;

	/* The sector after the write sector is either empty with a valid empty ATE
	 * (regular case), or it has never been used, or it is a closed sector
	 * (GC didn't finish).
	 * If it is closed we must look for a valid GC done ATE in the current write
	 * sector; if it is missing, we need to restart gc because it has been interrupted.
	 * If no valid empty ATE is found then it has never been used. Just erase it by adding
	 * an empty ATE.
	 */
	addr = zms_close_ate_addr(fs, fs->ate_wra);
	zms_sector_advance(fs, &addr);

	sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate);

		/* The sector after fs->ate_wra is closed.
		 * Look for a valid GC done ATE in the current write sector
		 */
		fs->sector_cycle = empty_ate.cycle_cnt;
		addr = fs->ate_wra + fs->ate_size;
		while (SECTOR_OFFSET(addr) < (fs->sector_size - 2 * fs->ate_size)) {
			rc = zms_flash_ate_rd(fs, addr, &gc_done_ate);

			addr += fs->ate_size;

		addr = fs->ate_wra & ADDR_SECT_MASK;
		zms_sector_advance(fs, &addr);
		rc = zms_flash_erase_sector(fs, addr);

		rc = zms_add_empty_ate(fs, addr);
	rc = zms_flash_erase_sector(fs, fs->ate_wra);

	rc = zms_add_empty_ate(fs, fs->ate_wra);

	fs->ate_wra &= ADDR_SECT_MASK;
	fs->ate_wra += (fs->sector_size - 3 * fs->ate_size);
	fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK);
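/* Worked example of the fresh-sector write pointers set above: assuming (for
 * illustration) sector_size = 4096 and ate_size = 16, fs->ate_wra lands at
 * sector offset 4096 - 3*16 = 4048, i.e. the first usable ATE slot just below
 * the reserved empty and close ATE slots, while fs->data_wra starts at
 * offset 0.
 */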
		fs->lookup_cache[i] = fs->ate_wra;

	if ((!rc) && (SECTOR_OFFSET(fs->ate_wra) == (fs->sector_size - 3 * fs->ate_size))) {

	k_mutex_unlock(&fs->zms_lock);
	k_mutex_init(&fs->zms_lock);

	fs->flash_parameters = flash_get_parameters(fs->flash_device);
	if (fs->flash_parameters == NULL) {
		return -EINVAL;
	}

	fs->ate_size = zms_al_size(fs, sizeof(struct zms_ate));
	write_block_size = fs->flash_parameters->write_block_size;

		return -EINVAL;

	if (flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT) {
		rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info);

			return -EINVAL;

		if (!fs->sector_size || fs->sector_size % info.size) {

			return -EINVAL;

	if (fs->sector_size < ZMS_MIN_ATE_NUM * fs->ate_size) {

			ZMS_MIN_ATE_NUM * fs->ate_size);

	if (fs->sector_count < 2) {
		LOG_ERR("Configuration error - sector count below minimum requirement (2)");
		return -EINVAL;
	}

	fs->ready = true;

	LOG_INF("%u Sectors of %u bytes", fs->sector_count, fs->sector_size);
	LOG_INF("alloc wra: %llu, %llx", SECTOR_NUM(fs->ate_wra), SECTOR_OFFSET(fs->ate_wra));
	LOG_INF("data wra: %llu, %llx", SECTOR_NUM(fs->data_wra), SECTOR_OFFSET(fs->data_wra));
	if (!fs->ready) {
		return -EACCES;
	}

	/* The maximum data size is sector size - 5 ate
	 * where: 1 ate for data, 1 ate for sector close, 1 ate for empty,
	 * 1 ate for gc done, and 1 ate to always allow a delete
	 */
	if ((len > (fs->sector_size - 5 * fs->ate_size)) || (len > UINT16_MAX) ||

		return -EINVAL;
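/* Worked example of the size cap above: assuming (for illustration)
 * sector_size = 4096 and ate_size = 16, the largest single write is
 * 4096 - 5*16 = 4016 bytes, and never more than UINT16_MAX since the ATE
 * length field is 16 bits wide (see the len == 0xffff empty-ATE marker).
 */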
	wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)];

		wlk_addr = fs->ate_wra;

	int prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate, &rd_addr);

		/* skip delete entry for non-existing entry */

		required_space = data_size + fs->ate_size;

		required_space = fs->ate_size;

	k_mutex_lock(&fs->zms_lock, K_FOREVER);

		if (gc_count == fs->sector_count) {

			rc = -ENOSPC;
		/* We need to keep the last ATE slot of the sector (fs->ate_wra = 0x0)
		 * empty (even for delete ATE). Otherwise, the fs->ate_wra will be decremented
		 * past the sector start after this write.
		 * So the first position of a sector (fs->ate_wra = 0x0) is forbidden for ATEs
		 */
		if ((SECTOR_OFFSET(fs->ate_wra)) &&
		    (fs->ate_wra >= (fs->data_wra + required_space)) &&
		    (SECTOR_OFFSET(fs->ate_wra - fs->ate_size) || !len)) {

	k_mutex_unlock(&fs->zms_lock);
	if (!fs->ready) {
		return -EACCES;
	}

	wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)];

		rc = -ENOENT;

		wlk_addr = fs->ate_wra;

	prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate,

		return -ENOENT;

		return -EIO;
	const uint32_t second_to_last_offset = (2 * fs->ate_size);

	if (!fs->ready) {
		return -EACCES;
	}

	free_space = (fs->sector_count - 1) * (fs->sector_size - 4 * fs->ate_size);

	step_addr = fs->ate_wra;

			free_space -= zms_al_size(fs, step_ate.len);

			free_space -= fs->ate_size;

	} while (step_addr != fs->ate_wra);

	current_cycle = fs->sector_cycle;

	for (int i = 0; i < fs->sector_count; i++) {

			free_space -= fs->ate_size;

			fs->sector_cycle = empty_ate.cycle_cnt;

			free_space -= fs->ate_size;

	fs->sector_cycle = current_cycle;
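/* Worked example of the free-space accounting above: assuming (for
 * illustration) sector_count = 4, sector_size = 4096 and ate_size = 16, the
 * starting estimate is (4 - 1) * (4096 - 4*16) = 12096 bytes; each valid
 * entry found during the walk then costs zms_al_size(fs, step_ate.len) for
 * its data plus one fs->ate_size for its ATE.
 */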
	if (!fs->ready) {
		return -EACCES;
	}

	return fs->ate_wra - fs->data_wra - fs->ate_size;
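/* Example with illustrative values: fs->ate_wra at sector offset 4048,
 * fs->data_wra at offset 0 and ate_size = 16 gives 4048 - 0 - 16 = 4032
 * bytes still writable in the active sector; one ATE slot is reserved for
 * the entry's own ATE.
 */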
	if (!fs->ready) {
		return -EACCES;
	}

	k_mutex_lock(&fs->zms_lock, K_FOREVER);

	k_mutex_unlock(&fs->zms_lock);