Lines Matching refs:fs (cross-reference hits for the "struct nvs_fs *fs" handle in Zephyr's NVS source, likely subsys/fs/nvs/nvs.c). The leading number on each line is that line's number in the source file; the trailing "in funcname()" names the enclosing function.
19 static int nvs_prev_ate(struct nvs_fs *fs, uint32_t *addr, struct nvs_ate *ate);
20 static int nvs_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry);
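Before the per-function matches, two definitions nearly every hit relies on. Flash locations are packed into a single uint32_t (sector index in the high half-word, byte offset in the low half-word), and every stored item is described by an allocation table entry (ATE) written from the end of its sector downwards. This is a reconstruction from the matched lines plus the upstream Zephyr sources; the part and crc8 fields never appear in the matches here, so treat them as assumed.

    /* Packed address: sector in bits 31..16, offset within sector in bits 15..0 */
    #define ADDR_SECT_MASK  0xFFFF0000
    #define ADDR_SECT_SHIFT 16
    #define ADDR_OFFS_MASK  0x0000FFFF

    /* Allocation table entry: ATEs grow downward from the sector end while
     * the data they describe grows upward from the sector start.
     */
    struct nvs_ate {
            uint16_t id;     /* item id */
            uint16_t offset; /* data offset within the sector */
            uint16_t len;    /* data length within the sector */
            uint8_t part;    /* multi-part extension field (assumed) */
            uint8_t crc8;    /* crc8 over the preceding fields (assumed) */
    } __packed;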
39 static int nvs_lookup_cache_rebuild(struct nvs_fs *fs) in nvs_lookup_cache_rebuild() argument
46 memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache)); in nvs_lookup_cache_rebuild()
47 addr = fs->ate_wra; in nvs_lookup_cache_rebuild()
52 rc = nvs_prev_ate(fs, &addr, &ate); in nvs_lookup_cache_rebuild()
58 cache_entry = &fs->lookup_cache[nvs_lookup_cache_pos(ate.id)]; in nvs_lookup_cache_rebuild()
61 nvs_ate_valid(fs, &ate)) { in nvs_lookup_cache_rebuild()
65 if (addr == fs->ate_wra) { in nvs_lookup_cache_rebuild()
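The rebuild above walks ATEs newest-to-oldest starting at fs->ate_wra, so a cache slot is filled at most once, by the most recent valid entry hashing to it; the 0xff memset at line 46 marks every slot empty beforehand. A condensed sketch of the loop, assuming 0xFFFFFFFF as the "empty slot" sentinel; note the address cached is a copy taken before nvs_prev_ate() advances the cursor:

    while (true) {
            ate_addr = addr;                     /* nvs_prev_ate() will advance addr */
            rc = nvs_prev_ate(fs, &addr, &ate);  /* line 52 */
            if (rc) {
                    return rc;
            }
            cache_entry = &fs->lookup_cache[nvs_lookup_cache_pos(ate.id)];
            /* newest-first walk: only a still-empty slot may be filled */
            if ((*cache_entry == 0xFFFFFFFF) && nvs_ate_valid(fs, &ate)) {
                    *cache_entry = ate_addr;
            }
            if (addr == fs->ate_wra) {           /* wrapped back: walk complete */
                    break;
            }
    }
    return 0;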
73 static void nvs_lookup_cache_invalidate(struct nvs_fs *fs, uint32_t sector) in nvs_lookup_cache_invalidate() argument
75 uint32_t *cache_entry = fs->lookup_cache; in nvs_lookup_cache_invalidate()
76 uint32_t *const cache_end = &fs->lookup_cache[CONFIG_NVS_LOOKUP_CACHE_SIZE]; in nvs_lookup_cache_invalidate()
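Invalidation is the counterpart used before a sector erase: any cached ATE address pointing into the doomed sector is reset, since the entry it names is about to vanish. A minimal sketch of the loop between the two pointers declared at lines 75-76:

    for (; cache_entry < cache_end; cache_entry++) {
            /* drop cached addresses that live in the sector being erased */
            if ((*cache_entry >> ADDR_SECT_SHIFT) == sector) {
                    *cache_entry = 0xFFFFFFFF; /* assumed 'empty' sentinel */
            }
    }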
89 static inline size_t nvs_al_size(struct nvs_fs *fs, size_t len) in nvs_al_size() argument
91 size_t write_block_size = fs->flash_parameters->write_block_size; in nvs_al_size()
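The body of nvs_al_size() is not among the matches, but the masking used at lines 120, 191, 245 and elsewhere implies the usual round-up-to-write-block-size; a sketch consistent with that:

    static inline size_t nvs_al_size(struct nvs_fs *fs, size_t len)
    {
            size_t write_block_size = fs->flash_parameters->write_block_size;

            if (write_block_size <= 1U) {
                    return len; /* byte-writable flash: nothing to align */
            }
            /* round len up to the next multiple of the write block size */
            return (len + (write_block_size - 1U)) & ~(write_block_size - 1U);
    }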
102 static int nvs_flash_al_wrt(struct nvs_fs *fs, uint32_t addr, const void *data, in nvs_flash_al_wrt() argument
116 offset = fs->offset; in nvs_flash_al_wrt()
117 offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT); in nvs_flash_al_wrt()
120 blen = len & ~(fs->flash_parameters->write_block_size - 1U); in nvs_flash_al_wrt()
122 rc = flash_write(fs->flash_device, offset, data8, blen); in nvs_flash_al_wrt()
133 (void)memset(buf + len, fs->flash_parameters->erase_value, in nvs_flash_al_wrt()
134 fs->flash_parameters->write_block_size - len); in nvs_flash_al_wrt()
136 rc = flash_write(fs->flash_device, offset, buf, in nvs_flash_al_wrt()
137 fs->flash_parameters->write_block_size); in nvs_flash_al_wrt()
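nvs_flash_al_wrt() splits every write at a write-block boundary: the aligned prefix (blen at line 120) goes straight from the caller's buffer, and any unaligned tail is staged in a temporary buffer padded with the erase value so a whole block can still be programmed. A sketch of that shape around lines 120-137 (buf, data8, offset, rc come from the enclosing function), with a worked example in the comment:

    /* e.g. write_block_size = 4, len = 13:
     *   blen = 13 & ~3 = 12 bytes written directly,
     *   1 tail byte + 3 erase_value pad bytes fill the final block
     */
    blen = len & ~(fs->flash_parameters->write_block_size - 1U);
    if (blen > 0) {
            rc = flash_write(fs->flash_device, offset, data8, blen);
            if (rc) {
                    return rc;
            }
            len -= blen;
            offset += blen;
            data8 += blen;
    }
    if (len) {
            memcpy(buf, data8, len);
            (void)memset(buf + len, fs->flash_parameters->erase_value,
                         fs->flash_parameters->write_block_size - len);
            rc = flash_write(fs->flash_device, offset, buf,
                             fs->flash_parameters->write_block_size);
    }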
145 static int nvs_flash_rd(struct nvs_fs *fs, uint32_t addr, void *data, in nvs_flash_rd() argument
151 offset = fs->offset; in nvs_flash_rd()
152 offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT); in nvs_flash_rd()
155 rc = flash_read(fs->flash_device, offset, data, len); in nvs_flash_rd()
160 static int nvs_flash_ate_wrt(struct nvs_fs *fs, const struct nvs_ate *entry) in nvs_flash_ate_wrt() argument
164 rc = nvs_flash_al_wrt(fs, fs->ate_wra, entry, in nvs_flash_ate_wrt()
169 fs->lookup_cache[nvs_lookup_cache_pos(entry->id)] = fs->ate_wra; in nvs_flash_ate_wrt()
172 fs->ate_wra -= nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_flash_ate_wrt()
178 static int nvs_flash_data_wrt(struct nvs_fs *fs, const void *data, size_t len, bool compute_crc) in nvs_flash_data_wrt() argument
191 aligned_len = len & ~(fs->flash_parameters->write_block_size - 1U); in nvs_flash_data_wrt()
192 rc = nvs_flash_al_wrt(fs, fs->data_wra, data8, aligned_len); in nvs_flash_data_wrt()
193 fs->data_wra += aligned_len; in nvs_flash_data_wrt()
212 rc = nvs_flash_al_wrt(fs, fs->data_wra, buf, len); in nvs_flash_data_wrt()
214 rc = nvs_flash_al_wrt(fs, fs->data_wra, data, len); in nvs_flash_data_wrt()
216 fs->data_wra += nvs_al_size(fs, len); in nvs_flash_data_wrt()
222 static int nvs_flash_ate_rd(struct nvs_fs *fs, uint32_t addr, in nvs_flash_ate_rd() argument
225 return nvs_flash_rd(fs, addr, entry, sizeof(struct nvs_ate)); in nvs_flash_ate_rd()
236 static int nvs_flash_block_cmp(struct nvs_fs *fs, uint32_t addr, const void *data, in nvs_flash_block_cmp() argument
245 NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U); in nvs_flash_block_cmp()
249 rc = nvs_flash_rd(fs, addr, buf, bytes_to_cmp); in nvs_flash_block_cmp()
268 static int nvs_flash_cmp_const(struct nvs_fs *fs, uint32_t addr, uint8_t value, in nvs_flash_cmp_const() argument
276 NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U); in nvs_flash_cmp_const()
280 rc = nvs_flash_rd(fs, addr, buf, bytes_to_cmp); in nvs_flash_cmp_const()
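Both compare helpers use the same bounded-stack pattern: read in chunks of at most NVS_BLOCK_SIZE (rounded down to a write-block multiple at lines 245 and 276) into a small local buffer, so records of any size can be compared without heap allocation. The shared shape, sketched:

    uint8_t buf[NVS_BLOCK_SIZE];
    size_t bytes_to_cmp, block_size;

    /* largest chunk that is still write-block aligned */
    block_size = NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);
    while (len) {
            bytes_to_cmp = MIN(block_size, len);
            rc = nvs_flash_rd(fs, addr, buf, bytes_to_cmp);
            if (rc) {
                    return rc;
            }
            rc = memcmp(data8, buf, bytes_to_cmp); /* or scan buf for 'value' */
            if (rc) {
                    return 1; /* mismatch */
            }
            len -= bytes_to_cmp;
            addr += bytes_to_cmp;
            data8 += bytes_to_cmp;
    }
    return 0;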
300 static int nvs_flash_block_move(struct nvs_fs *fs, uint32_t addr, size_t len) in nvs_flash_block_move() argument
307 NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U); in nvs_flash_block_move()
311 rc = nvs_flash_rd(fs, addr, buf, bytes_to_copy); in nvs_flash_block_move()
318 rc = nvs_flash_data_wrt(fs, buf, bytes_to_copy, false); in nvs_flash_block_move()
331 static int nvs_flash_erase_sector(struct nvs_fs *fs, uint32_t addr) in nvs_flash_erase_sector() argument
338 offset = fs->offset; in nvs_flash_erase_sector()
339 offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT); in nvs_flash_erase_sector()
342 fs->sector_size); in nvs_flash_erase_sector()
345 nvs_lookup_cache_invalidate(fs, addr >> ADDR_SECT_SHIFT); in nvs_flash_erase_sector()
347 rc = flash_flatten(fs->flash_device, offset, fs->sector_size); in nvs_flash_erase_sector()
353 if (nvs_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value, in nvs_flash_erase_sector()
354 fs->sector_size)) { in nvs_flash_erase_sector()
405 static int nvs_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry) in nvs_ate_valid() argument
410 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_ate_valid()
414 (position >= (fs->sector_size - ate_size))) { in nvs_ate_valid()
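An ATE is accepted only if its header checksum passes and its data span stays below the close-ATE slot at the top of the sector (the bound at line 414). The checksum half of the test is not among the matches; upstream it is a crc8 over the entry minus its crc8 field, so the reconstruction below hedges that part:

    static int nvs_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry)
    {
            size_t ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
            uint32_t position = entry->offset + entry->len;

            /* nvs_ate_crc8_check() is assumed: crc8 over the header fields */
            if (nvs_ate_crc8_check(entry) ||
                (position >= (fs->sector_size - ate_size))) {
                    return 0;
            }
            return 1;
    }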
427 static int nvs_close_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry) in nvs_close_ate_valid() argument
431 if ((!nvs_ate_valid(fs, entry)) || (entry->len != 0U) || in nvs_close_ate_valid()
436 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_close_ate_valid()
437 if ((fs->sector_size - entry->offset) % ate_size) { in nvs_close_ate_valid()
445 static int nvs_flash_wrt_entry(struct nvs_fs *fs, uint16_t id, const void *data, in nvs_flash_wrt_entry() argument
452 entry.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK); in nvs_flash_wrt_entry()
456 rc = nvs_flash_data_wrt(fs, data, len, true); in nvs_flash_wrt_entry()
469 rc = nvs_flash_ate_wrt(fs, &entry); in nvs_flash_wrt_entry()
482 static int nvs_recover_last_ate(struct nvs_fs *fs, uint32_t *addr) in nvs_recover_last_ate() argument
492 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_recover_last_ate()
498 rc = nvs_flash_ate_rd(fs, ate_end_addr, &end_ate); in nvs_recover_last_ate()
502 if (nvs_ate_valid(fs, &end_ate)) { in nvs_recover_last_ate()
517 static int nvs_prev_ate(struct nvs_fs *fs, uint32_t *addr, struct nvs_ate *ate) in nvs_prev_ate() argument
523 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_prev_ate()
525 rc = nvs_flash_ate_rd(fs, *addr, ate); in nvs_prev_ate()
531 if (((*addr) & ADDR_OFFS_MASK) != (fs->sector_size - ate_size)) { in nvs_prev_ate()
537 *addr += ((fs->sector_count - 1) << ADDR_SECT_SHIFT); in nvs_prev_ate()
542 rc = nvs_flash_ate_rd(fs, *addr, &close_ate); in nvs_prev_ate()
547 rc = nvs_ate_cmp_const(&close_ate, fs->flash_parameters->erase_value); in nvs_prev_ate()
550 *addr = fs->ate_wra; in nvs_prev_ate()
556 if (nvs_close_ate_valid(fs, &close_ate)) { in nvs_prev_ate()
569 return nvs_recover_last_ate(fs, addr); in nvs_prev_ate()
572 static void nvs_sector_advance(struct nvs_fs *fs, uint32_t *addr) in nvs_sector_advance() argument
575 if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) { in nvs_sector_advance()
576 *addr -= (fs->sector_count << ADDR_SECT_SHIFT); in nvs_sector_advance()
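Sector numbers wrap modulo fs->sector_count, which is what makes the flash a circular log. Only the increment (between the matched lines) has to be inferred:

    static void nvs_sector_advance(struct nvs_fs *fs, uint32_t *addr)
    {
            *addr += (1 << ADDR_SECT_SHIFT); /* inferred: step one sector */
            if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) {
                    *addr -= (fs->sector_count << ADDR_SECT_SHIFT); /* wrap to 0 */
            }
    }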
583 static int nvs_sector_close(struct nvs_fs *fs) in nvs_sector_close() argument
588 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_sector_close()
592 close_ate.offset = (uint16_t)((fs->ate_wra + ate_size) & ADDR_OFFS_MASK); in nvs_sector_close()
595 fs->ate_wra &= ADDR_SECT_MASK; in nvs_sector_close()
596 fs->ate_wra += (fs->sector_size - ate_size); in nvs_sector_close()
600 (void)nvs_flash_ate_wrt(fs, &close_ate); in nvs_sector_close()
602 nvs_sector_advance(fs, &fs->ate_wra); in nvs_sector_close()
604 fs->data_wra = fs->ate_wra & ADDR_SECT_MASK; in nvs_sector_close()
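A close ATE is a sentinel entry with len == 0 whose offset field (line 592) records where the sector's last-written ATE sits; it is placed in the sector's final ATE slot (lines 595-596) before both write pointers advance. A sketch of the fields not shown in the matches, with the reserved id hedged:

    close_ate.id = 0xFFFF; /* assumed reserved id for close/gc-done entries */
    close_ate.len = 0U;
    close_ate.offset = (uint16_t)((fs->ate_wra + ate_size) & ADDR_OFFS_MASK);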
609 static int nvs_add_gc_done_ate(struct nvs_fs *fs) in nvs_add_gc_done_ate() argument
613 LOG_DBG("Adding gc done ate at %x", fs->ate_wra & ADDR_OFFS_MASK); in nvs_add_gc_done_ate()
617 gc_done_ate.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK); in nvs_add_gc_done_ate()
620 return nvs_flash_ate_wrt(fs, &gc_done_ate); in nvs_add_gc_done_ate()
627 static int nvs_gc(struct nvs_fs *fs) in nvs_gc() argument
635 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_gc()
637 sec_addr = (fs->ate_wra & ADDR_SECT_MASK); in nvs_gc()
638 nvs_sector_advance(fs, &sec_addr); in nvs_gc()
639 gc_addr = sec_addr + fs->sector_size - ate_size; in nvs_gc()
642 rc = nvs_flash_ate_rd(fs, gc_addr, &close_ate); in nvs_gc()
648 rc = nvs_ate_cmp_const(&close_ate, fs->flash_parameters->erase_value); in nvs_gc()
655 if (nvs_close_ate_valid(fs, &close_ate)) { in nvs_gc()
659 rc = nvs_recover_last_ate(fs, &gc_addr); in nvs_gc()
667 rc = nvs_prev_ate(fs, &gc_addr, &gc_ate); in nvs_gc()
672 if (!nvs_ate_valid(fs, &gc_ate)) { in nvs_gc()
677 wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(gc_ate.id)]; in nvs_gc()
680 wlk_addr = fs->ate_wra; in nvs_gc()
683 wlk_addr = fs->ate_wra; in nvs_gc()
687 rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate); in nvs_gc()
697 (nvs_ate_valid(fs, &wlk_ate))) { in nvs_gc()
700 } while (wlk_addr != fs->ate_wra); in nvs_gc()
712 gc_ate.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK); in nvs_gc()
715 rc = nvs_flash_block_move(fs, data_addr, gc_ate.len); in nvs_gc()
720 rc = nvs_flash_ate_wrt(fs, &gc_ate); in nvs_gc()
735 if (fs->ate_wra >= (fs->data_wra + ate_size)) { in nvs_gc()
736 rc = nvs_add_gc_done_ate(fs); in nvs_gc()
743 rc = nvs_flash_erase_sector(fs, sec_addr); in nvs_gc()
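The core of nvs_gc(): for each valid ATE in the sector being collected, walk newest-first from fs->ate_wra (or from the lookup-cache hint at line 677) until an ATE with the same id is found; data is copied forward only when that first match is the entry being collected, meaning it is still the live copy, and it is not a delete marker (len == 0). A condensed sketch of the recency test around lines 677-700, where gc_prev_addr is assumed to be the collected ATE's address saved before nvs_prev_ate() advanced past it:

    wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(gc_ate.id)];
    if (wlk_addr == 0xFFFFFFFF) { /* cache miss: start at the write position */
            wlk_addr = fs->ate_wra;
    }
    do {
            wlk_prev_addr = wlk_addr;
            rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
            if (rc) {
                    return rc;
            }
            /* newest-first: the first valid match is the most recent copy */
            if ((wlk_ate.id == gc_ate.id) && nvs_ate_valid(fs, &wlk_ate)) {
                    break;
            }
    } while (wlk_addr != fs->ate_wra);

    if ((wlk_prev_addr == gc_prev_addr) && gc_ate.len) {
            /* still live and not deleted: move the data, write a fresh ATE */
    }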
748 static int nvs_startup(struct nvs_fs *fs) in nvs_startup() argument
759 uint8_t erase_value = fs->flash_parameters->erase_value; in nvs_startup()
761 k_mutex_lock(&fs->nvs_lock, K_FOREVER); in nvs_startup()
763 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_startup()
767 for (i = 0; i < fs->sector_count; i++) { in nvs_startup()
769 (uint16_t)(fs->sector_size - ate_size); in nvs_startup()
770 rc = nvs_flash_cmp_const(fs, addr, erase_value, in nvs_startup()
775 nvs_sector_advance(fs, &addr); in nvs_startup()
776 rc = nvs_flash_cmp_const(fs, addr, erase_value, in nvs_startup()
785 if (closed_sectors == fs->sector_count) { in nvs_startup()
788 rc = flash_flatten(fs->flash_device, fs->offset, in nvs_startup()
789 fs->sector_size * fs->sector_count); in nvs_startup()
794 i = fs->sector_count; in nvs_startup()
795 addr = ((fs->sector_count - 1) << ADDR_SECT_SHIFT) + in nvs_startup()
796 (uint16_t)(fs->sector_size - ate_size); in nvs_startup()
803 if (i == fs->sector_count) { in nvs_startup()
809 rc = nvs_flash_cmp_const(fs, addr - ate_size, erase_value, in nvs_startup()
813 nvs_sector_advance(fs, &addr); in nvs_startup()
821 rc = nvs_recover_last_ate(fs, &addr); in nvs_startup()
830 fs->ate_wra = addr; in nvs_startup()
831 fs->data_wra = addr & ADDR_SECT_MASK; in nvs_startup()
833 while (fs->ate_wra >= fs->data_wra) { in nvs_startup()
834 rc = nvs_flash_ate_rd(fs, fs->ate_wra, &last_ate); in nvs_startup()
846 if (nvs_ate_valid(fs, &last_ate)) { in nvs_startup()
848 fs->data_wra = addr & ADDR_SECT_MASK; in nvs_startup()
855 fs->data_wra += nvs_al_size(fs, last_ate.offset + last_ate.len); in nvs_startup()
860 if (fs->ate_wra == fs->data_wra && last_ate.len) { in nvs_startup()
867 fs->ate_wra -= ate_size; in nvs_startup()
876 addr = fs->ate_wra & ADDR_SECT_MASK; in nvs_startup()
877 nvs_sector_advance(fs, &addr); in nvs_startup()
878 rc = nvs_flash_cmp_const(fs, addr, erase_value, fs->sector_size); in nvs_startup()
889 addr = fs->ate_wra + ate_size; in nvs_startup()
890 while ((addr & ADDR_OFFS_MASK) < (fs->sector_size - ate_size)) { in nvs_startup()
891 rc = nvs_flash_ate_rd(fs, addr, &gc_done_ate); in nvs_startup()
895 if (nvs_ate_valid(fs, &gc_done_ate) && in nvs_startup()
907 addr = fs->ate_wra & ADDR_SECT_MASK; in nvs_startup()
908 nvs_sector_advance(fs, &addr); in nvs_startup()
909 rc = nvs_flash_erase_sector(fs, addr); in nvs_startup()
913 rc = nvs_flash_erase_sector(fs, fs->ate_wra); in nvs_startup()
917 fs->ate_wra &= ADDR_SECT_MASK; in nvs_startup()
918 fs->ate_wra += (fs->sector_size - 2 * ate_size); in nvs_startup()
919 fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK); in nvs_startup()
927 fs->lookup_cache[i] = fs->ate_wra; in nvs_startup()
930 rc = nvs_gc(fs); in nvs_startup()
935 while (fs->ate_wra > fs->data_wra) { in nvs_startup()
936 empty_len = fs->ate_wra - fs->data_wra; in nvs_startup()
938 rc = nvs_flash_cmp_const(fs, fs->data_wra, erase_value, in nvs_startup()
947 fs->data_wra += fs->flash_parameters->write_block_size; in nvs_startup()
954 if (((fs->ate_wra + 2 * ate_size) == fs->sector_size) && in nvs_startup()
955 (fs->data_wra != (fs->ate_wra & ADDR_SECT_MASK))) { in nvs_startup()
956 rc = nvs_flash_erase_sector(fs, fs->ate_wra); in nvs_startup()
960 fs->data_wra = fs->ate_wra & ADDR_SECT_MASK; in nvs_startup()
967 rc = nvs_lookup_cache_rebuild(fs); in nvs_startup()
973 if ((!rc) && ((fs->ate_wra & ADDR_OFFS_MASK) == in nvs_startup()
974 (fs->sector_size - 2 * ate_size))) { in nvs_startup()
976 rc = nvs_add_gc_done_ate(fs); in nvs_startup()
978 k_mutex_unlock(&fs->nvs_lock); in nvs_startup()
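After recovery, a fully erased file system ends up with fs->ate_wra two ATE slots below the end of sector 0 (lines 917-918), leaving the topmost slot free for the eventual close ATE, and fs->data_wra at offset 0. Worked addresses under an assumed geometry:

    /* assumed: sector 0, sector_size = 4096, aligned ate_size = 8 */
    fs->ate_wra  = 0x00000000 + (4096 - 2 * 8);  /* 0x00000FF0 */
    fs->data_wra = fs->ate_wra & ADDR_SECT_MASK; /* 0x00000000 */
    /* 0x00000FF8, the last slot, stays reserved for the close ATE */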
982 int nvs_clear(struct nvs_fs *fs) in nvs_clear() argument
987 if (!fs->ready) { in nvs_clear()
992 for (uint16_t i = 0; i < fs->sector_count; i++) { in nvs_clear()
994 rc = nvs_flash_erase_sector(fs, addr); in nvs_clear()
1001 fs->ready = false; in nvs_clear()
1006 int nvs_mount(struct nvs_fs *fs) in nvs_mount() argument
1012 k_mutex_init(&fs->nvs_lock); in nvs_mount()
1014 fs->flash_parameters = flash_get_parameters(fs->flash_device); in nvs_mount()
1015 if (fs->flash_parameters == NULL) { in nvs_mount()
1020 write_block_size = flash_get_write_block_size(fs->flash_device); in nvs_mount()
1029 rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info); in nvs_mount()
1034 if (!fs->sector_size || fs->sector_size % info.size) { in nvs_mount()
1040 if (fs->sector_count < 2) { in nvs_mount()
1045 rc = nvs_startup(fs); in nvs_mount()
1051 fs->ready = true; in nvs_mount()
1053 LOG_INF("%d Sectors of %d bytes", fs->sector_count, fs->sector_size); in nvs_mount()
1055 (fs->ate_wra >> ADDR_SECT_SHIFT), in nvs_mount()
1056 (fs->ate_wra & ADDR_OFFS_MASK)); in nvs_mount()
1058 (fs->data_wra >> ADDR_SECT_SHIFT), in nvs_mount()
1059 (fs->data_wra & ADDR_OFFS_MASK)); in nvs_mount()
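A typical mount sequence, modeled on the Zephyr NVS sample; the storage_partition label and the sector count of 3 are placeholders, not requirements of this file:

    #include <zephyr/device.h>
    #include <zephyr/drivers/flash.h>
    #include <zephyr/fs/nvs.h>
    #include <zephyr/storage/flash_map.h>

    static struct nvs_fs fs;

    int storage_init(void)
    {
            struct flash_pages_info info;
            int rc;

            fs.flash_device = FIXED_PARTITION_DEVICE(storage_partition);
            if (!device_is_ready(fs.flash_device)) {
                    return -ENODEV;
            }
            fs.offset = FIXED_PARTITION_OFFSET(storage_partition);

            /* sector_size must be a nonzero multiple of the page size (line 1034) */
            rc = flash_get_page_info_by_offs(fs.flash_device, fs.offset, &info);
            if (rc) {
                    return rc;
            }
            fs.sector_size = info.size;
            fs.sector_count = 3U; /* at least 2 required (line 1040) */

            return nvs_mount(&fs);
    }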
1064 ssize_t nvs_write(struct nvs_fs *fs, uint16_t id, const void *data, size_t len) in nvs_write() argument
1073 if (!fs->ready) { in nvs_write()
1078 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_write()
1079 data_size = nvs_al_size(fs, len); in nvs_write()
1087 if ((len > (fs->sector_size - 4 * ate_size - NVS_DATA_CRC_SIZE)) || in nvs_write()
1094 wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(id)]; in nvs_write()
1100 wlk_addr = fs->ate_wra; in nvs_write()
1106 rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate); in nvs_write()
1110 if ((wlk_ate.id == id) && (nvs_ate_valid(fs, &wlk_ate))) { in nvs_write()
1114 if (wlk_addr == fs->ate_wra) { in nvs_write()
1139 rc = nvs_flash_block_cmp(fs, rd_addr, data, len + NVS_DATA_CRC_SIZE); in nvs_write()
1157 k_mutex_lock(&fs->nvs_lock, K_FOREVER); in nvs_write()
1161 if (gc_count == fs->sector_count) { in nvs_write()
1169 if (fs->ate_wra >= (fs->data_wra + required_space)) { in nvs_write()
1171 rc = nvs_flash_wrt_entry(fs, id, data, len); in nvs_write()
1179 rc = nvs_sector_close(fs); in nvs_write()
1184 rc = nvs_gc(fs); in nvs_write()
1192 k_mutex_unlock(&fs->nvs_lock); in nvs_write()
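Caller-side view of nvs_write(): it returns the number of bytes written, 0 when the latest stored copy already matches (the block compare at line 1139 short-circuits the rewrite), or a negative errno. A small example using an arbitrary id:

    int bump_boot_count(void)
    {
            uint32_t boot_count = 0U;
            ssize_t rc;

            (void)nvs_read(&fs, 1, &boot_count, sizeof(boot_count));
            boot_count++;

            rc = nvs_write(&fs, 1, &boot_count, sizeof(boot_count));
            /* rc == sizeof(boot_count) on success; 0 means identical data
             * was already stored; negative values are errnos
             */
            return (rc < 0) ? (int)rc : 0;
    }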
1196 int nvs_delete(struct nvs_fs *fs, uint16_t id) in nvs_delete() argument
1198 return nvs_write(fs, id, NULL, 0); in nvs_delete()
1201 ssize_t nvs_read_hist(struct nvs_fs *fs, uint16_t id, void *data, size_t len, in nvs_read_hist() argument
1213 if (!fs->ready) { in nvs_read_hist()
1218 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_read_hist()
1220 if (len > (fs->sector_size - 2 * ate_size)) { in nvs_read_hist()
1227 wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(id)]; in nvs_read_hist()
1234 wlk_addr = fs->ate_wra; in nvs_read_hist()
1240 rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate); in nvs_read_hist()
1244 if ((wlk_ate.id == id) && (nvs_ate_valid(fs, &wlk_ate))) { in nvs_read_hist()
1247 if (wlk_addr == fs->ate_wra) { in nvs_read_hist()
1252 if (((wlk_addr == fs->ate_wra) && (wlk_ate.id != id)) || in nvs_read_hist()
1266 rc = nvs_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len - NVS_DATA_CRC_SIZE)); in nvs_read_hist()
1275 rc = nvs_flash_rd(fs, rd_addr, &read_data_crc, sizeof(read_data_crc)); in nvs_read_hist()
1296 ssize_t nvs_read(struct nvs_fs *fs, uint16_t id, void *data, size_t len) in nvs_read() argument
1300 rc = nvs_read_hist(fs, id, data, len, 0); in nvs_read()
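nvs_read() is a thin wrapper that asks nvs_read_hist() for history position 0, the most recent copy; larger cnt values step back through older copies of the same id that have not yet been garbage-collected. A short usage sketch (id 2 is arbitrary):

    uint8_t key[8];
    ssize_t rc;

    rc = nvs_read(&fs, 2, key, sizeof(key));         /* latest copy */
    if (rc > (ssize_t)sizeof(key)) {
            /* stored value larger than the buffer: rc is the full length */
    }
    rc = nvs_read_hist(&fs, 2, key, sizeof(key), 1); /* one write earlier */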
1304 ssize_t nvs_calc_free_space(struct nvs_fs *fs) in nvs_calc_free_space() argument
1311 if (!fs->ready) { in nvs_calc_free_space()
1316 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_calc_free_space()
1324 free_space = (fs->sector_count - 1) * (fs->sector_size - (2 * ate_size)); in nvs_calc_free_space()
1326 step_addr = fs->ate_wra; in nvs_calc_free_space()
1329 rc = nvs_prev_ate(fs, &step_addr, &step_ate); in nvs_calc_free_space()
1334 wlk_addr = fs->ate_wra; in nvs_calc_free_space()
1337 rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate); in nvs_calc_free_space()
1342 (wlk_addr == fs->ate_wra)) { in nvs_calc_free_space()
1347 if (nvs_ate_valid(fs, &step_ate)) { in nvs_calc_free_space()
1355 free_space -= nvs_al_size(fs, step_ate.len); in nvs_calc_free_space()
1360 if (step_addr == fs->ate_wra) { in nvs_calc_free_space()
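The budget at line 1324 starts from everything except one spare sector (kept free so GC always has a destination) and, per remaining sector, the two reserved ATE slots; each entry that is valid, most recent, and not a delete marker then pays for its aligned data (line 1355). Worked numbers under an assumed geometry:

    /* assumed: 3 sectors x 4096 B, aligned ate_size = 8, write block = 8
     *
     *   free_space = (sector_count - 1) * (sector_size - 2 * ate_size)
     *              = (3 - 1) * (4096 - 16) = 8160 bytes        (line 1324)
     *
     *   a live 10-byte record then subtracts nvs_al_size(fs, 10) = 16 bytes
     *   of data (line 1355), leaving 8144
     */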
1367 size_t nvs_sector_max_data_size(struct nvs_fs *fs) in nvs_sector_max_data_size() argument
1371 if (!fs->ready) { in nvs_sector_max_data_size()
1376 ate_size = nvs_al_size(fs, sizeof(struct nvs_ate)); in nvs_sector_max_data_size()
1378 return fs->ate_wra - fs->data_wra - ate_size - NVS_DATA_CRC_SIZE; in nvs_sector_max_data_size()
1381 int nvs_sector_use_next(struct nvs_fs *fs) in nvs_sector_use_next() argument
1385 if (!fs->ready) { in nvs_sector_use_next()
1390 k_mutex_lock(&fs->nvs_lock, K_FOREVER); in nvs_sector_use_next()
1392 ret = nvs_sector_close(fs); in nvs_sector_use_next()
1397 ret = nvs_gc(fs); in nvs_sector_use_next()
1400 k_mutex_unlock(&fs->nvs_lock); in nvs_sector_use_next()