Lines Matching +full:partial +full:- +full:erase

3  * SPDX-License-Identifier: Apache-2.0
32 /* 32-bit integer hash function found by https://github.com/skeeto/hash-prospector. */ in zms_lookup_cache_pos()
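Note: the hash-prospector link above refers to the widely cited two-round "lowbias32" mixer. A sketch of what zms_lookup_cache_pos() presumably computes (the function body is not shown in these matches; the reduction modulo CONFIG_ZMS_LOOKUP_CACHE_SIZE is an assumption based on the cache indexing seen below):

    /* Sketch, not verbatim source: lowbias32 mixer from hash-prospector,
     * reduced to a lookup-cache slot index. The two multipliers are the
     * published lowbias32 constants. */
    static size_t lookup_cache_pos_sketch(uint32_t id)
    {
        uint32_t hash = id;

        hash ^= hash >> 16;
        hash *= 0x7feb352dU;
        hash ^= hash >> 15;
        hash *= 0x846ca68bU;
        hash ^= hash >> 16;
        return hash % CONFIG_ZMS_LOOKUP_CACHE_SIZE;
    }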
53 memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache)); in zms_lookup_cache_rebuild()
54 addr = fs->ate_wra; in zms_lookup_cache_rebuild()
65 cache_entry = &fs->lookup_cache[zms_lookup_cache_pos(ate.id)]; in zms_lookup_cache_rebuild()
73 if (rc == -ENOENT) { in zms_lookup_cache_rebuild()
87 if (addr == fs->ate_wra) { in zms_lookup_cache_rebuild()
97 uint64_t *cache_entry = fs->lookup_cache; in zms_lookup_cache_invalidate()
98 uint64_t *const cache_end = &fs->lookup_cache[CONFIG_ZMS_LOOKUP_CACHE_SIZE]; in zms_lookup_cache_invalidate()
112 return fs->offset + (fs->sector_size * SECTOR_NUM(addr)) + SECTOR_OFFSET(addr); in zms_addr_to_offset()
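The SECTOR_NUM()/SECTOR_OFFSET() macros on line 112 suggest a ZMS address packs a sector index and an intra-sector offset into a single 64-bit value. A hedged sketch of that layout (the shift width and mask are assumptions inferred from the ADDR_SECT_SHIFT usage below):

    /* Assumed address layout: upper 32 bits = sector number,
     * lower 32 bits = byte offset inside the sector. */
    #define SKETCH_ADDR_SECT_SHIFT  32
    #define SKETCH_SECTOR_NUM(a)    ((a) >> SKETCH_ADDR_SECT_SHIFT)
    #define SKETCH_SECTOR_OFFSET(a) ((a) & 0xffffffffULL)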
118 return len & ~(fs->flash_parameters->write_block_size - 1U); in zms_round_down_write_block_size()
124 return (len + (fs->flash_parameters->write_block_size - 1U)) & in zms_round_up_write_block_size()
125 ~(fs->flash_parameters->write_block_size - 1U); in zms_round_up_write_block_size()
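Both rounding helpers rely on write_block_size being a power of two, so alignment reduces to bit masking. A worked example with a hypothetical write_block_size of 8:

    /* mask = ~(8 - 1) = ~7
     * round_down(13) = 13 & ~7       = 8
     * round_up(13)   = (13 + 7) & ~7 = 16
     * round_up(16)   = (16 + 7) & ~7 = 16  (aligned input is unchanged) */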
128 /* zms_al_size returns the size aligned to the flash write_block_size */
131 size_t write_block_size = fs->flash_parameters->write_block_size; in zms_al_size()
143 return (addr & ADDR_SECT_MASK) + fs->sector_size - fs->ate_size; in zms_empty_ate_addr()
149 return (addr & ADDR_SECT_MASK) + fs->sector_size - 2 * fs->ate_size; in zms_close_ate_addr()
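Together, lines 143 and 149 imply that each sector reserves its last two ATE slots for bookkeeping. A sketch of the assumed sector layout:

    /* Assumed layout, inferred from the two address helpers above:
     *
     * |<-------------------------- sector_size -------------------------->|
     * | data (grows up) ... free ... ATEs (grow down) | close ATE | empty ATE |
     *                                                 ^           ^
     *                         sector_size - 2*ate_size           sector_size - ate_size
     */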
170 rc = flash_write(fs->flash_device, offset, data8, blen); in zms_flash_al_wrt()
175 len -= blen; in zms_flash_al_wrt()
181 (void)memset(buf + len, fs->flash_parameters->erase_value, in zms_flash_al_wrt()
182 fs->flash_parameters->write_block_size - len); in zms_flash_al_wrt()
184 rc = flash_write(fs->flash_device, offset, buf, in zms_flash_al_wrt()
185 fs->flash_parameters->write_block_size); in zms_flash_al_wrt()
199 return flash_read(fs->flash_device, offset, data, len); in zms_flash_rd()
207 rc = zms_flash_al_wrt(fs, fs->ate_wra, entry, sizeof(struct zms_ate)); in zms_flash_ate_wrt()
212 /* 0xFFFFFFFF is a special-purpose identifier. Exclude it from the cache */ in zms_flash_ate_wrt()
213 if (entry->id != ZMS_HEAD_ID) { in zms_flash_ate_wrt()
214 fs->lookup_cache[zms_lookup_cache_pos(entry->id)] = fs->ate_wra; in zms_flash_ate_wrt()
217 fs->ate_wra -= zms_al_size(fs, sizeof(struct zms_ate)); in zms_flash_ate_wrt()
227 rc = zms_flash_al_wrt(fs, fs->data_wra, data, len); in zms_flash_data_wrt()
231 fs->data_wra += zms_al_size(fs, len); in zms_flash_data_wrt()
243  * in blocks of size ZMS_BLOCK_SIZE aligned to the flash write_block_size
266 len -= bytes_to_cmp; in zms_flash_block_cmp()
293 len -= bytes_to_cmp; in zms_flash_cmp_const()
321 len -= bytes_to_copy; in zms_flash_block_move()
327 /* Erase a sector and verify that the erase succeeded. in zms_flash_erase_sector()
335 flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT; in zms_flash_erase_sector()
338 /* Do nothing for devices that do not have erase capability */ in zms_flash_erase_sector()
346 fs->sector_size); in zms_flash_erase_sector()
351 rc = flash_erase(fs->flash_device, offset, fs->sector_size); in zms_flash_erase_sector()
357 if (zms_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value, fs->sector_size)) { in zms_flash_erase_sector()
359 rc = -ENXIO; in zms_flash_erase_sector()
372 sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8)); in zms_ate_crc8_update()
373 entry->crc8 = crc8; in zms_ate_crc8_update()
385 sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8)); in zms_ate_crc8_check()
386 if (crc8 == entry->crc8) { in zms_ate_crc8_check()
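Lines 372 and 385 run the CRC over the whole ATE except its trailing crc8 field. A minimal sketch, assuming Zephyr's crc8_ccitt() with a 0xff seed (polynomial and seed are assumptions here, and crc8 is assumed to be the last member of struct zms_ate):

    #include <zephyr/sys/crc.h>

    static uint8_t ate_crc8_sketch(const struct zms_ate *entry)
    {
        /* Cover every byte of the ATE except the crc8 member itself. */
        return crc8_ccitt(0xff, (const uint8_t *)entry,
                          sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8));
    }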
403 return zms_ate_valid_different_sector(fs, entry, fs->sector_cycle); in zms_ate_valid()
415 if ((cycle_cnt != entry->cycle_cnt) || zms_ate_crc8_check(entry)) { in zms_ate_valid_different_sector()
432 if (rc == -ENOENT) { in zms_get_cycle_on_sector_change()
446 * - a valid ate
447 * - with len = 0 and id = ZMS_HEAD_ID
448  * - and an offset whose distance from the sector end is a multiple of the ATE size
453 return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) && in zms_close_ate_valid()
454 (entry->id == ZMS_HEAD_ID) && !((fs->sector_size - entry->offset) % fs->ate_size)); in zms_close_ate_valid()
459 * - a valid ate
460 * - with len = 0xffff and id = 0xffffffff
465 return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && in zms_empty_ate_valid()
466 (entry->len == 0xffff) && (entry->id == ZMS_HEAD_ID)); in zms_empty_ate_valid()
471 * - valid ate
472 * - len = 0
473 * - id = 0xffffffff
478 return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) && in zms_gc_done_ate_valid()
479 (entry->id == ZMS_HEAD_ID)); in zms_gc_done_ate_valid()
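The three validity helpers above tell the bookkeeping ATEs apart purely by their len, id, and offset fields; all of them additionally require zms_ate_valid_different_sector() to pass. Summarizing the criteria as listed:

    ATE type  len     id           extra condition
    close     0       ZMS_HEAD_ID  (sector_size - offset) % ate_size == 0
    empty     0xffff  ZMS_HEAD_ID  none
    gc_done   0       ZMS_HEAD_ID  none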
500 (empty_ate->cycle_cnt == close_ate->cycle_cnt)) { in zms_validate_closed_sector()
519 entry.cycle_cnt = fs->sector_cycle; in zms_flash_write_entry()
526 entry.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra); in zms_flash_write_entry()
563 *addr -= 2 * fs->ate_size; in zms_recover_last_ate()
584 ate_end_addr -= fs->ate_size; in zms_recover_last_ate()
597 *addr += fs->ate_size; in zms_compute_prev_addr()
598 if ((SECTOR_OFFSET(*addr)) != (fs->sector_size - 2 * fs->ate_size)) { in zms_compute_prev_addr()
604 *addr += ((uint64_t)(fs->sector_count - 1) << ADDR_SECT_SHIFT); in zms_compute_prev_addr()
606 *addr -= (1ULL << ADDR_SECT_SHIFT); in zms_compute_prev_addr()
618 *addr = fs->ate_wra; in zms_compute_prev_addr()
647 if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) { in zms_sector_advance()
648 *addr -= ((uint64_t)fs->sector_count << ADDR_SECT_SHIFT); in zms_sector_advance()
663 close_ate.offset = (uint32_t)SECTOR_OFFSET(fs->ate_wra + fs->ate_size); in zms_sector_close()
665 close_ate.cycle_cnt = fs->sector_cycle; in zms_sector_close()
673  * - An ATE.cycle_cnt == 0 is written as the last ATE of the sector in zms_sector_close()
674  * - This ATE was never overwritten in the next 255 cycles because of in zms_sector_close()
676  * - On the 256th cycle the leading cycle_cnt is 0 again, so this stale ATE becomes in zms_sector_close()
679 memset(&garbage_ate, fs->flash_parameters->erase_value, sizeof(garbage_ate)); in zms_sector_close()
680 while (SECTOR_OFFSET(fs->ate_wra) && (fs->ate_wra >= fs->data_wra)) { in zms_sector_close()
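The hazard described in the comment at lines 673-676 follows from the cycle counter wrapping; a worked illustration, assuming cycle_cnt is an 8-bit field:

    /* Assumed: cycle_cnt is a uint8_t, so it wraps modulo 256.
     * cycle 0:      stale ATE written with cycle_cnt == 0
     * cycles 1-255: cycle check fails, stale ATE is rejected
     * cycle 256:    (0 + 256) % 256 == 0, the stale ATE would look valid
     *               again unless it was first overwritten with garbage
     *               (erase_value), which breaks its CRC. */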
687 fs->ate_wra = zms_close_ate_addr(fs, fs->ate_wra); in zms_sector_close()
693 zms_sector_advance(fs, &fs->ate_wra); in zms_sector_close()
695 rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); in zms_sector_close()
696 if (rc == -ENOENT) { in zms_sector_close()
698 fs->sector_cycle = 0; in zms_sector_close()
704 fs->data_wra = fs->ate_wra & ADDR_SECT_MASK; in zms_sector_close()
713 LOG_DBG("Adding gc done ate at %llx", fs->ate_wra); in zms_add_gc_done_ate()
716 gc_done_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra); in zms_add_gc_done_ate()
718 gc_done_ate.cycle_cnt = fs->sector_cycle; in zms_add_gc_done_ate()
734 LOG_DBG("Adding empty ate at %llx", (uint64_t)(addr + fs->sector_size - fs->ate_size)); in zms_add_empty_ate()
742 if (rc == -ENOENT) { in zms_add_empty_ate()
754 /* Adding an empty ATE to this sector changes the fs->ate_wra value in zms_add_empty_ate()
757 previous_ate_wra = fs->ate_wra; in zms_add_empty_ate()
758 fs->ate_wra = zms_empty_ate_addr(fs, addr); in zms_add_empty_ate()
763 fs->ate_wra = previous_ate_wra; in zms_add_empty_ate()
789 return -ENOENT; in zms_get_sector_cycle()
806 rc = zms_flash_ate_rd(fs, close_addr + fs->ate_size, empty_ate); in zms_get_sector_header()
891 rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); in zms_gc()
892 if (rc == -ENOENT) { in zms_gc()
893 /* Erase this new unused sector if needed */ in zms_gc()
894 rc = zms_flash_erase_sector(fs, fs->ate_wra); in zms_gc()
899 rc = zms_add_empty_ate(fs, fs->ate_wra); in zms_gc()
906 rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); in zms_gc()
914 previous_cycle = fs->sector_cycle; in zms_gc()
916 sec_addr = (fs->ate_wra & ADDR_SECT_MASK); in zms_gc()
918 gc_addr = sec_addr + fs->sector_size - fs->ate_size; in zms_gc()
932 fs->sector_cycle = empty_ate.cycle_cnt; in zms_gc()
935 stop_addr = gc_addr - 2 * fs->ate_size; in zms_gc()
954 wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(gc_ate.id)]; in zms_gc()
957 wlk_addr = fs->ate_wra; in zms_gc()
960 wlk_addr = fs->ate_wra; in zms_gc()
968 rc = zms_find_ate_with_id(fs, gc_ate.id, wlk_addr, fs->ate_wra, &wlk_ate, in zms_gc()
987 gc_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra); in zms_gc()
1007 fs->sector_cycle = previous_cycle; in zms_gc()
1017 /* Erase the GC'ed sector when needed */ in zms_gc()
1036 if (!fs->ready) { in zms_clear()
1038 return -EACCES; in zms_clear()
1041 k_mutex_lock(&fs->zms_lock, K_FOREVER); in zms_clear()
1042 for (uint32_t i = 0; i < fs->sector_count; i++) { in zms_clear()
1055 fs->ready = false; in zms_clear()
1058 k_mutex_unlock(&fs->zms_lock); in zms_clear()
1077 k_mutex_lock(&fs->zms_lock, K_FOREVER); in zms_init()
1083 for (i = 0; i < fs->sector_count; i++) { in zms_init()
1093 fs->sector_cycle = empty_ate.cycle_cnt; in zms_init()
1104 rc = -ENOEXEC; in zms_init()
1118 fs->sector_cycle = empty_ate.cycle_cnt; in zms_init()
1127 if ((closed_sectors == fs->sector_count) && !zms_magic_exist) { in zms_init()
1128 rc = -EDEADLK; in zms_init()
1135 if (i == fs->sector_count) { in zms_init()
1141 rc = zms_flash_ate_rd(fs, addr - fs->ate_size, &first_ate); in zms_init()
1160 rc = -ENOEXEC; in zms_init()
1174 rc = zms_get_sector_cycle(fs, addr, &fs->sector_cycle); in zms_init()
1175 if (rc == -ENOENT) { in zms_init()
1177 fs->sector_cycle = 0; in zms_init()
1196 fs->ate_wra = addr; in zms_init()
1197 fs->data_wra = data_wra; in zms_init()
1199 /* fs->ate_wra should point to the next available entry. This is normally in zms_init()
1204 while (fs->ate_wra >= fs->data_wra) { in zms_init()
1205 rc = zms_flash_ate_rd(fs, fs->ate_wra, &last_ate); in zms_init()
1217 if ((fs->ate_wra == fs->data_wra) && last_ate.len) { in zms_init()
1219 rc = -ESPIPE; in zms_init()
1223 fs->ate_wra -= fs->ate_size; in zms_init()
1230  * If no valid empty ATE is found, the sector has never been used. Just erase it by adding in zms_init()
1232 * When gc needs to be restarted, first erase the sector by adding an empty in zms_init()
1235 addr = zms_close_ate_addr(fs, fs->ate_wra); in zms_init()
1246 /* The sector after fs->ate_wra is closed. in zms_init()
1252 fs->sector_cycle = empty_ate.cycle_cnt; in zms_init()
1253 addr = fs->ate_wra + fs->ate_size; in zms_init()
1254 while (SECTOR_OFFSET(addr) < (fs->sector_size - 2 * fs->ate_size)) { in zms_init()
1263 addr += fs->ate_size; in zms_init()
1267 /* Erase the next sector */ in zms_init()
1269 addr = fs->ate_wra & ADDR_SECT_MASK; in zms_init()
1279 rc = zms_flash_erase_sector(fs, fs->ate_wra); in zms_init()
1283 rc = zms_add_empty_ate(fs, fs->ate_wra); in zms_init()
1289 fs->ate_wra &= ADDR_SECT_MASK; in zms_init()
1290 fs->ate_wra += (fs->sector_size - 3 * fs->ate_size); in zms_init()
1291 fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK); in zms_init()
1299 fs->lookup_cache[i] = fs->ate_wra; in zms_init()
1315 if ((!rc) && (SECTOR_OFFSET(fs->ate_wra) == (fs->sector_size - 3 * fs->ate_size))) { in zms_init()
1318 k_mutex_unlock(&fs->zms_lock); in zms_init()
1329 k_mutex_init(&fs->zms_lock); in zms_mount()
1331 fs->flash_parameters = flash_get_parameters(fs->flash_device); in zms_mount()
1332 if (fs->flash_parameters == NULL) { in zms_mount()
1334 return -EINVAL; in zms_mount()
1337 fs->ate_size = zms_al_size(fs, sizeof(struct zms_ate)); in zms_mount()
1338 write_block_size = fs->flash_parameters->write_block_size; in zms_mount()
1343 return -EINVAL; in zms_mount()
1346 /* When the device needs erase operations before write, let's check that in zms_mount()
1349 if (flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT) { in zms_mount()
1350 rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info); in zms_mount()
1353 return -EINVAL; in zms_mount()
1355 if (!fs->sector_size || fs->sector_size % info.size) { in zms_mount()
1357 return -EINVAL; in zms_mount()
1364 if (fs->sector_size < ZMS_MIN_ATE_NUM * fs->ate_size) { in zms_mount()
1366 ZMS_MIN_ATE_NUM * fs->ate_size); in zms_mount()
1370 if (fs->sector_count < 2) { in zms_mount()
1371 LOG_ERR("Configuration error - sector count below minimum requirement (2)"); in zms_mount()
1372 return -EINVAL; in zms_mount()
1382 fs->ready = true; in zms_mount()
1384 LOG_INF("%u Sectors of %u bytes", fs->sector_count, fs->sector_size); in zms_mount()
1385 LOG_INF("alloc wra: %llu, %llx", SECTOR_NUM(fs->ate_wra), SECTOR_OFFSET(fs->ate_wra)); in zms_mount()
1386 LOG_INF("data wra: %llu, %llx", SECTOR_NUM(fs->data_wra), SECTOR_OFFSET(fs->data_wra)); in zms_mount()
1400 if (!fs->ready) { in zms_write()
1402 return -EACCES; in zms_write()
1407 /* The maximum data size is the sector size minus 5 ATEs in zms_write()
1412 if ((len > (fs->sector_size - 5 * fs->ate_size)) || (len > UINT16_MAX) || in zms_write()
1414 return -EINVAL; in zms_write()
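A quick capacity check for the limit on line 1412, using hypothetical numbers (a 4096-byte sector and a 16-byte aligned ATE are assumptions):

    /* max len = sector_size - 5 * ate_size = 4096 - 5 * 16 = 4016 bytes,
     * additionally capped at UINT16_MAX, presumably because the ATE len
     * field is 16 bits wide (0xffff marks the empty ATE, line 460). */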
1419 wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)]; in zms_write()
1425 wlk_addr = fs->ate_wra; in zms_write()
1432 int prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate, &rd_addr); in zms_write()
1468 /* Skip writing a delete ATE for a non-existing entry */ in zms_write()
1482 required_space = data_size + fs->ate_size; in zms_write()
1484 required_space = fs->ate_size; in zms_write()
1488 k_mutex_lock(&fs->zms_lock, K_FOREVER); in zms_write()
1492 if (gc_count == fs->sector_count) { in zms_write()
1496 rc = -ENOSPC; in zms_write()
1501 * empty (even for delete ATE). Otherwise, the fs->ate_wra will be decremented in zms_write()
1503 * So the first position of a sector (fs->ate_wra = 0x0) is forbidden for ATEs in zms_write()
1506 if ((SECTOR_OFFSET(fs->ate_wra)) && in zms_write()
1507 (fs->ate_wra >= (fs->data_wra + required_space)) && in zms_write()
1508 (SECTOR_OFFSET(fs->ate_wra - fs->ate_size) || !len)) { in zms_write()
1529 k_mutex_unlock(&fs->zms_lock); in zms_write()
1551 if (!fs->ready) { in zms_read_hist()
1553 return -EACCES; in zms_read_hist()
1559 wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)]; in zms_read_hist()
1562 rc = -ENOENT; in zms_read_hist()
1566 wlk_addr = fs->ate_wra; in zms_read_hist()
1572 prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate, in zms_read_hist()
1597 return -ENOENT; in zms_read_hist()
1616 /* Do not compute CRC for partial reads as CRC won't match */ in zms_read_hist()
1623 return -EIO; in zms_read_hist()
1674 const uint32_t second_to_last_offset = (2 * fs->ate_size); in zms_calc_free_space()
1676 if (!fs->ready) { in zms_calc_free_space()
1678 return -EACCES; in zms_calc_free_space()
1686 free_space = (fs->sector_count - 1) * (fs->sector_size - 4 * fs->ate_size); in zms_calc_free_space()
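Line 1686 apparently reserves one whole sector for garbage collection plus four ATE slots per sector for bookkeeping. Continuing the hypothetical geometry (4 sectors of 4096 bytes, 16-byte ATEs):

    /* free_space = (sector_count - 1) * (sector_size - 4 * ate_size)
     *            = (4 - 1) * (4096 - 4 * 16)
     *            = 3 * 4032 = 12096 bytes */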
1688 step_addr = fs->ate_wra; in zms_calc_free_space()
1726 free_space -= zms_al_size(fs, step_ate.len); in zms_calc_free_space()
1728 free_space -= fs->ate_size; in zms_calc_free_space()
1730 } while (step_addr != fs->ate_wra); in zms_calc_free_space()
1733 current_cycle = fs->sector_cycle; in zms_calc_free_space()
1739 for (int i = 0; i < fs->sector_count; i++) { in zms_calc_free_space()
1753 free_space -= fs->ate_size; in zms_calc_free_space()
1756 fs->sector_cycle = empty_ate.cycle_cnt; in zms_calc_free_space()
1762 free_space -= fs->ate_size; in zms_calc_free_space()
1767 fs->sector_cycle = current_cycle; in zms_calc_free_space()
1774 if (!fs->ready) { in zms_active_sector_free_space()
1776 return -EACCES; in zms_active_sector_free_space()
1779 return fs->ate_wra - fs->data_wra - fs->ate_size; in zms_active_sector_free_space()
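Line 1779 measures the gap between the descending ATE write address and the ascending data write address, minus one ATE slot, presumably reserved for the next entry's ATE. With assumed offsets:

    /* e.g. ate_wra offset = 4032, data_wra offset = 256, ate_size = 16
     * (all hypothetical): free = 4032 - 256 - 16 = 3760 bytes left in
     * the active sector. */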
1786 if (!fs->ready) { in zms_sector_use_next()
1788 return -EACCES; in zms_sector_use_next()
1791 k_mutex_lock(&fs->zms_lock, K_FOREVER); in zms_sector_use_next()
1801 k_mutex_unlock(&fs->zms_lock); in zms_sector_use_next()