1 /* Copyright (c) 2024 BayLibre SAS
2  *
3  * SPDX-License-Identifier: Apache-2.0
4  *
5  * ZMS: Zephyr Memory Storage
6  */
7 
8 #include <string.h>
9 #include <errno.h>
10 #include <inttypes.h>
11 #include <zephyr/fs/zms.h>
12 #include <zephyr/sys/crc.h>
13 #include "zms_priv.h"
14 
15 #include <zephyr/logging/log.h>
16 LOG_MODULE_REGISTER(fs_zms, CONFIG_ZMS_LOG_LEVEL);
17 
18 static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate);
19 static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry);
20 static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt);
21 static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
22 				 struct zms_ate *close_ate);
23 static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry,
24 					  uint8_t cycle_cnt);
25 
26 #ifdef CONFIG_ZMS_LOOKUP_CACHE
27 
static inline size_t zms_lookup_cache_pos(uint32_t id)
29 {
30 	uint32_t hash;
31 
32 	/* 32-bit integer hash function found by https://github.com/skeeto/hash-prospector. */
33 	hash = id;
34 	hash ^= hash >> 16;
35 	hash *= 0x7feb352dU;
36 	hash ^= hash >> 15;
37 	hash *= 0x846ca68bU;
38 	hash ^= hash >> 16;
39 
40 	return hash % CONFIG_ZMS_LOOKUP_CACHE_SIZE;
41 }
42 
static int zms_lookup_cache_rebuild(struct zms_fs *fs)
44 {
45 	int rc;
46 	int previous_sector_num = ZMS_INVALID_SECTOR_NUM;
47 	uint64_t addr;
48 	uint64_t ate_addr;
49 	uint64_t *cache_entry;
50 	uint8_t current_cycle;
51 	struct zms_ate ate;
52 
53 	memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache));
54 	addr = fs->ate_wra;
55 
56 	while (true) {
57 		/* Make a copy of 'addr' as it will be advanced by zms_prev_ate() */
58 		ate_addr = addr;
59 		rc = zms_prev_ate(fs, &addr, &ate);
60 
61 		if (rc) {
62 			return rc;
63 		}
64 
65 		cache_entry = &fs->lookup_cache[zms_lookup_cache_pos(ate.id)];
66 
67 		if (ate.id != ZMS_HEAD_ID && *cache_entry == ZMS_LOOKUP_CACHE_NO_ADDR) {
68 			/* read the ate cycle only when we change the sector
69 			 * or if it is the first read
70 			 */
71 			if (SECTOR_NUM(ate_addr) != previous_sector_num) {
72 				rc = zms_get_sector_cycle(fs, ate_addr, &current_cycle);
73 				if (rc == -ENOENT) {
74 					/* sector never used */
75 					current_cycle = 0;
76 				} else if (rc) {
77 					/* bad flash read */
78 					return rc;
79 				}
80 			}
81 			if (zms_ate_valid_different_sector(fs, &ate, current_cycle)) {
82 				*cache_entry = ate_addr;
83 			}
84 			previous_sector_num = SECTOR_NUM(ate_addr);
85 		}
86 
87 		if (addr == fs->ate_wra) {
88 			break;
89 		}
90 	}
91 
92 	return 0;
93 }
94 
static void zms_lookup_cache_invalidate(struct zms_fs *fs, uint32_t sector)
96 {
97 	uint64_t *cache_entry = fs->lookup_cache;
98 	uint64_t *const cache_end = &fs->lookup_cache[CONFIG_ZMS_LOOKUP_CACHE_SIZE];
99 
100 	for (; cache_entry < cache_end; ++cache_entry) {
101 		if (SECTOR_NUM(*cache_entry) == sector) {
102 			*cache_entry = ZMS_LOOKUP_CACHE_NO_ADDR;
103 		}
104 	}
105 }
106 
107 #endif /* CONFIG_ZMS_LOOKUP_CACHE */
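/* Sketch (not part of the original flow) of how the lookup cache is consulted by the
 * read/write paths further below: the ID is hashed to a slot that caches the address of
 * the most recent ATE written for that ID, and a miss simply falls back to a full
 * backwards walk starting from fs->ate_wra.
 *
 *	uint64_t start = fs->lookup_cache[zms_lookup_cache_pos(id)];
 *
 *	if (start == ZMS_LOOKUP_CACHE_NO_ADDR) {
 *		start = fs->ate_wra;
 *	}
 *	// then walk backwards from "start" with zms_prev_ate()/zms_find_ate_with_id()
 */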
108 
109 /* Helper to compute offset given the address */
static inline off_t zms_addr_to_offset(struct zms_fs *fs, uint64_t addr)
111 {
112 	return fs->offset + (fs->sector_size * SECTOR_NUM(addr)) + SECTOR_OFFSET(addr);
113 }
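/* Illustrative example (values assumed, not taken from a real configuration): a ZMS
 * address packs the sector number in the bits above ADDR_SECT_SHIFT and the byte offset
 * within the sector in the bits below it. With fs->offset = 0x10000 and
 * fs->sector_size = 0x1000, an address whose SECTOR_NUM() is 2 and SECTOR_OFFSET() is
 * 0x30 resolves to the flash offset 0x10000 + 2 * 0x1000 + 0x30 = 0x12030.
 */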
114 
115 /* Helper to round down len to the closest multiple of write_block_size  */
static inline size_t zms_round_down_write_block_size(struct zms_fs *fs, size_t len)
117 {
118 	return len & ~(fs->flash_parameters->write_block_size - 1U);
119 }
120 
121 /* Helper to round up len to multiple of write_block_size */
static inline size_t zms_round_up_write_block_size(struct zms_fs *fs, size_t len)
123 {
124 	return (len + (fs->flash_parameters->write_block_size - 1U)) &
125 	       ~(fs->flash_parameters->write_block_size - 1U);
126 }
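/* Example (assuming a power-of-two write_block_size, e.g. 8 bytes):
 * zms_round_down_write_block_size(fs, 13) == 8 and
 * zms_round_up_write_block_size(fs, 13) == 16, while an already aligned length
 * (e.g. 16) is returned unchanged by both helpers.
 */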
127 
128 /* zms_al_size returns size aligned to fs->write_block_size */
static inline size_t zms_al_size(struct zms_fs *fs, size_t len)
130 {
131 	size_t write_block_size = fs->flash_parameters->write_block_size;
132 
133 	if (write_block_size <= 1U) {
134 		return len;
135 	}
136 
137 	return zms_round_up_write_block_size(fs, len);
138 }
139 
140 /* Helper to get empty ATE address */
static inline uint64_t zms_empty_ate_addr(struct zms_fs *fs, uint64_t addr)
142 {
143 	return (addr & ADDR_SECT_MASK) + fs->sector_size - fs->ate_size;
144 }
145 
146 /* Helper to get close ATE address */
static inline uint64_t zms_close_ate_addr(struct zms_fs *fs, uint64_t addr)
148 {
149 	return (addr & ADDR_SECT_MASK) + fs->sector_size - 2 * fs->ate_size;
150 }
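/* Rough sector layout implied by the two helpers above (sketch, offsets relative to the
 * sector start):
 *
 *	+---------------------------------------------+  <- sector_size
 *	| empty ATE   at sector_size - fs->ate_size   |
 *	| close ATE   at sector_size - 2*fs->ate_size |
 *	| ...ATEs, written downwards (fs->ate_wra)    |
 *	|                                             |
 *	| ...data, written upwards (fs->data_wra)     |
 *	+---------------------------------------------+  <- 0x0
 */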
151 
152 /* Aligned memory write */
static int zms_flash_al_wrt(struct zms_fs *fs, uint64_t addr, const void *data, size_t len)
154 {
155 	const uint8_t *data8 = (const uint8_t *)data;
156 	int rc = 0;
157 	off_t offset;
158 	size_t blen;
159 	uint8_t buf[ZMS_BLOCK_SIZE];
160 
161 	if (!len) {
162 		/* Nothing to write, avoid changing the flash protection */
163 		return 0;
164 	}
165 
166 	offset = zms_addr_to_offset(fs, addr);
167 
168 	blen = zms_round_down_write_block_size(fs, len);
169 	if (blen > 0) {
170 		rc = flash_write(fs->flash_device, offset, data8, blen);
171 		if (rc) {
172 			/* flash write error */
173 			goto end;
174 		}
175 		len -= blen;
176 		offset += blen;
177 		data8 += blen;
178 	}
179 	if (len) {
180 		memcpy(buf, data8, len);
181 		(void)memset(buf + len, fs->flash_parameters->erase_value,
182 			     fs->flash_parameters->write_block_size - len);
183 
184 		rc = flash_write(fs->flash_device, offset, buf,
185 				 fs->flash_parameters->write_block_size);
186 	}
187 
188 end:
189 	return rc;
190 }
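/* Worked example for the helper above (illustrative sizes): with write_block_size = 8
 * and len = 13, the first 8 bytes are written straight from the caller's buffer, and the
 * remaining 5 bytes are copied into a temporary buffer padded with the flash erase value
 * before being written as one full 8-byte block.
 */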
191 
192 /* basic flash read from zms address */
static int zms_flash_rd(struct zms_fs *fs, uint64_t addr, void *data, size_t len)
194 {
195 	off_t offset;
196 
197 	offset = zms_addr_to_offset(fs, addr);
198 
199 	return flash_read(fs->flash_device, offset, data, len);
200 }
201 
202 /* allocation entry write */
static int zms_flash_ate_wrt(struct zms_fs *fs, const struct zms_ate *entry)
204 {
205 	int rc;
206 
207 	rc = zms_flash_al_wrt(fs, fs->ate_wra, entry, sizeof(struct zms_ate));
208 	if (rc) {
209 		goto end;
210 	}
211 #ifdef CONFIG_ZMS_LOOKUP_CACHE
212 	/* 0xFFFFFFFF is a special-purpose identifier. Exclude it from the cache */
213 	if (entry->id != ZMS_HEAD_ID) {
214 		fs->lookup_cache[zms_lookup_cache_pos(entry->id)] = fs->ate_wra;
215 	}
216 #endif
217 	fs->ate_wra -= zms_al_size(fs, sizeof(struct zms_ate));
218 end:
219 	return rc;
220 }
221 
222 /* data write */
static int zms_flash_data_wrt(struct zms_fs *fs, const void *data, size_t len)
224 {
225 	int rc;
226 
227 	rc = zms_flash_al_wrt(fs, fs->data_wra, data, len);
228 	if (rc < 0) {
229 		return rc;
230 	}
231 	fs->data_wra += zms_al_size(fs, len);
232 
233 	return 0;
234 }
235 
236 /* flash ate read */
static int zms_flash_ate_rd(struct zms_fs *fs, uint64_t addr, struct zms_ate *entry)
238 {
239 	return zms_flash_rd(fs, addr, entry, sizeof(struct zms_ate));
240 }
241 
242 /* zms_flash_block_cmp compares the data in flash at addr to data
243  * in blocks of size ZMS_BLOCK_SIZE aligned to fs->write_block_size
244  * returns 0 if equal, 1 if not equal, errcode if error
245  */
static int zms_flash_block_cmp(struct zms_fs *fs, uint64_t addr, const void *data, size_t len)
247 {
248 	const uint8_t *data8 = (const uint8_t *)data;
249 	int rc;
250 	size_t bytes_to_cmp;
251 	size_t block_size;
252 	uint8_t buf[ZMS_BLOCK_SIZE];
253 
254 	block_size = zms_round_down_write_block_size(fs, ZMS_BLOCK_SIZE);
255 
256 	while (len) {
257 		bytes_to_cmp = MIN(block_size, len);
258 		rc = zms_flash_rd(fs, addr, buf, bytes_to_cmp);
259 		if (rc) {
260 			return rc;
261 		}
262 		rc = memcmp(data8, buf, bytes_to_cmp);
263 		if (rc) {
264 			return 1;
265 		}
266 		len -= bytes_to_cmp;
267 		addr += bytes_to_cmp;
268 		data8 += bytes_to_cmp;
269 	}
270 	return 0;
271 }
272 
273 /* zms_flash_cmp_const compares the data in flash at addr to a constant
274  * value. returns 0 if all data in flash is equal to value, 1 if not equal,
275  * errcode if error
276  */
static int zms_flash_cmp_const(struct zms_fs *fs, uint64_t addr, uint8_t value, size_t len)
278 {
279 	int rc;
280 	size_t bytes_to_cmp;
281 	size_t block_size;
282 	uint8_t cmp[ZMS_BLOCK_SIZE];
283 
284 	block_size = zms_round_down_write_block_size(fs, ZMS_BLOCK_SIZE);
285 
286 	(void)memset(cmp, value, block_size);
287 	while (len) {
288 		bytes_to_cmp = MIN(block_size, len);
289 		rc = zms_flash_block_cmp(fs, addr, cmp, bytes_to_cmp);
290 		if (rc) {
291 			return rc;
292 		}
293 		len -= bytes_to_cmp;
294 		addr += bytes_to_cmp;
295 	}
296 	return 0;
297 }
298 
/* flash block move: moves a block at addr to the current data write location
 * and updates the data write location.
 */
static int zms_flash_block_move(struct zms_fs *fs, uint64_t addr, size_t len)
303 {
304 	int rc;
305 	size_t bytes_to_copy;
306 	size_t block_size;
307 	uint8_t buf[ZMS_BLOCK_SIZE];
308 
309 	block_size = zms_round_down_write_block_size(fs, ZMS_BLOCK_SIZE);
310 
311 	while (len) {
312 		bytes_to_copy = MIN(block_size, len);
313 		rc = zms_flash_rd(fs, addr, buf, bytes_to_copy);
314 		if (rc) {
315 			return rc;
316 		}
317 		rc = zms_flash_data_wrt(fs, buf, bytes_to_copy);
318 		if (rc) {
319 			return rc;
320 		}
321 		len -= bytes_to_copy;
322 		addr += bytes_to_copy;
323 	}
324 	return 0;
325 }
326 
/* erase a sector and verify the erase was OK.
 * return 0 if OK, error code on error.
 */
static int zms_flash_erase_sector(struct zms_fs *fs, uint64_t addr)
331 {
332 	int rc;
333 	off_t offset;
334 	bool ebw_required =
335 		flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT;
336 
337 	if (!ebw_required) {
338 		/* Do nothing for devices that do not have erase capability */
339 		return 0;
340 	}
341 
342 	addr &= ADDR_SECT_MASK;
343 	offset = zms_addr_to_offset(fs, addr);
344 
345 	LOG_DBG("Erasing flash at offset 0x%lx ( 0x%llx ), len %u", (long)offset, addr,
346 		fs->sector_size);
347 
348 #ifdef CONFIG_ZMS_LOOKUP_CACHE
349 	zms_lookup_cache_invalidate(fs, SECTOR_NUM(addr));
350 #endif
351 	rc = flash_erase(fs->flash_device, offset, fs->sector_size);
352 
353 	if (rc) {
354 		return rc;
355 	}
356 
357 	if (zms_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value, fs->sector_size)) {
358 		LOG_ERR("Failure while erasing the sector at offset 0x%lx", (long)offset);
359 		rc = -ENXIO;
360 	}
361 
362 	return rc;
363 }
364 
365 /* crc update on allocation entry */
static void zms_ate_crc8_update(struct zms_ate *entry)
367 {
368 	uint8_t crc8;
369 
370 	/* crc8 field is the first element of the structure, do not include it */
371 	crc8 = crc8_ccitt(0xff, (uint8_t *)entry + SIZEOF_FIELD(struct zms_ate, crc8),
372 			  sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8));
373 	entry->crc8 = crc8;
374 }
375 
376 /* crc check on allocation entry
377  * returns 0 if OK, 1 on crc fail
378  */
static int zms_ate_crc8_check(const struct zms_ate *entry)
380 {
381 	uint8_t crc8;
382 
383 	/* crc8 field is the first element of the structure, do not include it */
384 	crc8 = crc8_ccitt(0xff, (uint8_t *)entry + SIZEOF_FIELD(struct zms_ate, crc8),
385 			  sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8));
386 	if (crc8 == entry->crc8) {
387 		return 0;
388 	}
389 
390 	return 1;
391 }
392 
393 /* zms_ate_valid validates an ate in the current sector by checking if the ate crc is valid
394  * and its cycle cnt matches the cycle cnt of the active sector
395  *
396  * return 1 if ATE is valid,
397  *        0 otherwise
398  *
399  * see: zms_ate_valid_different_sector
400  */
static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry)
402 {
403 	return zms_ate_valid_different_sector(fs, entry, fs->sector_cycle);
404 }
405 
406 /* zms_ate_valid_different_sector validates an ate that is in a different
407  * sector than the active one. It takes as argument the cycle_cnt of the
408  * sector where the ATE to be validated is stored
409  *     return 1 if crc8 and cycle_cnt are valid,
410  *            0 otherwise
411  */
static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry,
					  uint8_t cycle_cnt)
414 {
415 	if ((cycle_cnt != entry->cycle_cnt) || zms_ate_crc8_check(entry)) {
416 		return 0;
417 	}
418 
419 	return 1;
420 }
421 
static inline int zms_get_cycle_on_sector_change(struct zms_fs *fs, uint64_t addr,
						 int previous_sector_num, uint8_t *cycle_cnt)
424 {
425 	int rc;
426 
427 	/* read the ate cycle only when we change the sector
428 	 * or if it is the first read
429 	 */
430 	if (SECTOR_NUM(addr) != previous_sector_num) {
431 		rc = zms_get_sector_cycle(fs, addr, cycle_cnt);
432 		if (rc == -ENOENT) {
433 			/* sector never used */
434 			*cycle_cnt = 0;
435 		} else if (rc) {
436 			/* bad flash read */
437 			return rc;
438 		}
439 	}
440 
441 	return 0;
442 }
443 
/* zms_close_ate_valid validates a sector close ATE.
 * A valid sector close ATE should be:
 * - a valid ATE
 * - with len = 0 and id = ZMS_HEAD_ID
 * - and an offset that lies a whole number of ATE sizes below the sector end
 * return true if valid, false otherwise
 */
static bool zms_close_ate_valid(struct zms_fs *fs, const struct zms_ate *entry)
452 {
453 	return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) &&
454 		(entry->id == ZMS_HEAD_ID) && !((fs->sector_size - entry->offset) % fs->ate_size));
455 }
456 
/* zms_empty_ate_valid validates a sector empty ATE.
 * A valid sector empty ATE should be:
 * - a valid ATE
 * - with len = 0xffff and id = 0xffffffff
 * return true if valid, false otherwise
 */
static bool zms_empty_ate_valid(struct zms_fs *fs, const struct zms_ate *entry)
464 {
465 	return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) &&
466 		(entry->len == 0xffff) && (entry->id == ZMS_HEAD_ID));
467 }
468 
469 /* zms_gc_done_ate_valid validates a garbage collector done ATE
470  * Valid gc_done_ate:
471  * - valid ate
472  * - len = 0
473  * - id = 0xffffffff
474  * return true if valid, false otherwise
475  */
static bool zms_gc_done_ate_valid(struct zms_fs *fs, const struct zms_ate *entry)
477 {
478 	return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) &&
479 		(entry->id == ZMS_HEAD_ID));
480 }
481 
/* Read the empty and close ATEs of the sector to which address "addr" belongs and
 * validate that the sector is closed.
 * retval: 0 if the sector is not closed
 * retval: 1 if the sector is closed
 * retval: < 0 if reading the header failed.
 */
static int zms_validate_closed_sector(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
				      struct zms_ate *close_ate)
490 {
491 	int rc;
492 
493 	/* read the header ATEs */
494 	rc = zms_get_sector_header(fs, addr, empty_ate, close_ate);
495 	if (rc) {
496 		return rc;
497 	}
498 
499 	if (zms_empty_ate_valid(fs, empty_ate) && zms_close_ate_valid(fs, close_ate) &&
500 	    (empty_ate->cycle_cnt == close_ate->cycle_cnt)) {
501 		/* Closed sector validated */
502 		return 1;
503 	}
504 
505 	return 0;
506 }
507 
508 /* store an entry in flash */
static int zms_flash_write_entry(struct zms_fs *fs, uint32_t id, const void *data, size_t len)
510 {
511 	int rc;
512 	struct zms_ate entry;
513 
514 	/* Initialize all members to 0 */
515 	memset(&entry, 0, sizeof(struct zms_ate));
516 
517 	entry.id = id;
518 	entry.len = (uint16_t)len;
519 	entry.cycle_cnt = fs->sector_cycle;
520 
521 	if (len > ZMS_DATA_IN_ATE_SIZE) {
522 		/* only compute CRC if len is greater than 8 bytes */
523 		if (IS_ENABLED(CONFIG_ZMS_DATA_CRC)) {
524 			entry.data_crc = crc32_ieee(data, len);
525 		}
526 		entry.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra);
527 	} else if ((len > 0) && (len <= ZMS_DATA_IN_ATE_SIZE)) {
		/* Copy data into the entry for small data (<= 8 bytes) */
529 		memcpy(&entry.data, data, len);
530 	}
531 
532 	zms_ate_crc8_update(&entry);
533 
534 	if (len > ZMS_DATA_IN_ATE_SIZE) {
535 		rc = zms_flash_data_wrt(fs, data, len);
536 		if (rc) {
537 			return rc;
538 		}
539 	}
540 
541 	rc = zms_flash_ate_wrt(fs, &entry);
542 	if (rc) {
543 		return rc;
544 	}
545 
546 	return 0;
547 }
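/* Example of the two write paths above (IDs and sizes are illustrative; the 8-byte
 * threshold is ZMS_DATA_IN_ATE_SIZE, per the comments above):
 * - zms_flash_write_entry(fs, 0x1234, &u32_val, 4) stores the 4 bytes directly inside
 *   entry.data and writes only the ATE at fs->ate_wra.
 * - zms_flash_write_entry(fs, 0x1234, buf, 100) first writes the 100 bytes at
 *   fs->data_wra, then writes an ATE whose offset points to that data (and, with
 *   CONFIG_ZMS_DATA_CRC, whose data_crc covers it).
 */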
548 
549 /* end of flash routines */
550 
551 /* Search for the last valid ATE written in a sector and also update data write address
552  */
static int zms_recover_last_ate(struct zms_fs *fs, uint64_t *addr, uint64_t *data_wra)
554 {
555 	uint64_t data_end_addr;
556 	uint64_t ate_end_addr;
557 	struct zms_ate end_ate;
558 	int rc;
559 
560 	LOG_DBG("Recovering last ate from sector %llu", SECTOR_NUM(*addr));
561 
562 	/* skip close and empty ATE */
563 	*addr -= 2 * fs->ate_size;
564 
565 	ate_end_addr = *addr;
566 	data_end_addr = *addr & ADDR_SECT_MASK;
567 	/* Initialize the data_wra to the first address of the sector */
568 	*data_wra = data_end_addr;
569 
570 	while (ate_end_addr > data_end_addr) {
571 		rc = zms_flash_ate_rd(fs, ate_end_addr, &end_ate);
572 		if (rc) {
573 			return rc;
574 		}
575 		if (zms_ate_valid(fs, &end_ate)) {
576 			/* found a valid ate, update data_end_addr and *addr */
577 			data_end_addr &= ADDR_SECT_MASK;
578 			if (end_ate.len > ZMS_DATA_IN_ATE_SIZE) {
579 				data_end_addr += end_ate.offset + zms_al_size(fs, end_ate.len);
580 				*data_wra = data_end_addr;
581 			}
582 			*addr = ate_end_addr;
583 		}
584 		ate_end_addr -= fs->ate_size;
585 	}
586 
587 	return 0;
588 }
589 
590 /* compute previous addr of ATE */
static int zms_compute_prev_addr(struct zms_fs *fs, uint64_t *addr)
592 {
593 	int sec_closed;
594 	struct zms_ate empty_ate;
595 	struct zms_ate close_ate;
596 
597 	*addr += fs->ate_size;
598 	if ((SECTOR_OFFSET(*addr)) != (fs->sector_size - 2 * fs->ate_size)) {
599 		return 0;
600 	}
601 
	/* last ATE in the sector, jump to the previous sector */
603 	if (SECTOR_NUM(*addr) == 0U) {
604 		*addr += ((uint64_t)(fs->sector_count - 1) << ADDR_SECT_SHIFT);
605 	} else {
606 		*addr -= (1ULL << ADDR_SECT_SHIFT);
607 	}
608 
609 	/* verify if the sector is closed */
610 	sec_closed = zms_validate_closed_sector(fs, *addr, &empty_ate, &close_ate);
611 	if (sec_closed < 0) {
612 		return sec_closed;
613 	}
614 
615 	/* Non Closed Sector */
616 	if (!sec_closed) {
617 		/* at the end of filesystem */
618 		*addr = fs->ate_wra;
619 		return 0;
620 	}
621 
622 	/* Update the address here because the header ATEs are valid.*/
623 	(*addr) &= ADDR_SECT_MASK;
624 	(*addr) += close_ate.offset;
625 
626 	return 0;
627 }
628 
629 /* walking through allocation entry list, from newest to oldest entries
630  * read ate from addr, modify addr to the previous ate
631  */
static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate)
633 {
634 	int rc;
635 
636 	rc = zms_flash_ate_rd(fs, *addr, ate);
637 	if (rc) {
638 		return rc;
639 	}
640 
641 	return zms_compute_prev_addr(fs, addr);
642 }
643 
static void zms_sector_advance(struct zms_fs *fs, uint64_t *addr)
645 {
646 	*addr += (1ULL << ADDR_SECT_SHIFT);
647 	if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) {
648 		*addr -= ((uint64_t)fs->sector_count << ADDR_SECT_SHIFT);
649 	}
650 }
651 
652 /* allocation entry close (this closes the current sector) by writing offset
653  * of last ate to the sector end.
654  */
static int zms_sector_close(struct zms_fs *fs)
656 {
657 	int rc;
658 	struct zms_ate close_ate;
659 	struct zms_ate garbage_ate;
660 
661 	close_ate.id = ZMS_HEAD_ID;
662 	close_ate.len = 0U;
663 	close_ate.offset = (uint32_t)SECTOR_OFFSET(fs->ate_wra + fs->ate_size);
664 	close_ate.metadata = 0xffffffff;
665 	close_ate.cycle_cnt = fs->sector_cycle;
666 
	/* When we close the sector, we must fill all unused ATE slots with
	 * non valid (junk) ATEs.
	 * This is needed to avoid corner cases where some ATEs are never
	 * overwritten and become valid again when the cycle counter wraps
	 * around to the same value as the old ATE.
	 * Example:
	 * - An ATE with cycle_cnt == 0 is written as the last ATE of the sector
	 * - This ATE is never overwritten during the next 255 cycles because of
	 *   large data sizes
	 * - On the 256th cycle the leading cycle_cnt is 0 again, so this ATE
	 *   would be considered valid even though it is not.
	 */
679 	memset(&garbage_ate, fs->flash_parameters->erase_value, sizeof(garbage_ate));
680 	while (SECTOR_OFFSET(fs->ate_wra) && (fs->ate_wra >= fs->data_wra)) {
681 		rc = zms_flash_ate_wrt(fs, &garbage_ate);
682 		if (rc) {
683 			return rc;
684 		}
685 	}
686 
687 	fs->ate_wra = zms_close_ate_addr(fs, fs->ate_wra);
688 
689 	zms_ate_crc8_update(&close_ate);
690 
691 	(void)zms_flash_ate_wrt(fs, &close_ate);
692 
693 	zms_sector_advance(fs, &fs->ate_wra);
694 
695 	rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle);
696 	if (rc == -ENOENT) {
697 		/* sector never used */
698 		fs->sector_cycle = 0;
699 	} else if (rc) {
700 		/* bad flash read */
701 		return rc;
702 	}
703 
704 	fs->data_wra = fs->ate_wra & ADDR_SECT_MASK;
705 
706 	return 0;
707 }
708 
static int zms_add_gc_done_ate(struct zms_fs *fs)
710 {
711 	struct zms_ate gc_done_ate;
712 
713 	LOG_DBG("Adding gc done ate at %llx", fs->ate_wra);
714 	gc_done_ate.id = ZMS_HEAD_ID;
715 	gc_done_ate.len = 0U;
716 	gc_done_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra);
717 	gc_done_ate.metadata = 0xffffffff;
718 	gc_done_ate.cycle_cnt = fs->sector_cycle;
719 
720 	zms_ate_crc8_update(&gc_done_ate);
721 
722 	return zms_flash_ate_wrt(fs, &gc_done_ate);
723 }
724 
static int zms_add_empty_ate(struct zms_fs *fs, uint64_t addr)
726 {
727 	struct zms_ate empty_ate;
728 	uint8_t cycle_cnt;
729 	int rc = 0;
730 	uint64_t previous_ate_wra;
731 
732 	addr &= ADDR_SECT_MASK;
733 
734 	LOG_DBG("Adding empty ate at %llx", (uint64_t)(addr + fs->sector_size - fs->ate_size));
735 	empty_ate.id = ZMS_HEAD_ID;
736 	empty_ate.len = 0xffff;
737 	empty_ate.offset = 0U;
738 	empty_ate.metadata =
739 		FIELD_PREP(ZMS_MAGIC_NUMBER_MASK, ZMS_MAGIC_NUMBER) | ZMS_DEFAULT_VERSION;
740 
741 	rc = zms_get_sector_cycle(fs, addr, &cycle_cnt);
742 	if (rc == -ENOENT) {
743 		/* sector never used */
744 		cycle_cnt = 0;
745 	} else if (rc) {
746 		/* bad flash read */
747 		return rc;
748 	}
749 
750 	/* increase cycle counter */
751 	empty_ate.cycle_cnt = (cycle_cnt + 1) % BIT(8);
752 	zms_ate_crc8_update(&empty_ate);
753 
754 	/* Adding empty ate to this sector changes fs->ate_wra value
755 	 * Restore the ate_wra of the current sector after this
756 	 */
757 	previous_ate_wra = fs->ate_wra;
758 	fs->ate_wra = zms_empty_ate_addr(fs, addr);
759 	rc = zms_flash_ate_wrt(fs, &empty_ate);
760 	if (rc) {
761 		return rc;
762 	}
763 	fs->ate_wra = previous_ate_wra;
764 
765 	return 0;
766 }
767 
static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt)
769 {
770 	int rc;
771 	struct zms_ate empty_ate;
772 	uint64_t empty_addr;
773 
774 	empty_addr = zms_empty_ate_addr(fs, addr);
775 
776 	/* read the cycle counter of the current sector */
777 	rc = zms_flash_ate_rd(fs, empty_addr, &empty_ate);
778 	if (rc < 0) {
779 		/* flash error */
780 		return rc;
781 	}
782 
783 	if (zms_empty_ate_valid(fs, &empty_ate)) {
784 		*cycle_cnt = empty_ate.cycle_cnt;
785 		return 0;
786 	}
787 
788 	/* there is no empty ATE in this sector */
789 	return -ENOENT;
790 }
791 
static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
				 struct zms_ate *close_ate)
794 {
795 	int rc;
796 	uint64_t close_addr;
797 
798 	close_addr = zms_close_ate_addr(fs, addr);
799 	/* read the second ate in the sector to get the close ATE */
800 	rc = zms_flash_ate_rd(fs, close_addr, close_ate);
801 	if (rc) {
802 		return rc;
803 	}
804 
805 	/* read the first ate in the sector to get the empty ATE */
806 	rc = zms_flash_ate_rd(fs, close_addr + fs->ate_size, empty_ate);
807 	if (rc) {
808 		return rc;
809 	}
810 
811 	return 0;
812 }
813 
814 /**
815  * @brief Helper to find an ATE using its ID
816  *
817  * @param fs Pointer to file system
818  * @param id Id of the entry to be found
819  * @param start_addr Address from where the search will start
820  * @param end_addr Address where the search will stop
821  * @param ate pointer to the found ATE if it exists
822  * @param ate_addr Pointer to the address of the found ATE
823  *
824  * @retval 0 No ATE is found
825  * @retval 1 valid ATE with same ID found
826  * @retval < 0 An error happened
827  */
static int zms_find_ate_with_id(struct zms_fs *fs, uint32_t id, uint64_t start_addr,
				uint64_t end_addr, struct zms_ate *ate, uint64_t *ate_addr)
830 {
831 	int rc;
832 	int previous_sector_num = ZMS_INVALID_SECTOR_NUM;
833 	uint64_t wlk_prev_addr;
834 	uint64_t wlk_addr;
835 	int prev_found = 0;
836 	struct zms_ate wlk_ate;
837 	uint8_t current_cycle;
838 
839 	wlk_addr = start_addr;
840 
841 	do {
842 		wlk_prev_addr = wlk_addr;
843 		rc = zms_prev_ate(fs, &wlk_addr, &wlk_ate);
844 		if (rc) {
845 			return rc;
846 		}
847 		if (wlk_ate.id == id) {
848 			/* read the ate cycle only when we change the sector or if it is
849 			 * the first read ( previous_sector_num == ZMS_INVALID_SECTOR_NUM).
850 			 */
851 			rc = zms_get_cycle_on_sector_change(fs, wlk_prev_addr, previous_sector_num,
852 							    &current_cycle);
853 			if (rc) {
854 				return rc;
855 			}
856 			if (zms_ate_valid_different_sector(fs, &wlk_ate, current_cycle)) {
857 				prev_found = 1;
858 				break;
859 			}
860 			previous_sector_num = SECTOR_NUM(wlk_prev_addr);
861 		}
862 	} while (wlk_addr != end_addr);
863 
864 	*ate = wlk_ate;
865 	*ate_addr = wlk_prev_addr;
866 
867 	return prev_found;
868 }
869 
870 /* garbage collection: the address ate_wra has been updated to the new sector
871  * that has just been started. The data to gc is in the sector after this new
872  * sector.
873  */
static int zms_gc(struct zms_fs *fs)
875 {
876 	int rc;
877 	int sec_closed;
878 	struct zms_ate close_ate;
879 	struct zms_ate gc_ate;
880 	struct zms_ate wlk_ate;
881 	struct zms_ate empty_ate;
882 	uint64_t sec_addr;
883 	uint64_t gc_addr;
884 	uint64_t gc_prev_addr;
885 	uint64_t wlk_addr;
886 	uint64_t wlk_prev_addr;
887 	uint64_t data_addr;
888 	uint64_t stop_addr;
889 	uint8_t previous_cycle = 0;
890 
891 	rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle);
892 	if (rc == -ENOENT) {
893 		/* Erase this new unused sector if needed */
894 		rc = zms_flash_erase_sector(fs, fs->ate_wra);
895 		if (rc) {
896 			return rc;
897 		}
898 		/* sector never used */
899 		rc = zms_add_empty_ate(fs, fs->ate_wra);
900 		if (rc) {
901 			return rc;
902 		}
		/* At this step we are sure that the empty ATE exists.
		 * If not, then there is an I/O problem.
		 */
906 		rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle);
907 		if (rc) {
908 			return rc;
909 		}
910 	} else if (rc) {
911 		/* bad flash read */
912 		return rc;
913 	}
914 	previous_cycle = fs->sector_cycle;
915 
916 	sec_addr = (fs->ate_wra & ADDR_SECT_MASK);
917 	zms_sector_advance(fs, &sec_addr);
918 	gc_addr = sec_addr + fs->sector_size - fs->ate_size;
919 
920 	/* verify if the sector is closed */
921 	sec_closed = zms_validate_closed_sector(fs, gc_addr, &empty_ate, &close_ate);
922 	if (sec_closed < 0) {
923 		return sec_closed;
924 	}
925 
926 	/* if the sector is not closed don't do gc */
927 	if (!sec_closed) {
928 		goto gc_done;
929 	}
930 
931 	/* update sector_cycle */
932 	fs->sector_cycle = empty_ate.cycle_cnt;
933 
934 	/* stop_addr points to the first ATE before the header ATEs */
935 	stop_addr = gc_addr - 2 * fs->ate_size;
936 	/* At this step empty & close ATEs are valid.
937 	 * let's start the GC
938 	 */
939 	gc_addr &= ADDR_SECT_MASK;
940 	gc_addr += close_ate.offset;
941 
942 	do {
943 		gc_prev_addr = gc_addr;
944 		rc = zms_prev_ate(fs, &gc_addr, &gc_ate);
945 		if (rc) {
946 			return rc;
947 		}
948 
949 		if (!zms_ate_valid(fs, &gc_ate) || !gc_ate.len) {
950 			continue;
951 		}
952 
953 #ifdef CONFIG_ZMS_LOOKUP_CACHE
954 		wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(gc_ate.id)];
955 
956 		if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) {
957 			wlk_addr = fs->ate_wra;
958 		}
959 #else
960 		wlk_addr = fs->ate_wra;
961 #endif
962 
963 		/* Initialize the wlk_prev_addr as if no previous ID will be found */
964 		wlk_prev_addr = gc_prev_addr;
965 		/* Search for a previous valid ATE with the same ID. If it doesn't exist
966 		 * then wlk_prev_addr will be equal to gc_prev_addr.
967 		 */
968 		rc = zms_find_ate_with_id(fs, gc_ate.id, wlk_addr, fs->ate_wra, &wlk_ate,
969 					  &wlk_prev_addr);
970 		if (rc < 0) {
971 			return rc;
972 		}
973 
974 		/* if walk_addr has reached the same address as gc_addr, a copy is
975 		 * needed unless it is a deleted item.
976 		 */
977 		if (wlk_prev_addr == gc_prev_addr) {
978 			/* copy needed */
979 			LOG_DBG("Moving %d, len %d", gc_ate.id, gc_ate.len);
980 
981 			if (gc_ate.len > ZMS_DATA_IN_ATE_SIZE) {
982 				/* Copy Data only when len > 8
983 				 * Otherwise, Data is already inside ATE
984 				 */
985 				data_addr = (gc_prev_addr & ADDR_SECT_MASK);
986 				data_addr += gc_ate.offset;
987 				gc_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra);
988 
989 				rc = zms_flash_block_move(fs, data_addr, gc_ate.len);
990 				if (rc) {
991 					return rc;
992 				}
993 			}
994 
995 			gc_ate.cycle_cnt = previous_cycle;
996 			zms_ate_crc8_update(&gc_ate);
997 			rc = zms_flash_ate_wrt(fs, &gc_ate);
998 			if (rc) {
999 				return rc;
1000 			}
1001 		}
1002 	} while (gc_prev_addr != stop_addr);
1003 
1004 gc_done:
1005 
1006 	/* restore the previous sector_cycle */
1007 	fs->sector_cycle = previous_cycle;
1008 
1009 	/* Write a GC_done ATE to mark the end of this operation
1010 	 */
1011 
1012 	rc = zms_add_gc_done_ate(fs);
1013 	if (rc) {
1014 		return rc;
1015 	}
1016 
1017 	/* Erase the GC'ed sector when needed */
1018 	rc = zms_flash_erase_sector(fs, sec_addr);
1019 	if (rc) {
1020 		return rc;
1021 	}
1022 
1023 #ifdef CONFIG_ZMS_LOOKUP_CACHE
1024 	zms_lookup_cache_invalidate(fs, sec_addr >> ADDR_SECT_SHIFT);
1025 #endif
1026 	rc = zms_add_empty_ate(fs, sec_addr);
1027 
1028 	return rc;
1029 }
1030 
int zms_clear(struct zms_fs *fs)
1032 {
1033 	int rc;
1034 	uint64_t addr;
1035 
1036 	if (!fs->ready) {
1037 		LOG_ERR("zms not initialized");
1038 		return -EACCES;
1039 	}
1040 
1041 	k_mutex_lock(&fs->zms_lock, K_FOREVER);
1042 	for (uint32_t i = 0; i < fs->sector_count; i++) {
1043 		addr = (uint64_t)i << ADDR_SECT_SHIFT;
1044 		rc = zms_flash_erase_sector(fs, addr);
1045 		if (rc) {
1046 			goto end;
1047 		}
1048 		rc = zms_add_empty_ate(fs, addr);
1049 		if (rc) {
1050 			goto end;
1051 		}
1052 	}
1053 
1054 	/* zms needs to be reinitialized after clearing */
1055 	fs->ready = false;
1056 
end:
	k_mutex_unlock(&fs->zms_lock);

	return rc;
1061 }
1062 
static int zms_init(struct zms_fs *fs)
1064 {
1065 	int rc;
1066 	int sec_closed;
1067 	struct zms_ate last_ate;
1068 	struct zms_ate first_ate;
1069 	struct zms_ate close_ate;
1070 	struct zms_ate empty_ate;
1071 	uint64_t addr = 0U;
1072 	uint64_t data_wra = 0U;
1073 	uint32_t i;
1074 	uint32_t closed_sectors = 0;
1075 	bool zms_magic_exist = false;
1076 
1077 	k_mutex_lock(&fs->zms_lock, K_FOREVER);
1078 
	/* step through the sectors to find an open sector following
	 * a closed sector; this is where zms can write.
	 */
1082 
1083 	for (i = 0; i < fs->sector_count; i++) {
1084 		addr = zms_close_ate_addr(fs, ((uint64_t)i << ADDR_SECT_SHIFT));
1085 
1086 		/* verify if the sector is closed */
1087 		sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate);
1088 		if (sec_closed < 0) {
1089 			rc = sec_closed;
1090 			goto end;
1091 		}
1092 		/* update cycle count */
1093 		fs->sector_cycle = empty_ate.cycle_cnt;
1094 
1095 		if (sec_closed == 1) {
1096 			/* closed sector */
1097 			closed_sectors++;
1098 			/* Let's verify that this is a ZMS storage system */
1099 			if (ZMS_GET_MAGIC_NUMBER(empty_ate.metadata) == ZMS_MAGIC_NUMBER) {
1100 				zms_magic_exist = true;
1101 				/* Let's check that we support this ZMS version */
1102 				if (ZMS_GET_VERSION(empty_ate.metadata) != ZMS_DEFAULT_VERSION) {
1103 					LOG_ERR("ZMS Version is not supported");
1104 					rc = -ENOEXEC;
1105 					goto end;
1106 				}
1107 			}
1108 
1109 			zms_sector_advance(fs, &addr);
1110 			/* addr is pointing to the close ATE */
1111 			/* verify if the sector is Open */
1112 			sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate);
1113 			if (sec_closed < 0) {
1114 				rc = sec_closed;
1115 				goto end;
1116 			}
1117 			/* update cycle count */
1118 			fs->sector_cycle = empty_ate.cycle_cnt;
1119 
1120 			if (!sec_closed) {
1121 				/* We found an Open sector following a closed one */
1122 				break;
1123 			}
1124 		}
1125 	}
1126 	/* all sectors are closed, and zms magic number not found. This is not a zms fs */
1127 	if ((closed_sectors == fs->sector_count) && !zms_magic_exist) {
1128 		rc = -EDEADLK;
1129 		goto end;
1130 	}
1131 	/* TODO: add a recovery mechanism here if the ZMS magic number exist but all
1132 	 * sectors are closed
1133 	 */
1134 
1135 	if (i == fs->sector_count) {
1136 		/* none of the sectors were closed, which means that the first
1137 		 * sector is the one in use, except if there are only 2 sectors.
1138 		 * Let's check if the last sector has valid ATEs otherwise set
1139 		 * the open sector to the first one.
1140 		 */
1141 		rc = zms_flash_ate_rd(fs, addr - fs->ate_size, &first_ate);
1142 		if (rc) {
1143 			goto end;
1144 		}
1145 		if (!zms_ate_valid(fs, &first_ate)) {
1146 			zms_sector_advance(fs, &addr);
1147 		}
1148 		rc = zms_get_sector_header(fs, addr, &empty_ate, &close_ate);
1149 		if (rc) {
1150 			goto end;
1151 		}
1152 
1153 		if (zms_empty_ate_valid(fs, &empty_ate)) {
1154 			/* Empty ATE is valid, let's verify that this is a ZMS storage system */
1155 			if (ZMS_GET_MAGIC_NUMBER(empty_ate.metadata) == ZMS_MAGIC_NUMBER) {
1156 				zms_magic_exist = true;
1157 				/* Let's check the version */
1158 				if (ZMS_GET_VERSION(empty_ate.metadata) != ZMS_DEFAULT_VERSION) {
1159 					LOG_ERR("ZMS Version is not supported");
1160 					rc = -ENOEXEC;
1161 					goto end;
1162 				}
1163 			}
1164 		} else {
1165 			rc = zms_flash_erase_sector(fs, addr);
1166 			if (rc) {
1167 				goto end;
1168 			}
1169 			rc = zms_add_empty_ate(fs, addr);
1170 			if (rc) {
1171 				goto end;
1172 			}
1173 		}
1174 		rc = zms_get_sector_cycle(fs, addr, &fs->sector_cycle);
1175 		if (rc == -ENOENT) {
1176 			/* sector never used */
1177 			fs->sector_cycle = 0;
1178 		} else if (rc) {
1179 			/* bad flash read */
1180 			goto end;
1181 		}
1182 	}
1183 
1184 	/* addr contains address of closing ate in the most recent sector,
1185 	 * search for the last valid ate using the recover_last_ate routine
1186 	 * and also update the data_wra
1187 	 */
1188 	rc = zms_recover_last_ate(fs, &addr, &data_wra);
1189 	if (rc) {
1190 		goto end;
1191 	}
1192 
1193 	/* addr contains address of the last valid ate in the most recent sector
1194 	 * data_wra contains the data write address of the current sector
1195 	 */
1196 	fs->ate_wra = addr;
1197 	fs->data_wra = data_wra;
1198 
1199 	/* fs->ate_wra should point to the next available entry. This is normally
1200 	 * the next position after the one found by the recovery function.
1201 	 * Let's verify that it doesn't contain any valid ATE, otherwise search for
1202 	 * an empty position
1203 	 */
1204 	while (fs->ate_wra >= fs->data_wra) {
1205 		rc = zms_flash_ate_rd(fs, fs->ate_wra, &last_ate);
1206 		if (rc) {
1207 			goto end;
1208 		}
1209 		if (!zms_ate_valid(fs, &last_ate)) {
1210 			/* found empty location */
1211 			break;
1212 		}
1213 
		/* the ATE in the last position within the sector is
		 * reserved for deleting an entry
		 */
1217 		if ((fs->ate_wra == fs->data_wra) && last_ate.len) {
1218 			/* not a delete ate */
1219 			rc = -ESPIPE;
1220 			goto end;
1221 		}
1222 
1223 		fs->ate_wra -= fs->ate_size;
1224 	}
1225 
1226 	/* The sector after the write sector is either empty with a valid empty ATE (regular case)
1227 	 * or it has never been used or it is a closed sector (GC didn't finish)
1228 	 * If it is a closed sector we must look for a valid GC done ATE in the current write
1229 	 * sector, if it is missing, we need to restart gc because it has been interrupted.
1230 	 * If no valid empty ATE is found then it has never been used. Just erase it by adding
1231 	 * a valid empty ATE.
1232 	 * When gc needs to be restarted, first erase the sector by adding an empty
1233 	 * ATE otherwise the data might not fit into the sector.
1234 	 */
1235 	addr = zms_close_ate_addr(fs, fs->ate_wra);
1236 	zms_sector_advance(fs, &addr);
1237 
1238 	/* verify if the sector is closed */
1239 	sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate);
1240 	if (sec_closed < 0) {
1241 		rc = sec_closed;
1242 		goto end;
1243 	}
1244 
1245 	if (sec_closed == 1) {
		/* The sector after fs->ate_wra is closed.
		 * Look for a marker (gc_done_ate) that indicates that gc was finished.
		 */
1249 		bool gc_done_marker = false;
1250 		struct zms_ate gc_done_ate;
1251 
1252 		fs->sector_cycle = empty_ate.cycle_cnt;
1253 		addr = fs->ate_wra + fs->ate_size;
1254 		while (SECTOR_OFFSET(addr) < (fs->sector_size - 2 * fs->ate_size)) {
1255 			rc = zms_flash_ate_rd(fs, addr, &gc_done_ate);
1256 			if (rc) {
1257 				goto end;
1258 			}
1259 
			if (zms_gc_done_ate_valid(fs, &gc_done_ate)) {
				gc_done_marker = true;
				break;
			}
1263 			addr += fs->ate_size;
1264 		}
1265 
1266 		if (gc_done_marker) {
1267 			/* erase the next sector */
1268 			LOG_INF("GC Done marker found");
1269 			addr = fs->ate_wra & ADDR_SECT_MASK;
1270 			zms_sector_advance(fs, &addr);
1271 			rc = zms_flash_erase_sector(fs, addr);
1272 			if (rc < 0) {
1273 				goto end;
1274 			}
1275 			rc = zms_add_empty_ate(fs, addr);
1276 			goto end;
1277 		}
1278 		LOG_INF("No GC Done marker found: restarting gc");
1279 		rc = zms_flash_erase_sector(fs, fs->ate_wra);
1280 		if (rc) {
1281 			goto end;
1282 		}
1283 		rc = zms_add_empty_ate(fs, fs->ate_wra);
1284 		if (rc) {
1285 			goto end;
1286 		}
1287 
1288 		/* Let's point to the first writable position */
1289 		fs->ate_wra &= ADDR_SECT_MASK;
1290 		fs->ate_wra += (fs->sector_size - 3 * fs->ate_size);
1291 		fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK);
1292 #ifdef CONFIG_ZMS_LOOKUP_CACHE
		/*
		 * At this point the lookup cache hasn't been built yet, but the gc function
		 * needs to use it. So, temporarily, we point every cache entry to the end of
		 * the fs. The cache will be rebuilt afterwards.
		 */
1298 		for (i = 0; i < CONFIG_ZMS_LOOKUP_CACHE_SIZE; i++) {
1299 			fs->lookup_cache[i] = fs->ate_wra;
1300 		}
1301 #endif
1302 		rc = zms_gc(fs);
1303 		goto end;
1304 	}
1305 
1306 end:
1307 #ifdef CONFIG_ZMS_LOOKUP_CACHE
1308 	if (!rc) {
1309 		rc = zms_lookup_cache_rebuild(fs);
1310 	}
1311 #endif
1312 	/* If the sector is empty add a gc done ate to avoid having insufficient
1313 	 * space when doing gc.
1314 	 */
1315 	if ((!rc) && (SECTOR_OFFSET(fs->ate_wra) == (fs->sector_size - 3 * fs->ate_size))) {
1316 		rc = zms_add_gc_done_ate(fs);
1317 	}
1318 	k_mutex_unlock(&fs->zms_lock);
1319 
1320 	return rc;
1321 }
1322 
int zms_mount(struct zms_fs *fs)
1324 {
1325 	int rc;
1326 	struct flash_pages_info info;
1327 	size_t write_block_size;
1328 
1329 	k_mutex_init(&fs->zms_lock);
1330 
1331 	fs->flash_parameters = flash_get_parameters(fs->flash_device);
1332 	if (fs->flash_parameters == NULL) {
1333 		LOG_ERR("Could not obtain flash parameters");
1334 		return -EINVAL;
1335 	}
1336 
1337 	fs->ate_size = zms_al_size(fs, sizeof(struct zms_ate));
1338 	write_block_size = fs->flash_parameters->write_block_size;
1339 
1340 	/* check that the write block size is supported */
1341 	if (write_block_size > ZMS_BLOCK_SIZE || write_block_size == 0) {
1342 		LOG_ERR("Unsupported write block size");
1343 		return -EINVAL;
1344 	}
1345 
	/* When the device needs erase operations before write, check that the
	 * sector size is a multiple of the page size
	 */
1349 	if (flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT) {
1350 		rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info);
1351 		if (rc) {
1352 			LOG_ERR("Unable to get page info");
1353 			return -EINVAL;
1354 		}
1355 		if (!fs->sector_size || fs->sector_size % info.size) {
1356 			LOG_ERR("Invalid sector size");
1357 			return -EINVAL;
1358 		}
1359 	}
1360 
1361 	/* we need at least 5 aligned ATEs size as the minimum sector size
1362 	 * 1 close ATE, 1 empty ATE, 1 GC done ATE, 1 Delete ATE, 1 ID/Value ATE
1363 	 */
	if (fs->sector_size < ZMS_MIN_ATE_NUM * fs->ate_size) {
		LOG_ERR("Invalid sector size, should be at least %zu",
			ZMS_MIN_ATE_NUM * fs->ate_size);
		return -EINVAL;
	}
1368 
1369 	/* check the number of sectors, it should be at least 2 */
1370 	if (fs->sector_count < 2) {
1371 		LOG_ERR("Configuration error - sector count below minimum requirement (2)");
1372 		return -EINVAL;
1373 	}
1374 
1375 	rc = zms_init(fs);
1376 
1377 	if (rc) {
1378 		return rc;
1379 	}
1380 
1381 	/* zms is ready for use */
1382 	fs->ready = true;
1383 
1384 	LOG_INF("%u Sectors of %u bytes", fs->sector_count, fs->sector_size);
1385 	LOG_INF("alloc wra: %llu, %llx", SECTOR_NUM(fs->ate_wra), SECTOR_OFFSET(fs->ate_wra));
1386 	LOG_INF("data wra: %llu, %llx", SECTOR_NUM(fs->data_wra), SECTOR_OFFSET(fs->data_wra));
1387 
1388 	return 0;
1389 }
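/* Minimal mount sketch (assumptions: the fixed-partition helpers from
 * <zephyr/storage/flash_map.h> and a devicetree partition named storage_partition with
 * 4 sectors of 4 KiB; adjust to the actual board/partition layout):
 *
 *	static struct zms_fs fs = {
 *		.flash_device = FIXED_PARTITION_DEVICE(storage_partition),
 *		.offset = FIXED_PARTITION_OFFSET(storage_partition),
 *		.sector_size = 4096,
 *		.sector_count = 4,
 *	};
 *
 *	int err = zms_mount(&fs);
 *	if (err) {
 *		// handle -EINVAL, -EDEADLK or flash driver errors
 *	}
 */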
1390 
ssize_t zms_write(struct zms_fs *fs, uint32_t id, const void *data, size_t len)
1392 {
1393 	int rc;
1394 	size_t data_size;
1395 	uint64_t wlk_addr;
1396 	uint64_t rd_addr;
1397 	uint32_t gc_count;
1398 	uint32_t required_space = 0U; /* no space, appropriate for delete ate */
1399 
1400 	if (!fs->ready) {
1401 		LOG_ERR("zms not initialized");
1402 		return -EACCES;
1403 	}
1404 
1405 	data_size = zms_al_size(fs, len);
1406 
	/* The maximum data size is the sector size minus 5 ATEs,
	 * where: 1 ATE for data, 1 ATE for sector close, 1 ATE for empty,
	 * 1 ATE for gc done, and 1 ATE to always allow a delete.
	 * We also cannot store more than 64 KB of data
	 */
1412 	if ((len > (fs->sector_size - 5 * fs->ate_size)) || (len > UINT16_MAX) ||
1413 	    ((len > 0) && (data == NULL))) {
1414 		return -EINVAL;
1415 	}
1416 
1417 	/* find latest entry with same id */
1418 #ifdef CONFIG_ZMS_LOOKUP_CACHE
1419 	wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)];
1420 
1421 	if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) {
1422 		goto no_cached_entry;
1423 	}
1424 #else
1425 	wlk_addr = fs->ate_wra;
1426 #endif
1427 	rd_addr = wlk_addr;
1428 
1429 #ifdef CONFIG_ZMS_NO_DOUBLE_WRITE
1430 	/* Search for a previous valid ATE with the same ID */
1431 	struct zms_ate wlk_ate;
1432 	int prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate, &rd_addr);
1433 	if (prev_found < 0) {
1434 		return prev_found;
1435 	}
1436 
1437 	if (prev_found) {
1438 		/* previous entry found */
1439 		if (len > ZMS_DATA_IN_ATE_SIZE) {
1440 			rd_addr &= ADDR_SECT_MASK;
1441 			rd_addr += wlk_ate.offset;
1442 		}
1443 
1444 		if (len == 0) {
1445 			/* do not try to compare with empty data */
1446 			if (wlk_ate.len == 0U) {
1447 				/* skip delete entry as it is already the
1448 				 * last one
1449 				 */
1450 				return 0;
1451 			}
1452 		} else if (len == wlk_ate.len) {
1453 			/* do not try to compare if lengths are not equal */
1454 			/* compare the data and if equal return 0 */
1455 			if (len <= ZMS_DATA_IN_ATE_SIZE) {
1456 				rc = memcmp(&wlk_ate.data, data, len);
1457 				if (!rc) {
1458 					return 0;
1459 				}
1460 			} else {
1461 				rc = zms_flash_block_cmp(fs, rd_addr, data, len);
1462 				if (rc <= 0) {
1463 					return rc;
1464 				}
1465 			}
1466 		}
1467 	} else {
1468 		/* skip delete entry for non-existing entry */
1469 		if (len == 0) {
1470 			return 0;
1471 		}
1472 	}
1473 #endif
1474 
1475 #ifdef CONFIG_ZMS_LOOKUP_CACHE
1476 no_cached_entry:
1477 #endif
1478 	/* calculate required space if the entry contains data */
1479 	if (data_size) {
1480 		/* Leave space for delete ate */
1481 		if (len > ZMS_DATA_IN_ATE_SIZE) {
1482 			required_space = data_size + fs->ate_size;
1483 		} else {
1484 			required_space = fs->ate_size;
1485 		}
1486 	}
1487 
1488 	k_mutex_lock(&fs->zms_lock, K_FOREVER);
1489 
1490 	gc_count = 0;
1491 	while (1) {
1492 		if (gc_count == fs->sector_count) {
1493 			/* gc'ed all sectors, no extra space will be created
1494 			 * by extra gc.
1495 			 */
1496 			rc = -ENOSPC;
1497 			goto end;
1498 		}
1499 
		/* We need to make sure that we leave the ATE at address 0x0 of the sector
		 * empty (even for a delete ATE). Otherwise, fs->ate_wra will be decremented
		 * by ate_size after this write and it will underflow.
		 * So the first position of a sector (fs->ate_wra = 0x0) is forbidden for ATEs
		 * and the second position can only be written by a delete ATE.
		 */
1506 		if ((SECTOR_OFFSET(fs->ate_wra)) &&
1507 		    (fs->ate_wra >= (fs->data_wra + required_space)) &&
1508 		    (SECTOR_OFFSET(fs->ate_wra - fs->ate_size) || !len)) {
1509 			rc = zms_flash_write_entry(fs, id, data, len);
1510 			if (rc) {
1511 				goto end;
1512 			}
1513 			break;
1514 		}
1515 		rc = zms_sector_close(fs);
1516 		if (rc) {
1517 			LOG_ERR("Failed to close the sector, returned = %d", rc);
1518 			goto end;
1519 		}
1520 		rc = zms_gc(fs);
1521 		if (rc) {
1522 			LOG_ERR("Garbage collection failed, returned = %d", rc);
1523 			goto end;
1524 		}
1525 		gc_count++;
1526 	}
1527 	rc = len;
1528 end:
1529 	k_mutex_unlock(&fs->zms_lock);
1530 	return rc;
1531 }
1532 
int zms_delete(struct zms_fs *fs, uint32_t id)
1534 {
1535 	return zms_write(fs, id, NULL, 0);
1536 }
1537 
ssize_t zms_read_hist(struct zms_fs *fs, uint32_t id, void *data, size_t len, uint32_t cnt)
1539 {
1540 	int rc;
1541 	int prev_found = 0;
1542 	uint64_t wlk_addr;
1543 	uint64_t rd_addr = 0;
1544 	uint64_t wlk_prev_addr = 0;
1545 	uint32_t cnt_his;
1546 	struct zms_ate wlk_ate;
1547 #ifdef CONFIG_ZMS_DATA_CRC
1548 	uint32_t computed_data_crc;
1549 #endif
1550 
1551 	if (!fs->ready) {
1552 		LOG_ERR("zms not initialized");
1553 		return -EACCES;
1554 	}
1555 
1556 	cnt_his = 0U;
1557 
1558 #ifdef CONFIG_ZMS_LOOKUP_CACHE
1559 	wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)];
1560 
1561 	if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) {
1562 		rc = -ENOENT;
1563 		goto err;
1564 	}
1565 #else
1566 	wlk_addr = fs->ate_wra;
1567 #endif
1568 
1569 	while (cnt_his <= cnt) {
1570 		wlk_prev_addr = wlk_addr;
1571 		/* Search for a previous valid ATE with the same ID */
1572 		prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate,
1573 						  &wlk_prev_addr);
1574 		if (prev_found < 0) {
1575 			return prev_found;
1576 		}
1577 		if (prev_found) {
1578 			cnt_his++;
			/* wlk_prev_addr contains the ATE address of the previously found ATE. */
1580 			rd_addr = wlk_prev_addr;
			/*
			 * compute the previous ATE address in case we need to start
			 * the search again.
			 */
1585 			rc = zms_compute_prev_addr(fs, &wlk_prev_addr);
1586 			if (rc) {
1587 				return rc;
1588 			}
			/* wlk_addr will be the start search address in the next loop */
1590 			wlk_addr = wlk_prev_addr;
1591 		} else {
1592 			break;
1593 		}
1594 	}
1595 
1596 	if (((!prev_found) || (wlk_ate.id != id)) || (wlk_ate.len == 0U) || (cnt_his < cnt)) {
1597 		return -ENOENT;
1598 	}
1599 
1600 	if (wlk_ate.len <= ZMS_DATA_IN_ATE_SIZE) {
1601 		/* data is stored in the ATE */
1602 		if (data) {
1603 			memcpy(data, &wlk_ate.data, MIN(len, wlk_ate.len));
1604 		}
1605 	} else {
1606 		rd_addr &= ADDR_SECT_MASK;
1607 		rd_addr += wlk_ate.offset;
1608 		/* do not read or copy data if pointer is NULL */
1609 		if (data) {
1610 			rc = zms_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len));
1611 			if (rc) {
1612 				goto err;
1613 			}
1614 		}
1615 #ifdef CONFIG_ZMS_DATA_CRC
1616 		/* Do not compute CRC for partial reads as CRC won't match */
1617 		if (len >= wlk_ate.len) {
1618 			computed_data_crc = crc32_ieee(data, wlk_ate.len);
1619 			if (computed_data_crc != wlk_ate.data_crc) {
1620 				LOG_ERR("Invalid data CRC: ATE_CRC=0x%08X, "
1621 					"computed_data_crc=0x%08X",
1622 					wlk_ate.data_crc, computed_data_crc);
1623 				return -EIO;
1624 			}
1625 		}
1626 #endif
1627 	}
1628 
1629 	return wlk_ate.len;
1630 
1631 err:
1632 	return rc;
1633 }
1634 
ssize_t zms_read(struct zms_fs *fs, uint32_t id, void *data, size_t len)
1636 {
1637 	int rc;
1638 
1639 	rc = zms_read_hist(fs, id, data, len, 0);
1640 	if (rc < 0) {
1641 		return rc;
1642 	}
1643 
1644 	/* returns the minimum between ATE data length and requested length */
1645 	return MIN(rc, len);
1646 }
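/* Usage sketch for the ID/value API above, for an already-mounted struct zms_fs instance
 * named fs (see the mount sketch after zms_mount); the ID and payload are illustrative:
 *
 *	uint32_t cnt = 42U;
 *	ssize_t rc;
 *
 *	rc = zms_write(&fs, 1U, &cnt, sizeof(cnt));  // rc == sizeof(cnt) on success
 *
 *	rc = zms_read(&fs, 1U, &cnt, sizeof(cnt));   // rc == number of bytes copied
 *	if (rc < 0) {
 *		// -ENOENT if the ID was never written or was deleted
 *	}
 *
 *	(void)zms_delete(&fs, 1U);                   // a zero-length write marks deletion
 */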
1647 
ssize_t zms_get_data_length(struct zms_fs *fs, uint32_t id)
1649 {
1650 	int rc;
1651 
1652 	rc = zms_read_hist(fs, id, NULL, 0, 0);
1653 
1654 	return rc;
1655 }
1656 
ssize_t zms_calc_free_space(struct zms_fs *fs)
1658 {
1659 	int rc;
1660 	int previous_sector_num = ZMS_INVALID_SECTOR_NUM;
1661 	int prev_found = 0;
1662 	int sec_closed;
1663 	struct zms_ate step_ate;
1664 	struct zms_ate wlk_ate;
1665 	struct zms_ate empty_ate;
1666 	struct zms_ate close_ate;
1667 	uint64_t step_addr;
1668 	uint64_t wlk_addr;
1669 	uint64_t step_prev_addr;
1670 	uint64_t wlk_prev_addr;
1671 	uint64_t data_wra = 0U;
1672 	uint8_t current_cycle;
1673 	ssize_t free_space = 0;
1674 	const uint32_t second_to_last_offset = (2 * fs->ate_size);
1675 
1676 	if (!fs->ready) {
1677 		LOG_ERR("zms not initialized");
1678 		return -EACCES;
1679 	}
1680 
	/*
	 * There is always a closing ATE, an empty ATE, a GC_done ATE and a reserved ATE for
	 * deletion in each sector.
	 * And there is always one reserved sector for garbage collection operations
	 */
1686 	free_space = (fs->sector_count - 1) * (fs->sector_size - 4 * fs->ate_size);
1687 
1688 	step_addr = fs->ate_wra;
1689 
1690 	do {
1691 		step_prev_addr = step_addr;
1692 		rc = zms_prev_ate(fs, &step_addr, &step_ate);
1693 		if (rc) {
1694 			return rc;
1695 		}
1696 
1697 		/* When changing the sector let's get the new cycle counter */
1698 		rc = zms_get_cycle_on_sector_change(fs, step_prev_addr, previous_sector_num,
1699 						    &current_cycle);
1700 		if (rc) {
1701 			return rc;
1702 		}
1703 		previous_sector_num = SECTOR_NUM(step_prev_addr);
1704 
		/* Invalid and deleted ATEs count as free space.
		 * Header ATEs have already been deducted from the free space
		 */
1708 		if (!zms_ate_valid_different_sector(fs, &step_ate, current_cycle) ||
1709 		    (step_ate.id == ZMS_HEAD_ID) || (step_ate.len == 0)) {
1710 			continue;
1711 		}
1712 
1713 		wlk_addr = step_addr;
1714 		/* Try to find if there is a previous valid ATE with same ID */
1715 		prev_found = zms_find_ate_with_id(fs, step_ate.id, wlk_addr, step_addr, &wlk_ate,
1716 						  &wlk_prev_addr);
1717 		if (prev_found < 0) {
1718 			return prev_found;
1719 		}
1720 
1721 		/* If no previous ATE is found, then this is a valid ATE that cannot be
1722 		 * Garbage Collected
1723 		 */
1724 		if (!prev_found || (wlk_prev_addr == step_prev_addr)) {
1725 			if (step_ate.len > ZMS_DATA_IN_ATE_SIZE) {
1726 				free_space -= zms_al_size(fs, step_ate.len);
1727 			}
1728 			free_space -= fs->ate_size;
1729 		}
1730 	} while (step_addr != fs->ate_wra);
1731 
1732 	/* we must keep the sector_cycle before we start looking into special cases */
1733 	current_cycle = fs->sector_cycle;
1734 
1735 	/* Let's look now for special cases where some sectors have only ATEs with
1736 	 * small data size.
1737 	 */
1738 
1739 	for (int i = 0; i < fs->sector_count; i++) {
1740 		step_addr = zms_close_ate_addr(fs, ((uint64_t)i << ADDR_SECT_SHIFT));
1741 
1742 		/* verify if the sector is closed */
1743 		sec_closed = zms_validate_closed_sector(fs, step_addr, &empty_ate, &close_ate);
1744 		if (sec_closed < 0) {
1745 			return sec_closed;
1746 		}
1747 
1748 		/* If the sector is closed and its offset is pointing to a position less than the
1749 		 * 3rd to last ATE position in a sector, it means that we need to leave the second
1750 		 * to last ATE empty.
1751 		 */
1752 		if ((sec_closed == 1) && (close_ate.offset <= second_to_last_offset)) {
1753 			free_space -= fs->ate_size;
1754 		} else if (!sec_closed) {
1755 			/* sector is open, let's recover the last ATE */
1756 			fs->sector_cycle = empty_ate.cycle_cnt;
1757 			rc = zms_recover_last_ate(fs, &step_addr, &data_wra);
1758 			if (rc) {
1759 				return rc;
1760 			}
1761 			if (SECTOR_OFFSET(step_addr) <= second_to_last_offset) {
1762 				free_space -= fs->ate_size;
1763 			}
1764 		}
1765 	}
1766 	/* restore sector cycle */
1767 	fs->sector_cycle = current_cycle;
1768 
1769 	return free_space;
1770 }
1771 
size_t zms_active_sector_free_space(struct zms_fs *fs)
1773 {
1774 	if (!fs->ready) {
1775 		LOG_ERR("ZMS not initialized");
1776 		return -EACCES;
1777 	}
1778 
1779 	return fs->ate_wra - fs->data_wra - fs->ate_size;
1780 }
1781 
int zms_sector_use_next(struct zms_fs *fs)
1783 {
1784 	int ret;
1785 
1786 	if (!fs->ready) {
1787 		LOG_ERR("ZMS not initialized");
1788 		return -EACCES;
1789 	}
1790 
1791 	k_mutex_lock(&fs->zms_lock, K_FOREVER);
1792 
1793 	ret = zms_sector_close(fs);
1794 	if (ret != 0) {
1795 		goto end;
1796 	}
1797 
1798 	ret = zms_gc(fs);
1799 
1800 end:
1801 	k_mutex_unlock(&fs->zms_lock);
1802 	return ret;
1803 }