1 /* Copyright (c) 2018 Laczen
2  * Copyright (c) 2024 BayLibre SAS
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * ZMS: Zephyr Memory Storage
7  */
8 
9 #include <string.h>
10 #include <errno.h>
11 #include <inttypes.h>
12 #include <zephyr/fs/zms.h>
13 #include <zephyr/sys/crc.h>
14 #include "zms_priv.h"
15 
16 #include <zephyr/logging/log.h>
17 LOG_MODULE_REGISTER(fs_zms, CONFIG_ZMS_LOG_LEVEL);
18 
19 static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate);
20 static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry);
21 static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt);
22 static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
23 				 struct zms_ate *close_ate);
24 static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry,
25 					  uint8_t cycle_cnt);
26 
27 #ifdef CONFIG_ZMS_LOOKUP_CACHE
28 
zms_lookup_cache_pos(uint32_t id)29 static inline size_t zms_lookup_cache_pos(uint32_t id)
30 {
31 	uint32_t hash;
32 
33 	/* 32-bit integer hash function found by https://github.com/skeeto/hash-prospector. */
34 	hash = id;
35 	hash ^= hash >> 16;
36 	hash *= 0x7feb352dU;
37 	hash ^= hash >> 15;
38 	hash *= 0x846ca68bU;
39 	hash ^= hash >> 16;
40 
41 	return hash % CONFIG_ZMS_LOOKUP_CACHE_SIZE;
42 }
43 
/* Rebuild the lookup cache by walking the ATE list backwards from the current
 * ATE write address; for each hash slot, record the address of the newest
 * valid ATE that maps to it.
 * Returns 0 on success, a negative error code on flash read failure.
 */
static int zms_lookup_cache_rebuild(struct zms_fs *fs)
{
	int rc;
	int previous_sector_num = ZMS_INVALID_SECTOR_NUM;
	uint64_t addr;
	uint64_t ate_addr;
	uint64_t *cache_entry;
	uint8_t current_cycle;
	struct zms_ate ate;

	/* Mark every slot unoccupied (ZMS_LOOKUP_CACHE_NO_ADDR is all 0xff) */
	memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache));
	addr = fs->ate_wra;

	while (true) {
		/* Make a copy of 'addr' as it will be advanced by zms_prev_ate() */
		ate_addr = addr;
		rc = zms_prev_ate(fs, &addr, &ate);

		if (rc) {
			return rc;
		}

		cache_entry = &fs->lookup_cache[zms_lookup_cache_pos(ate.id)];

		/* ZMS_HEAD_ID entries are internal (close/empty/gc-done) and never
		 * cached; an already-filled slot keeps its newer address.
		 */
		if (ate.id != ZMS_HEAD_ID && *cache_entry == ZMS_LOOKUP_CACHE_NO_ADDR) {
			/* read the ate cycle only when we change the sector
			 * or if it is the first read
			 */
			if (SECTOR_NUM(ate_addr) != previous_sector_num) {
				rc = zms_get_sector_cycle(fs, ate_addr, &current_cycle);
				if (rc == -ENOENT) {
					/* sector never used */
					current_cycle = 0;
				} else if (rc) {
					/* bad flash read */
					return rc;
				}
			}
			if (zms_ate_valid_different_sector(fs, &ate, current_cycle)) {
				*cache_entry = ate_addr;
			}
			previous_sector_num = SECTOR_NUM(ate_addr);
		}

		/* the walk is circular; stop once it wraps back to the start */
		if (addr == fs->ate_wra) {
			break;
		}
	}

	return 0;
}
95 
zms_lookup_cache_invalidate(struct zms_fs * fs,uint32_t sector)96 static void zms_lookup_cache_invalidate(struct zms_fs *fs, uint32_t sector)
97 {
98 	uint64_t *cache_entry = fs->lookup_cache;
99 	uint64_t *const cache_end = &fs->lookup_cache[CONFIG_ZMS_LOOKUP_CACHE_SIZE];
100 
101 	for (; cache_entry < cache_end; ++cache_entry) {
102 		if (SECTOR_NUM(*cache_entry) == sector) {
103 			*cache_entry = ZMS_LOOKUP_CACHE_NO_ADDR;
104 		}
105 	}
106 }
107 
108 #endif /* CONFIG_ZMS_LOOKUP_CACHE */
109 
110 /* Helper to compute offset given the address */
zms_addr_to_offset(struct zms_fs * fs,uint64_t addr)111 static inline off_t zms_addr_to_offset(struct zms_fs *fs, uint64_t addr)
112 {
113 	return fs->offset + (fs->sector_size * SECTOR_NUM(addr)) + SECTOR_OFFSET(addr);
114 }
115 
116 /* Helper to round down len to the closest multiple of write_block_size  */
zms_round_down_write_block_size(struct zms_fs * fs,size_t len)117 static inline size_t zms_round_down_write_block_size(struct zms_fs *fs, size_t len)
118 {
119 	return len & ~(fs->flash_parameters->write_block_size - 1U);
120 }
121 
122 /* Helper to round up len to multiple of write_block_size */
zms_round_up_write_block_size(struct zms_fs * fs,size_t len)123 static inline size_t zms_round_up_write_block_size(struct zms_fs *fs, size_t len)
124 {
125 	return (len + (fs->flash_parameters->write_block_size - 1U)) &
126 	       ~(fs->flash_parameters->write_block_size - 1U);
127 }
128 
129 /* zms_al_size returns size aligned to fs->write_block_size */
zms_al_size(struct zms_fs * fs,size_t len)130 static inline size_t zms_al_size(struct zms_fs *fs, size_t len)
131 {
132 	size_t write_block_size = fs->flash_parameters->write_block_size;
133 
134 	if (write_block_size <= 1U) {
135 		return len;
136 	}
137 
138 	return zms_round_up_write_block_size(fs, len);
139 }
140 
141 /* Helper to get empty ATE address */
zms_empty_ate_addr(struct zms_fs * fs,uint64_t addr)142 static inline uint64_t zms_empty_ate_addr(struct zms_fs *fs, uint64_t addr)
143 {
144 	return (addr & ADDR_SECT_MASK) + fs->sector_size - fs->ate_size;
145 }
146 
147 /* Helper to get close ATE address */
zms_close_ate_addr(struct zms_fs * fs,uint64_t addr)148 static inline uint64_t zms_close_ate_addr(struct zms_fs *fs, uint64_t addr)
149 {
150 	return (addr & ADDR_SECT_MASK) + fs->sector_size - 2 * fs->ate_size;
151 }
152 
153 /* Aligned memory write */
/* Write 'len' bytes at 'addr', splitting the job into a bulk write of whole
 * write blocks plus one final padded block for the unaligned remainder.
 * Padding bytes are filled with the flash erase value.
 * Returns 0 on success or the flash driver's error code.
 */
static int zms_flash_al_wrt(struct zms_fs *fs, uint64_t addr, const void *data, size_t len)
{
	const uint8_t *data8 = (const uint8_t *)data;
	int rc = 0;
	off_t offset;
	size_t blen;
	uint8_t buf[ZMS_BLOCK_SIZE];

	if (!len) {
		/* Nothing to write, avoid changing the flash protection */
		return 0;
	}

	offset = zms_addr_to_offset(fs, addr);

	/* bulk part: largest prefix that is a whole number of write blocks */
	blen = zms_round_down_write_block_size(fs, len);
	if (blen > 0) {
		rc = flash_write(fs->flash_device, offset, data8, blen);
		if (rc) {
			/* flash write error */
			goto end;
		}
		len -= blen;
		offset += blen;
		data8 += blen;
	}
	/* remainder (< write_block_size): pad with erase value and write one block */
	if (len) {
		memcpy(buf, data8, len);
		(void)memset(buf + len, fs->flash_parameters->erase_value,
			     fs->flash_parameters->write_block_size - len);

		rc = flash_write(fs->flash_device, offset, buf,
				 fs->flash_parameters->write_block_size);
	}

end:
	return rc;
}
192 
193 /* basic flash read from zms address */
zms_flash_rd(struct zms_fs * fs,uint64_t addr,void * data,size_t len)194 static int zms_flash_rd(struct zms_fs *fs, uint64_t addr, void *data, size_t len)
195 {
196 	off_t offset;
197 
198 	offset = zms_addr_to_offset(fs, addr);
199 
200 	return flash_read(fs->flash_device, offset, data, len);
201 }
202 
203 /* allocation entry write */
/* Write an ATE at the current ATE write address, refresh the lookup cache,
 * then move the ATE write address down by one aligned ATE slot.
 * Returns 0 on success, the flash write error otherwise.
 */
static int zms_flash_ate_wrt(struct zms_fs *fs, const struct zms_ate *entry)
{
	int rc;

	rc = zms_flash_al_wrt(fs, fs->ate_wra, entry, sizeof(struct zms_ate));
	if (rc) {
		goto end;
	}
#ifdef CONFIG_ZMS_LOOKUP_CACHE
	/* 0xFFFFFFFF is a special-purpose identifier. Exclude it from the cache */
	if (entry->id != ZMS_HEAD_ID) {
		fs->lookup_cache[zms_lookup_cache_pos(entry->id)] = fs->ate_wra;
	}
#endif
	/* ATEs grow downward from the end of the sector */
	fs->ate_wra -= zms_al_size(fs, sizeof(struct zms_ate));
end:
	return rc;
}
222 
223 /* data write */
zms_flash_data_wrt(struct zms_fs * fs,const void * data,size_t len)224 static int zms_flash_data_wrt(struct zms_fs *fs, const void *data, size_t len)
225 {
226 	int rc;
227 
228 	rc = zms_flash_al_wrt(fs, fs->data_wra, data, len);
229 	if (rc < 0) {
230 		return rc;
231 	}
232 	fs->data_wra += zms_al_size(fs, len);
233 
234 	return 0;
235 }
236 
237 /* flash ate read */
zms_flash_ate_rd(struct zms_fs * fs,uint64_t addr,struct zms_ate * entry)238 static int zms_flash_ate_rd(struct zms_fs *fs, uint64_t addr, struct zms_ate *entry)
239 {
240 	return zms_flash_rd(fs, addr, entry, sizeof(struct zms_ate));
241 }
242 
243 /* zms_flash_block_cmp compares the data in flash at addr to data
244  * in blocks of size ZMS_BLOCK_SIZE aligned to fs->write_block_size
245  * returns 0 if equal, 1 if not equal, errcode if error
246  */
zms_flash_block_cmp(struct zms_fs * fs,uint64_t addr,const void * data,size_t len)247 static int zms_flash_block_cmp(struct zms_fs *fs, uint64_t addr, const void *data, size_t len)
248 {
249 	const uint8_t *data8 = (const uint8_t *)data;
250 	int rc;
251 	size_t bytes_to_cmp;
252 	size_t block_size;
253 	uint8_t buf[ZMS_BLOCK_SIZE];
254 
255 	block_size = zms_round_down_write_block_size(fs, ZMS_BLOCK_SIZE);
256 
257 	while (len) {
258 		bytes_to_cmp = MIN(block_size, len);
259 		rc = zms_flash_rd(fs, addr, buf, bytes_to_cmp);
260 		if (rc) {
261 			return rc;
262 		}
263 		rc = memcmp(data8, buf, bytes_to_cmp);
264 		if (rc) {
265 			return 1;
266 		}
267 		len -= bytes_to_cmp;
268 		addr += bytes_to_cmp;
269 		data8 += bytes_to_cmp;
270 	}
271 	return 0;
272 }
273 
274 /* zms_flash_cmp_const compares the data in flash at addr to a constant
275  * value. returns 0 if all data in flash is equal to value, 1 if not equal,
276  * errcode if error
277  */
zms_flash_cmp_const(struct zms_fs * fs,uint64_t addr,uint8_t value,size_t len)278 static int zms_flash_cmp_const(struct zms_fs *fs, uint64_t addr, uint8_t value, size_t len)
279 {
280 	int rc;
281 	size_t bytes_to_cmp;
282 	size_t block_size;
283 	uint8_t cmp[ZMS_BLOCK_SIZE];
284 
285 	block_size = zms_round_down_write_block_size(fs, ZMS_BLOCK_SIZE);
286 
287 	(void)memset(cmp, value, block_size);
288 	while (len) {
289 		bytes_to_cmp = MIN(block_size, len);
290 		rc = zms_flash_block_cmp(fs, addr, cmp, bytes_to_cmp);
291 		if (rc) {
292 			return rc;
293 		}
294 		len -= bytes_to_cmp;
295 		addr += bytes_to_cmp;
296 	}
297 	return 0;
298 }
299 
300 /* flash block move: move a block at addr to the current data write location
301  * and updates the data write location.
302  */
zms_flash_block_move(struct zms_fs * fs,uint64_t addr,size_t len)303 static int zms_flash_block_move(struct zms_fs *fs, uint64_t addr, size_t len)
304 {
305 	int rc;
306 	size_t bytes_to_copy;
307 	size_t block_size;
308 	uint8_t buf[ZMS_BLOCK_SIZE];
309 
310 	block_size = zms_round_down_write_block_size(fs, ZMS_BLOCK_SIZE);
311 
312 	while (len) {
313 		bytes_to_copy = MIN(block_size, len);
314 		rc = zms_flash_rd(fs, addr, buf, bytes_to_copy);
315 		if (rc) {
316 			return rc;
317 		}
318 		rc = zms_flash_data_wrt(fs, buf, bytes_to_copy);
319 		if (rc) {
320 			return rc;
321 		}
322 		len -= bytes_to_copy;
323 		addr += bytes_to_copy;
324 	}
325 	return 0;
326 }
327 
328 /* erase a sector and verify erase was OK.
329  * return 0 if OK, errorcode on error.
330  */
static int zms_flash_erase_sector(struct zms_fs *fs, uint64_t addr)
{
	int rc;
	off_t offset;
	bool ebw_required =
		flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT;

	if (!ebw_required) {
		/* Do nothing for devices that do not have erase capability */
		return 0;
	}

	addr &= ADDR_SECT_MASK;
	offset = zms_addr_to_offset(fs, addr);

	LOG_DBG("Erasing flash at offset 0x%lx ( 0x%llx ), len %u", (long)offset, addr,
		fs->sector_size);

#ifdef CONFIG_ZMS_LOOKUP_CACHE
	/* cached ATE addresses in this sector become stale after the erase */
	zms_lookup_cache_invalidate(fs, SECTOR_NUM(addr));
#endif
	rc = flash_erase(fs->flash_device, offset, fs->sector_size);

	if (rc) {
		return rc;
	}

	/* verify the whole sector now reads back as the erase value */
	if (zms_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value, fs->sector_size)) {
		LOG_ERR("Failure while erasing the sector at offset 0x%lx", (long)offset);
		rc = -ENXIO;
	}

	return rc;
}
365 
366 /* crc update on allocation entry */
zms_ate_crc8_update(struct zms_ate * entry)367 static void zms_ate_crc8_update(struct zms_ate *entry)
368 {
369 	uint8_t crc8;
370 
371 	/* crc8 field is the first element of the structure, do not include it */
372 	crc8 = crc8_ccitt(0xff, (uint8_t *)entry + SIZEOF_FIELD(struct zms_ate, crc8),
373 			  sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8));
374 	entry->crc8 = crc8;
375 }
376 
377 /* crc check on allocation entry
378  * returns 0 if OK, 1 on crc fail
379  */
zms_ate_crc8_check(const struct zms_ate * entry)380 static int zms_ate_crc8_check(const struct zms_ate *entry)
381 {
382 	uint8_t crc8;
383 
384 	/* crc8 field is the first element of the structure, do not include it */
385 	crc8 = crc8_ccitt(0xff, (uint8_t *)entry + SIZEOF_FIELD(struct zms_ate, crc8),
386 			  sizeof(struct zms_ate) - SIZEOF_FIELD(struct zms_ate, crc8));
387 	if (crc8 == entry->crc8) {
388 		return 0;
389 	}
390 
391 	return 1;
392 }
393 
394 /* zms_ate_valid validates an ate in the current sector by checking if the ate crc is valid
395  * and its cycle cnt matches the cycle cnt of the active sector
396  *
397  * return 1 if ATE is valid,
398  *        0 otherwise
399  *
400  * see: zms_ate_valid_different_sector
401  */
static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry)
{
	/* delegate, using the active sector's cycle counter */
	return zms_ate_valid_different_sector(fs, entry, fs->sector_cycle);
}
406 
407 /* zms_ate_valid_different_sector validates an ate that is in a different
408  * sector than the active one. It takes as argument the cycle_cnt of the
409  * sector where the ATE to be validated is stored
410  *     return 1 if crc8 and cycle_cnt are valid,
411  *            0 otherwise
412  */
zms_ate_valid_different_sector(struct zms_fs * fs,const struct zms_ate * entry,uint8_t cycle_cnt)413 static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry,
414 					  uint8_t cycle_cnt)
415 {
416 	if ((cycle_cnt != entry->cycle_cnt) || zms_ate_crc8_check(entry)) {
417 		return 0;
418 	}
419 
420 	return 1;
421 }
422 
static inline int zms_get_cycle_on_sector_change(struct zms_fs *fs, uint64_t addr,
						 int previous_sector_num, uint8_t *cycle_cnt)
{
	int rc = 0;

	/* Only touch the flash when the walk crossed into a new sector or on
	 * the very first call (previous_sector_num still invalid).
	 */
	if (SECTOR_NUM(addr) != previous_sector_num) {
		rc = zms_get_sector_cycle(fs, addr, cycle_cnt);
		if (rc == -ENOENT) {
			/* sector never used: treat its cycle counter as 0 */
			*cycle_cnt = 0;
			rc = 0;
		}
	}

	return rc;
}
444 
445 /* zms_close_ate_valid validates a sector close ate.
446  * A valid sector close ate should be:
447  * - a valid ate
448  * - with len = 0 and id = ZMS_HEAD_ID
449  * - and offset points to location at ate multiple from sector size
450  * return true if valid, false otherwise
451  */
zms_close_ate_valid(struct zms_fs * fs,const struct zms_ate * entry)452 static bool zms_close_ate_valid(struct zms_fs *fs, const struct zms_ate *entry)
453 {
454 	return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) &&
455 		(entry->id == ZMS_HEAD_ID) && !((fs->sector_size - entry->offset) % fs->ate_size));
456 }
457 
/* zms_empty_ate_valid validates a sector empty ate.
459  * A valid sector empty ate should be:
460  * - a valid ate
461  * - with len = 0xffff and id = 0xffffffff
462  * return true if valid, false otherwise
463  */
zms_empty_ate_valid(struct zms_fs * fs,const struct zms_ate * entry)464 static bool zms_empty_ate_valid(struct zms_fs *fs, const struct zms_ate *entry)
465 {
466 	return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) &&
467 		(entry->len == 0xffff) && (entry->id == ZMS_HEAD_ID));
468 }
469 
470 /* zms_gc_done_ate_valid validates a garbage collector done ATE
471  * Valid gc_done_ate:
472  * - valid ate
473  * - len = 0
474  * - id = 0xffffffff
475  * return true if valid, false otherwise
476  */
zms_gc_done_ate_valid(struct zms_fs * fs,const struct zms_ate * entry)477 static bool zms_gc_done_ate_valid(struct zms_fs *fs, const struct zms_ate *entry)
478 {
479 	return (zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt) && (!entry->len) &&
480 		(entry->id == ZMS_HEAD_ID));
481 }
482 
483 /* Read empty and close ATE of the sector where belongs address "addr" and
484  * validates that the sector is closed.
 * retval: 0 if sector is not closed
 * retval: 1 if sector is closed
487  * retval: < 0 if read of the header failed.
488  */
zms_validate_closed_sector(struct zms_fs * fs,uint64_t addr,struct zms_ate * empty_ate,struct zms_ate * close_ate)489 static int zms_validate_closed_sector(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
490 				      struct zms_ate *close_ate)
491 {
492 	int rc;
493 
494 	/* read the header ATEs */
495 	rc = zms_get_sector_header(fs, addr, empty_ate, close_ate);
496 	if (rc) {
497 		return rc;
498 	}
499 
500 	if (zms_empty_ate_valid(fs, empty_ate) && zms_close_ate_valid(fs, close_ate) &&
501 	    (empty_ate->cycle_cnt == close_ate->cycle_cnt)) {
502 		/* Closed sector validated */
503 		return 1;
504 	}
505 
506 	return 0;
507 }
508 
509 /* store an entry in flash */
zms_flash_write_entry(struct zms_fs * fs,uint32_t id,const void * data,size_t len)510 static int zms_flash_write_entry(struct zms_fs *fs, uint32_t id, const void *data, size_t len)
511 {
512 	int rc;
513 	struct zms_ate entry;
514 
515 	/* Initialize all members to 0 */
516 	memset(&entry, 0, sizeof(struct zms_ate));
517 
518 	entry.id = id;
519 	entry.len = (uint16_t)len;
520 	entry.cycle_cnt = fs->sector_cycle;
521 
522 	if (len > ZMS_DATA_IN_ATE_SIZE) {
523 		/* only compute CRC if len is greater than 8 bytes */
524 		if (IS_ENABLED(CONFIG_ZMS_DATA_CRC)) {
525 			entry.data_crc = crc32_ieee(data, len);
526 		}
527 		entry.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra);
528 	} else if ((len > 0) && (len <= ZMS_DATA_IN_ATE_SIZE)) {
529 		/* Copy data into entry for small data ( < 8B) */
530 		memcpy(&entry.data, data, len);
531 	}
532 
533 	zms_ate_crc8_update(&entry);
534 
535 	if (len > ZMS_DATA_IN_ATE_SIZE) {
536 		rc = zms_flash_data_wrt(fs, data, len);
537 		if (rc) {
538 			return rc;
539 		}
540 	}
541 
542 	rc = zms_flash_ate_wrt(fs, &entry);
543 	if (rc) {
544 		return rc;
545 	}
546 
547 	return 0;
548 }
549 
550 /* end of flash routines */
551 
552 /* Search for the last valid ATE written in a sector and also update data write address
553  */
static int zms_recover_last_ate(struct zms_fs *fs, uint64_t *addr, uint64_t *data_wra)
{
	uint64_t data_end_addr;
	uint64_t ate_end_addr;
	struct zms_ate end_ate;
	int rc;

	LOG_DBG("Recovering last ate from sector %llu", SECTOR_NUM(*addr));

	/* skip close and empty ATE */
	*addr -= 2 * fs->ate_size;

	ate_end_addr = *addr;
	data_end_addr = *addr & ADDR_SECT_MASK;
	/* Initialize the data_wra to the first address of the sector */
	*data_wra = data_end_addr;

	/* Walk ATE slots downward. data_end_addr grows past each valid ATE's
	 * external data, so the scan stops where ATEs would meet the data area.
	 */
	while (ate_end_addr > data_end_addr) {
		rc = zms_flash_ate_rd(fs, ate_end_addr, &end_ate);
		if (rc) {
			return rc;
		}
		if (zms_ate_valid(fs, &end_ate)) {
			/* found a valid ate, update data_end_addr and *addr */
			data_end_addr &= ADDR_SECT_MASK;
			if (end_ate.len > ZMS_DATA_IN_ATE_SIZE) {
				/* data stored outside the ATE: the data write
				 * address resumes right after it
				 */
				data_end_addr += end_ate.offset + zms_al_size(fs, end_ate.len);
				*data_wra = data_end_addr;
			}
			*addr = ate_end_addr;
		}
		ate_end_addr -= fs->ate_size;
	}

	return 0;
}
590 
591 /* compute previous addr of ATE */
static int zms_compute_prev_addr(struct zms_fs *fs, uint64_t *addr)
{
	int sec_closed;
	struct zms_ate empty_ate;
	struct zms_ate close_ate;

	/* ATEs grow downward, so the previous (older) ATE is one slot up */
	*addr += fs->ate_size;
	if ((SECTOR_OFFSET(*addr)) != (fs->sector_size - 2 * fs->ate_size)) {
		return 0;
	}

	/* last ate in sector, do jump to previous sector */
	if (SECTOR_NUM(*addr) == 0U) {
		/* wrap around to the last sector */
		*addr += ((uint64_t)(fs->sector_count - 1) << ADDR_SECT_SHIFT);
	} else {
		*addr -= (1ULL << ADDR_SECT_SHIFT);
	}

	/* verify if the sector is closed */
	sec_closed = zms_validate_closed_sector(fs, *addr, &empty_ate, &close_ate);
	if (sec_closed < 0) {
		return sec_closed;
	}

	/* Non Closed Sector */
	if (!sec_closed) {
		/* at the end of filesystem */
		*addr = fs->ate_wra;
		return 0;
	}

	/* Update the address here because the header ATEs are valid:
	 * the close ATE's offset points at the sector's newest ATE.
	 */
	(*addr) &= ADDR_SECT_MASK;
	(*addr) += close_ate.offset;

	return 0;
}
629 
630 /* walking through allocation entry list, from newest to oldest entries
631  * read ate from addr, modify addr to the previous ate
632  */
/* Read the ATE at *addr, then step *addr back to the previous (older) ATE. */
static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate)
{
	int rc = zms_flash_ate_rd(fs, *addr, ate);

	return rc ? rc : zms_compute_prev_addr(fs, addr);
}
644 
zms_sector_advance(struct zms_fs * fs,uint64_t * addr)645 static void zms_sector_advance(struct zms_fs *fs, uint64_t *addr)
646 {
647 	*addr += (1ULL << ADDR_SECT_SHIFT);
648 	if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) {
649 		*addr -= ((uint64_t)fs->sector_count << ADDR_SECT_SHIFT);
650 	}
651 }
652 
653 /* allocation entry close (this closes the current sector) by writing offset
654  * of last ate to the sector end.
655  */
static int zms_sector_close(struct zms_fs *fs)
{
	int rc;
	struct zms_ate close_ate;
	struct zms_ate garbage_ate;

	close_ate.id = ZMS_HEAD_ID;
	close_ate.len = 0U;
	/* record where the newest ATE of this sector lives */
	close_ate.offset = (uint32_t)SECTOR_OFFSET(fs->ate_wra + fs->ate_size);
	close_ate.metadata = 0xffffffff;
	close_ate.cycle_cnt = fs->sector_cycle;

	/* When we close the sector, we must write all non used ATE with
	 * a non valid (Junk) ATE.
	 * This is needed to avoid some corner cases where some ATEs are
	 * not overwritten and become valid when the cycle counter wrap again
	 * to the same cycle counter of the old ATE.
	 * Example :
	 * - An ATE.cycl_cnt == 0 is written as last ATE of the sector
	   - This ATE was never overwritten in the next 255 cycles because of
	     large data size
	   - Next 256th cycle the leading cycle_cnt is 0, this ATE becomes
	     valid even if it is not the case.
	 */
	memset(&garbage_ate, fs->flash_parameters->erase_value, sizeof(garbage_ate));
	/* fill every remaining ATE slot down to the data area with junk */
	while (SECTOR_OFFSET(fs->ate_wra) && (fs->ate_wra >= fs->data_wra)) {
		rc = zms_flash_ate_wrt(fs, &garbage_ate);
		if (rc) {
			return rc;
		}
	}

	fs->ate_wra = zms_close_ate_addr(fs, fs->ate_wra);

	zms_ate_crc8_update(&close_ate);

	/* NOTE(review): write result deliberately ignored - presumably an
	 * unreadable close ATE is tolerated by the recovery path; confirm.
	 */
	(void)zms_flash_ate_wrt(fs, &close_ate);

	zms_sector_advance(fs, &fs->ate_wra);

	rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle);
	if (rc == -ENOENT) {
		/* sector never used */
		fs->sector_cycle = 0;
	} else if (rc) {
		/* bad flash read */
		return rc;
	}

	/* data grows upward from the start of the new sector */
	fs->data_wra = fs->ate_wra & ADDR_SECT_MASK;

	return 0;
}
709 
zms_add_gc_done_ate(struct zms_fs * fs)710 static int zms_add_gc_done_ate(struct zms_fs *fs)
711 {
712 	struct zms_ate gc_done_ate;
713 
714 	LOG_DBG("Adding gc done ate at %llx", fs->ate_wra);
715 	gc_done_ate.id = ZMS_HEAD_ID;
716 	gc_done_ate.len = 0U;
717 	gc_done_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra);
718 	gc_done_ate.metadata = 0xffffffff;
719 	gc_done_ate.cycle_cnt = fs->sector_cycle;
720 
721 	zms_ate_crc8_update(&gc_done_ate);
722 
723 	return zms_flash_ate_wrt(fs, &gc_done_ate);
724 }
725 
static int zms_add_empty_ate(struct zms_fs *fs, uint64_t addr)
{
	struct zms_ate empty_ate;
	uint8_t cycle_cnt;
	int rc = 0;
	uint64_t previous_ate_wra;

	addr &= ADDR_SECT_MASK;

	LOG_DBG("Adding empty ate at %llx", (uint64_t)(addr + fs->sector_size - fs->ate_size));
	empty_ate.id = ZMS_HEAD_ID;
	empty_ate.len = 0xffff;
	empty_ate.offset = 0U;
	/* magic number and storage format version are kept in metadata */
	empty_ate.metadata =
		FIELD_PREP(ZMS_MAGIC_NUMBER_MASK, ZMS_MAGIC_NUMBER) | ZMS_DEFAULT_VERSION;
	/* NOTE(review): the data union is left unset before the CRC below is
	 * computed - confirm this is intentional.
	 */

	rc = zms_get_sector_cycle(fs, addr, &cycle_cnt);
	if (rc == -ENOENT) {
		/* sector never used */
		cycle_cnt = 0;
	} else if (rc) {
		/* bad flash read */
		return rc;
	}

	/* increase cycle counter (wraps modulo 256) */
	empty_ate.cycle_cnt = (cycle_cnt + 1) % BIT(8);
	zms_ate_crc8_update(&empty_ate);

	/* Adding empty ate to this sector changes fs->ate_wra value
	 * Restore the ate_wra of the current sector after this
	 */
	previous_ate_wra = fs->ate_wra;
	fs->ate_wra = zms_empty_ate_addr(fs, addr);
	rc = zms_flash_ate_wrt(fs, &empty_ate);
	if (rc) {
		return rc;
	}
	fs->ate_wra = previous_ate_wra;

	return 0;
}
768 
zms_get_sector_cycle(struct zms_fs * fs,uint64_t addr,uint8_t * cycle_cnt)769 static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt)
770 {
771 	int rc;
772 	struct zms_ate empty_ate;
773 	uint64_t empty_addr;
774 
775 	empty_addr = zms_empty_ate_addr(fs, addr);
776 
777 	/* read the cycle counter of the current sector */
778 	rc = zms_flash_ate_rd(fs, empty_addr, &empty_ate);
779 	if (rc < 0) {
780 		/* flash error */
781 		return rc;
782 	}
783 
784 	if (zms_empty_ate_valid(fs, &empty_ate)) {
785 		*cycle_cnt = empty_ate.cycle_cnt;
786 		return 0;
787 	}
788 
789 	/* there is no empty ATE in this sector */
790 	return -ENOENT;
791 }
792 
zms_get_sector_header(struct zms_fs * fs,uint64_t addr,struct zms_ate * empty_ate,struct zms_ate * close_ate)793 static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate,
794 				 struct zms_ate *close_ate)
795 {
796 	int rc;
797 	uint64_t close_addr;
798 
799 	close_addr = zms_close_ate_addr(fs, addr);
800 	/* read the second ate in the sector to get the close ATE */
801 	rc = zms_flash_ate_rd(fs, close_addr, close_ate);
802 	if (rc) {
803 		return rc;
804 	}
805 
806 	/* read the first ate in the sector to get the empty ATE */
807 	rc = zms_flash_ate_rd(fs, close_addr + fs->ate_size, empty_ate);
808 	if (rc) {
809 		return rc;
810 	}
811 
812 	return 0;
813 }
814 
815 /**
816  * @brief Helper to find an ATE using its ID
817  *
818  * @param fs Pointer to file system
819  * @param id Id of the entry to be found
820  * @param start_addr Address from where the search will start
821  * @param end_addr Address where the search will stop
822  * @param ate pointer to the found ATE if it exists
823  * @param ate_addr Pointer to the address of the found ATE
824  *
825  * @retval 0 No ATE is found
826  * @retval 1 valid ATE with same ID found
827  * @retval < 0 An error happened
828  */
static int zms_find_ate_with_id(struct zms_fs *fs, uint32_t id, uint64_t start_addr,
				uint64_t end_addr, struct zms_ate *ate, uint64_t *ate_addr)
{
	int rc;
	int previous_sector_num = ZMS_INVALID_SECTOR_NUM;
	uint64_t wlk_prev_addr;
	uint64_t wlk_addr;
	int prev_found = 0;
	struct zms_ate wlk_ate;
	uint8_t current_cycle;

	wlk_addr = start_addr;

	do {
		/* remember the address of the entry we are about to read */
		wlk_prev_addr = wlk_addr;
		rc = zms_prev_ate(fs, &wlk_addr, &wlk_ate);
		if (rc) {
			return rc;
		}
		if (wlk_ate.id == id) {
			/* read the ate cycle only when we change the sector or if it is
			 * the first read ( previous_sector_num == ZMS_INVALID_SECTOR_NUM).
			 */
			rc = zms_get_cycle_on_sector_change(fs, wlk_prev_addr, previous_sector_num,
							    &current_cycle);
			if (rc) {
				return rc;
			}
			if (zms_ate_valid_different_sector(fs, &wlk_ate, current_cycle)) {
				prev_found = 1;
				break;
			}
			previous_sector_num = SECTOR_NUM(wlk_prev_addr);
		}
	} while (wlk_addr != end_addr);

	/* when no match was found these hold the last entry walked */
	*ate = wlk_ate;
	*ate_addr = wlk_prev_addr;

	return prev_found;
}
870 
871 /* garbage collection: the address ate_wra has been updated to the new sector
872  * that has just been started. The data to gc is in the sector after this new
873  * sector.
874  */
static int zms_gc(struct zms_fs *fs)
{
	int rc;
	int sec_closed;
	struct zms_ate close_ate;
	struct zms_ate gc_ate;
	struct zms_ate wlk_ate;
	struct zms_ate empty_ate;
	uint64_t sec_addr;
	uint64_t gc_addr;
	uint64_t gc_prev_addr;
	uint64_t wlk_addr;
	uint64_t wlk_prev_addr;
	uint64_t data_addr;
	uint64_t stop_addr;
	uint8_t previous_cycle = 0;

	/* Make sure the destination (current) sector has a cycle counter;
	 * initialize it (erase + empty ATE) if it was never used.
	 */
	rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle);
	if (rc == -ENOENT) {
		/* Erase this new unused sector if needed */
		rc = zms_flash_erase_sector(fs, fs->ate_wra);
		if (rc) {
			return rc;
		}
		/* sector never used */
		rc = zms_add_empty_ate(fs, fs->ate_wra);
		if (rc) {
			return rc;
		}
		/* At this step we are sure that empty ATE exist.
		 * If not, then there is an I/O problem.
		 */
		rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle);
		if (rc) {
			return rc;
		}
	} else if (rc) {
		/* bad flash read */
		return rc;
	}
	previous_cycle = fs->sector_cycle;

	/* the sector to garbage-collect is the one right after the current one */
	sec_addr = (fs->ate_wra & ADDR_SECT_MASK);
	zms_sector_advance(fs, &sec_addr);
	gc_addr = sec_addr + fs->sector_size - fs->ate_size;

	/* verify if the sector is closed */
	sec_closed = zms_validate_closed_sector(fs, gc_addr, &empty_ate, &close_ate);
	if (sec_closed < 0) {
		return sec_closed;
	}

	/* if the sector is not closed don't do gc */
	if (!sec_closed) {
		goto gc_done;
	}

	/* update sector_cycle so zms_ate_valid() below checks entries against
	 * the GC'ed sector's cycle counter
	 */
	fs->sector_cycle = empty_ate.cycle_cnt;

	/* stop_addr points to the first ATE before the header ATEs */
	stop_addr = gc_addr - 2 * fs->ate_size;
	/* At this step empty & close ATEs are valid.
	 * let's start the GC
	 */
	gc_addr &= ADDR_SECT_MASK;
	gc_addr += close_ate.offset;

	do {
		gc_prev_addr = gc_addr;
		rc = zms_prev_ate(fs, &gc_addr, &gc_ate);
		if (rc) {
			return rc;
		}

		/* skip invalid entries and deleted items (len == 0) */
		if (!zms_ate_valid(fs, &gc_ate) || !gc_ate.len) {
			continue;
		}

#ifdef CONFIG_ZMS_LOOKUP_CACHE
		/* start the search from the cached address when available */
		wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(gc_ate.id)];

		if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) {
			wlk_addr = fs->ate_wra;
		}
#else
		wlk_addr = fs->ate_wra;
#endif

		/* Initialize the wlk_prev_addr as if no previous ID will be found */
		wlk_prev_addr = gc_prev_addr;
		/* Search for a previous valid ATE with the same ID. If it doesn't exist
		 * then wlk_prev_addr will be equal to gc_prev_addr.
		 */
		rc = zms_find_ate_with_id(fs, gc_ate.id, wlk_addr, fs->ate_wra, &wlk_ate,
					  &wlk_prev_addr);
		if (rc < 0) {
			return rc;
		}

		/* if walk_addr has reached the same address as gc_addr, a copy is
		 * needed unless it is a deleted item.
		 */
		if (wlk_prev_addr == gc_prev_addr) {
			/* copy needed */
			LOG_DBG("Moving %d, len %d", gc_ate.id, gc_ate.len);

			if (gc_ate.len > ZMS_DATA_IN_ATE_SIZE) {
				/* Copy Data only when len > 8
				 * Otherwise, Data is already inside ATE
				 */
				data_addr = (gc_prev_addr & ADDR_SECT_MASK);
				data_addr += gc_ate.offset;
				gc_ate.offset = (uint32_t)SECTOR_OFFSET(fs->data_wra);

				rc = zms_flash_block_move(fs, data_addr, gc_ate.len);
				if (rc) {
					return rc;
				}
			}

			/* re-stamp the moved ATE with the destination sector's cycle */
			gc_ate.cycle_cnt = previous_cycle;
			zms_ate_crc8_update(&gc_ate);
			rc = zms_flash_ate_wrt(fs, &gc_ate);
			if (rc) {
				return rc;
			}
		}
	} while (gc_prev_addr != stop_addr);

gc_done:

	/* restore the previous sector_cycle */
	fs->sector_cycle = previous_cycle;

	/* Write a GC_done ATE to mark the end of this operation
	 */

	rc = zms_add_gc_done_ate(fs);
	if (rc) {
		return rc;
	}

	/* Erase the GC'ed sector when needed */
	rc = zms_flash_erase_sector(fs, sec_addr);
	if (rc) {
		return rc;
	}

#ifdef CONFIG_ZMS_LOOKUP_CACHE
	zms_lookup_cache_invalidate(fs, sec_addr >> ADDR_SECT_SHIFT);
#endif
	/* re-initialize the freed sector with a fresh empty ATE */
	rc = zms_add_empty_ate(fs, sec_addr);

	return rc;
}
1031 
zms_clear(struct zms_fs * fs)1032 int zms_clear(struct zms_fs *fs)
1033 {
1034 	int rc;
1035 	uint64_t addr;
1036 
1037 	if (!fs->ready) {
1038 		LOG_ERR("zms not initialized");
1039 		return -EACCES;
1040 	}
1041 
1042 	k_mutex_lock(&fs->zms_lock, K_FOREVER);
1043 	for (uint32_t i = 0; i < fs->sector_count; i++) {
1044 		addr = (uint64_t)i << ADDR_SECT_SHIFT;
1045 		rc = zms_flash_erase_sector(fs, addr);
1046 		if (rc) {
1047 			goto end;
1048 		}
1049 		rc = zms_add_empty_ate(fs, addr);
1050 		if (rc) {
1051 			goto end;
1052 		}
1053 	}
1054 
1055 	/* zms needs to be reinitialized after clearing */
1056 	fs->ready = false;
1057 
1058 end:
1059 	k_mutex_unlock(&fs->zms_lock);
1060 
1061 	return 0;
1062 }
1063 
zms_init(struct zms_fs * fs)1064 static int zms_init(struct zms_fs *fs)
1065 {
1066 	int rc;
1067 	int sec_closed;
1068 	struct zms_ate last_ate;
1069 	struct zms_ate first_ate;
1070 	struct zms_ate close_ate;
1071 	struct zms_ate empty_ate;
1072 	uint64_t addr = 0U;
1073 	uint64_t data_wra = 0U;
1074 	uint32_t i;
1075 	uint32_t closed_sectors = 0;
1076 	bool zms_magic_exist = false;
1077 
1078 	k_mutex_lock(&fs->zms_lock, K_FOREVER);
1079 
1080 	/* step through the sectors to find a open sector following
1081 	 * a closed sector, this is where zms can write.
1082 	 */
1083 
1084 	for (i = 0; i < fs->sector_count; i++) {
1085 		addr = zms_close_ate_addr(fs, ((uint64_t)i << ADDR_SECT_SHIFT));
1086 
1087 		/* verify if the sector is closed */
1088 		sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate);
1089 		if (sec_closed < 0) {
1090 			rc = sec_closed;
1091 			goto end;
1092 		}
1093 		/* update cycle count */
1094 		fs->sector_cycle = empty_ate.cycle_cnt;
1095 
1096 		if (sec_closed == 1) {
1097 			/* closed sector */
1098 			closed_sectors++;
1099 			/* Let's verify that this is a ZMS storage system */
1100 			if (ZMS_GET_MAGIC_NUMBER(empty_ate.metadata) == ZMS_MAGIC_NUMBER) {
1101 				zms_magic_exist = true;
1102 				/* Let's check that we support this ZMS version */
1103 				if (ZMS_GET_VERSION(empty_ate.metadata) != ZMS_DEFAULT_VERSION) {
1104 					LOG_ERR("ZMS Version is not supported");
1105 					rc = -EPROTONOSUPPORT;
1106 					goto end;
1107 				}
1108 			}
1109 
1110 			zms_sector_advance(fs, &addr);
1111 			/* addr is pointing to the close ATE */
1112 			/* verify if the sector is Open */
1113 			sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate);
1114 			if (sec_closed < 0) {
1115 				rc = sec_closed;
1116 				goto end;
1117 			}
1118 			/* update cycle count */
1119 			fs->sector_cycle = empty_ate.cycle_cnt;
1120 
1121 			if (!sec_closed) {
1122 				/* We found an Open sector following a closed one */
1123 				break;
1124 			}
1125 		}
1126 	}
1127 	/* all sectors are closed, and zms magic number not found. This is not a zms fs */
1128 	if ((closed_sectors == fs->sector_count) && !zms_magic_exist) {
1129 		rc = -ENOTSUP;
1130 		goto end;
1131 	}
1132 	/* TODO: add a recovery mechanism here if the ZMS magic number exist but all
1133 	 * sectors are closed
1134 	 */
1135 
1136 	if (i == fs->sector_count) {
1137 		/* none of the sectors were closed, which means that the first
1138 		 * sector is the one in use, except if there are only 2 sectors.
1139 		 * Let's check if the last sector has valid ATEs otherwise set
1140 		 * the open sector to the first one.
1141 		 */
1142 		rc = zms_flash_ate_rd(fs, addr - fs->ate_size, &first_ate);
1143 		if (rc) {
1144 			goto end;
1145 		}
1146 		if (!zms_ate_valid(fs, &first_ate)) {
1147 			zms_sector_advance(fs, &addr);
1148 		}
1149 		rc = zms_get_sector_header(fs, addr, &empty_ate, &close_ate);
1150 		if (rc) {
1151 			goto end;
1152 		}
1153 
1154 		if (zms_empty_ate_valid(fs, &empty_ate)) {
1155 			/* Empty ATE is valid, let's verify that this is a ZMS storage system */
1156 			if (ZMS_GET_MAGIC_NUMBER(empty_ate.metadata) == ZMS_MAGIC_NUMBER) {
1157 				zms_magic_exist = true;
1158 				/* Let's check the version */
1159 				if (ZMS_GET_VERSION(empty_ate.metadata) != ZMS_DEFAULT_VERSION) {
1160 					LOG_ERR("ZMS Version is not supported");
1161 					rc = -EPROTONOSUPPORT;
1162 					goto end;
1163 				}
1164 			}
1165 		} else {
1166 			rc = zms_flash_erase_sector(fs, addr);
1167 			if (rc) {
1168 				goto end;
1169 			}
1170 			rc = zms_add_empty_ate(fs, addr);
1171 			if (rc) {
1172 				goto end;
1173 			}
1174 		}
1175 		rc = zms_get_sector_cycle(fs, addr, &fs->sector_cycle);
1176 		if (rc == -ENOENT) {
1177 			/* sector never used */
1178 			fs->sector_cycle = 0;
1179 		} else if (rc) {
1180 			/* bad flash read */
1181 			goto end;
1182 		}
1183 	}
1184 
1185 	/* addr contains address of closing ate in the most recent sector,
1186 	 * search for the last valid ate using the recover_last_ate routine
1187 	 * and also update the data_wra
1188 	 */
1189 	rc = zms_recover_last_ate(fs, &addr, &data_wra);
1190 	if (rc) {
1191 		goto end;
1192 	}
1193 
1194 	/* addr contains address of the last valid ate in the most recent sector
1195 	 * data_wra contains the data write address of the current sector
1196 	 */
1197 	fs->ate_wra = addr;
1198 	fs->data_wra = data_wra;
1199 
1200 	/* fs->ate_wra should point to the next available entry. This is normally
1201 	 * the next position after the one found by the recovery function.
1202 	 * Let's verify that it doesn't contain any valid ATE, otherwise search for
1203 	 * an empty position
1204 	 */
1205 	while (fs->ate_wra >= fs->data_wra) {
1206 		rc = zms_flash_ate_rd(fs, fs->ate_wra, &last_ate);
1207 		if (rc) {
1208 			goto end;
1209 		}
1210 		if (!zms_ate_valid(fs, &last_ate)) {
1211 			/* found empty location */
1212 			break;
1213 		}
1214 
1215 		/* ate on the last position within the sector is
1216 		 * reserved for deletion an entry
1217 		 */
1218 		if ((fs->ate_wra == fs->data_wra) && last_ate.len) {
1219 			/* not a delete ate */
1220 			rc = -ESPIPE;
1221 			goto end;
1222 		}
1223 
1224 		fs->ate_wra -= fs->ate_size;
1225 	}
1226 
1227 	/* The sector after the write sector is either empty with a valid empty ATE (regular case)
1228 	 * or it has never been used or it is a closed sector (GC didn't finish)
1229 	 * If it is a closed sector we must look for a valid GC done ATE in the current write
1230 	 * sector, if it is missing, we need to restart gc because it has been interrupted.
1231 	 * If no valid empty ATE is found then it has never been used. Just erase it by adding
1232 	 * a valid empty ATE.
1233 	 * When gc needs to be restarted, first erase the sector by adding an empty
1234 	 * ATE otherwise the data might not fit into the sector.
1235 	 */
1236 	addr = zms_close_ate_addr(fs, fs->ate_wra);
1237 	zms_sector_advance(fs, &addr);
1238 
1239 	/* verify if the sector is closed */
1240 	sec_closed = zms_validate_closed_sector(fs, addr, &empty_ate, &close_ate);
1241 	if (sec_closed < 0) {
1242 		rc = sec_closed;
1243 		goto end;
1244 	}
1245 
1246 	if (sec_closed == 1) {
1247 		/* The sector after fs->ate_wrt is closed.
1248 		 * Look for a marker (gc_done_ate) that indicates that gc was finished.
1249 		 */
1250 		bool gc_done_marker = false;
1251 		struct zms_ate gc_done_ate;
1252 
1253 		fs->sector_cycle = empty_ate.cycle_cnt;
1254 		addr = fs->ate_wra + fs->ate_size;
1255 		while (SECTOR_OFFSET(addr) < (fs->sector_size - 2 * fs->ate_size)) {
1256 			rc = zms_flash_ate_rd(fs, addr, &gc_done_ate);
1257 			if (rc) {
1258 				goto end;
1259 			}
1260 
1261 			if (zms_gc_done_ate_valid(fs, &gc_done_ate)) {
1262 				break;
1263 			}
1264 			addr += fs->ate_size;
1265 		}
1266 
1267 		if (gc_done_marker) {
1268 			/* erase the next sector */
1269 			LOG_INF("GC Done marker found");
1270 			addr = fs->ate_wra & ADDR_SECT_MASK;
1271 			zms_sector_advance(fs, &addr);
1272 			rc = zms_flash_erase_sector(fs, addr);
1273 			if (rc < 0) {
1274 				goto end;
1275 			}
1276 			rc = zms_add_empty_ate(fs, addr);
1277 			goto end;
1278 		}
1279 		LOG_INF("No GC Done marker found: restarting gc");
1280 		rc = zms_flash_erase_sector(fs, fs->ate_wra);
1281 		if (rc) {
1282 			goto end;
1283 		}
1284 		rc = zms_add_empty_ate(fs, fs->ate_wra);
1285 		if (rc) {
1286 			goto end;
1287 		}
1288 
1289 		/* Let's point to the first writable position */
1290 		fs->ate_wra &= ADDR_SECT_MASK;
1291 		fs->ate_wra += (fs->sector_size - 3 * fs->ate_size);
1292 		fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK);
1293 #ifdef CONFIG_ZMS_LOOKUP_CACHE
1294 		/**
1295 		 * At this point, the lookup cache wasn't built but the gc function need to use it.
1296 		 * So, temporarily, we set the lookup cache to the end of the fs.
1297 		 * The cache will be rebuilt afterwards
1298 		 **/
1299 		for (i = 0; i < CONFIG_ZMS_LOOKUP_CACHE_SIZE; i++) {
1300 			fs->lookup_cache[i] = fs->ate_wra;
1301 		}
1302 #endif
1303 		rc = zms_gc(fs);
1304 		goto end;
1305 	}
1306 
1307 end:
1308 #ifdef CONFIG_ZMS_LOOKUP_CACHE
1309 	if (!rc) {
1310 		rc = zms_lookup_cache_rebuild(fs);
1311 	}
1312 #endif
1313 	/* If the sector is empty add a gc done ate to avoid having insufficient
1314 	 * space when doing gc.
1315 	 */
1316 	if ((!rc) && (SECTOR_OFFSET(fs->ate_wra) == (fs->sector_size - 3 * fs->ate_size))) {
1317 		rc = zms_add_gc_done_ate(fs);
1318 	}
1319 	k_mutex_unlock(&fs->zms_lock);
1320 
1321 	return rc;
1322 }
1323 
zms_mount(struct zms_fs * fs)1324 int zms_mount(struct zms_fs *fs)
1325 {
1326 	int rc;
1327 	struct flash_pages_info info;
1328 	size_t write_block_size;
1329 
1330 	k_mutex_init(&fs->zms_lock);
1331 
1332 	fs->flash_parameters = flash_get_parameters(fs->flash_device);
1333 	if (fs->flash_parameters == NULL) {
1334 		LOG_ERR("Could not obtain flash parameters");
1335 		return -EINVAL;
1336 	}
1337 
1338 	fs->ate_size = zms_al_size(fs, sizeof(struct zms_ate));
1339 	write_block_size = fs->flash_parameters->write_block_size;
1340 
1341 	/* check that the write block size is supported */
1342 	if (write_block_size > ZMS_BLOCK_SIZE || write_block_size == 0) {
1343 		LOG_ERR("Unsupported write block size");
1344 		return -EINVAL;
1345 	}
1346 
1347 	/* When the device need erase operations before write let's check that
1348 	 * sector size is a multiple of pagesize
1349 	 */
1350 	if (flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT) {
1351 		rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info);
1352 		if (rc) {
1353 			LOG_ERR("Unable to get page info");
1354 			return -EINVAL;
1355 		}
1356 		if (!fs->sector_size || fs->sector_size % info.size) {
1357 			LOG_ERR("Invalid sector size");
1358 			return -EINVAL;
1359 		}
1360 	}
1361 
1362 	/* we need at least 5 aligned ATEs size as the minimum sector size
1363 	 * 1 close ATE, 1 empty ATE, 1 GC done ATE, 1 Delete ATE, 1 ID/Value ATE
1364 	 */
1365 	if (fs->sector_size < ZMS_MIN_ATE_NUM * fs->ate_size) {
1366 		LOG_ERR("Invalid sector size, should be at least %zu",
1367 			ZMS_MIN_ATE_NUM * fs->ate_size);
1368 		return -EINVAL;
1369 	}
1370 
1371 	/* check the number of sectors, it should be at least 2 */
1372 	if (fs->sector_count < 2) {
1373 		LOG_ERR("Configuration error - sector count below minimum requirement (2)");
1374 		return -EINVAL;
1375 	}
1376 
1377 	rc = zms_init(fs);
1378 
1379 	if (rc) {
1380 		return rc;
1381 	}
1382 
1383 	/* zms is ready for use */
1384 	fs->ready = true;
1385 
1386 	LOG_INF("%u Sectors of %u bytes", fs->sector_count, fs->sector_size);
1387 	LOG_INF("alloc wra: %llu, %llx", SECTOR_NUM(fs->ate_wra), SECTOR_OFFSET(fs->ate_wra));
1388 	LOG_INF("data wra: %llu, %llx", SECTOR_NUM(fs->data_wra), SECTOR_OFFSET(fs->data_wra));
1389 
1390 	return 0;
1391 }
1392 
/* Write an entry for the given ID, or store a delete marker when len == 0.
 *
 * Returns len on success, 0 when the write is skipped (with
 * CONFIG_ZMS_NO_DOUBLE_WRITE: data identical to the stored value, or
 * deleting an already-deleted / non-existing entry), or a negative errno
 * code (-EACCES not mounted, -EINVAL bad arguments, -ENOSPC no room even
 * after garbage-collecting every sector).
 */
ssize_t zms_write(struct zms_fs *fs, uint32_t id, const void *data, size_t len)
{
	int rc;
	size_t data_size;
	uint64_t wlk_addr;
	uint64_t rd_addr;
	uint32_t gc_count;
	uint32_t required_space = 0U; /* no space, appropriate for delete ate */

	if (!fs->ready) {
		LOG_ERR("zms not initialized");
		return -EACCES;
	}

	/* data length aligned to the flash write block size */
	data_size = zms_al_size(fs, len);

	/* The maximum data size is sector size - 5 ate
	 * where: 1 ate for data, 1 ate for sector close, 1 ate for empty,
	 * 1 ate for gc done, and 1 ate to always allow a delete.
	 * We cannot also store more than 64 KB of data
	 */
	if ((len > (fs->sector_size - 5 * fs->ate_size)) || (len > UINT16_MAX) ||
	    ((len > 0) && (data == NULL))) {
		return -EINVAL;
	}

	/* find latest entry with same id */
#ifdef CONFIG_ZMS_LOOKUP_CACHE
	wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)];

	if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) {
		/* cache miss: no previous entry with this ID can exist */
		goto no_cached_entry;
	}
#else
	wlk_addr = fs->ate_wra;
#endif
	rd_addr = wlk_addr;

#ifdef CONFIG_ZMS_NO_DOUBLE_WRITE
	/* Search for a previous valid ATE with the same ID */
	struct zms_ate wlk_ate;
	int prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate, &rd_addr);
	if (prev_found < 0) {
		return prev_found;
	}

	if (prev_found) {
		/* previous entry found */
		if (len > ZMS_DATA_IN_ATE_SIZE) {
			/* data lives outside the ATE: compute its flash address */
			rd_addr &= ADDR_SECT_MASK;
			rd_addr += wlk_ate.offset;
		}

		if (len == 0) {
			/* do not try to compare with empty data */
			if (wlk_ate.len == 0U) {
				/* skip delete entry as it is already the
				 * last one
				 */
				return 0;
			}
		} else if (len == wlk_ate.len) {
			/* do not try to compare if lengths are not equal */
			/* compare the data and if equal return 0 */
			if (len <= ZMS_DATA_IN_ATE_SIZE) {
				rc = memcmp(&wlk_ate.data, data, len);
				if (!rc) {
					return 0;
				}
			} else {
				/* 0 means identical data, <0 is a flash read error */
				rc = zms_flash_block_cmp(fs, rd_addr, data, len);
				if (rc <= 0) {
					return rc;
				}
			}
		}
	} else {
		/* skip delete entry for non-existing entry */
		if (len == 0) {
			return 0;
		}
	}
#endif

#ifdef CONFIG_ZMS_LOOKUP_CACHE
no_cached_entry:
#endif
	/* calculate required space if the entry contains data */
	if (data_size) {
		/* Leave space for delete ate */
		if (len > ZMS_DATA_IN_ATE_SIZE) {
			required_space = data_size + fs->ate_size;
		} else {
			/* small payloads are stored inside the ATE itself */
			required_space = fs->ate_size;
		}
	}

	k_mutex_lock(&fs->zms_lock, K_FOREVER);

	gc_count = 0;
	while (1) {
		if (gc_count == fs->sector_count) {
			/* gc'ed all sectors, no extra space will be created
			 * by extra gc.
			 */
			rc = -ENOSPC;
			goto end;
		}

		/* We need to make sure that we leave the ATE at address 0x0 of the sector
		 * empty (even for delete ATE). Otherwise, the fs->ate_wra will be decremented
		 * after this write by ate_size and it will underflow.
		 * So the first position of a sector (fs->ate_wra = 0x0) is forbidden for ATEs
		 * and the second position could be written only be a delete ATE.
		 */
		if ((SECTOR_OFFSET(fs->ate_wra)) &&
		    (fs->ate_wra >= (fs->data_wra + required_space)) &&
		    (SECTOR_OFFSET(fs->ate_wra - fs->ate_size) || !len)) {
			rc = zms_flash_write_entry(fs, id, data, len);
			if (rc) {
				goto end;
			}
			break;
		}
		/* not enough room: close the current sector and GC the next one */
		rc = zms_sector_close(fs);
		if (rc) {
			LOG_ERR("Failed to close the sector, returned = %d", rc);
			goto end;
		}
		rc = zms_gc(fs);
		if (rc) {
			LOG_ERR("Garbage collection failed, returned = %d", rc);
			goto end;
		}
		gc_count++;
	}
	rc = len;
end:
	k_mutex_unlock(&fs->zms_lock);
	return rc;
}
1534 
/* Delete the entry with the given ID.
 *
 * A deletion is recorded as a zero-length write for the ID; the actual
 * space is reclaimed later by garbage collection.
 */
int zms_delete(struct zms_fs *fs, uint32_t id)
{
	const void *no_data = NULL;

	return zms_write(fs, id, no_data, 0);
}
1539 
/* Read a historic value of an entry: cnt == 0 reads the latest value,
 * cnt == 1 the previous one, and so on.
 *
 * Copies at most min(len, stored length) bytes into data (data may be NULL
 * to only query the length). Returns the stored data length on success,
 * -EACCES when not mounted, -ENOENT when the requested generation does not
 * exist, -EIO on a data CRC mismatch (CONFIG_ZMS_DATA_CRC), or another
 * negative errno on flash access failure.
 */
ssize_t zms_read_hist(struct zms_fs *fs, uint32_t id, void *data, size_t len, uint32_t cnt)
{
	int rc;
	int prev_found = 0;
	uint64_t wlk_addr;
	uint64_t rd_addr = 0;
	uint64_t wlk_prev_addr = 0;
	uint32_t cnt_his;
	struct zms_ate wlk_ate;
#ifdef CONFIG_ZMS_DATA_CRC
	uint32_t computed_data_crc;
#endif

	if (!fs->ready) {
		LOG_ERR("zms not initialized");
		return -EACCES;
	}

	/* number of history generations found so far */
	cnt_his = 0U;

#ifdef CONFIG_ZMS_LOOKUP_CACHE
	wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)];

	if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) {
		/* cache miss: the ID has no stored entry at all */
		rc = -ENOENT;
		goto err;
	}
#else
	wlk_addr = fs->ate_wra;
#endif

	/* Walk backwards through matching ATEs until the cnt-th previous
	 * generation is reached (cnt_his == cnt + 1) or no older one exists.
	 */
	while (cnt_his <= cnt) {
		wlk_prev_addr = wlk_addr;
		/* Search for a previous valid ATE with the same ID */
		prev_found = zms_find_ate_with_id(fs, id, wlk_addr, fs->ate_wra, &wlk_ate,
						  &wlk_prev_addr);
		if (prev_found < 0) {
			return prev_found;
		}
		if (prev_found) {
			cnt_his++;
			/* wlk_prev_addr contain the ATE address of the previous found ATE. */
			rd_addr = wlk_prev_addr;
			/*
			 * compute the previous ATE address in case we need to start
			 * the research again.
			 */
			rc = zms_compute_prev_addr(fs, &wlk_prev_addr);
			if (rc) {
				return rc;
			}
			/* wlk_addr will be the start research address in the next loop */
			wlk_addr = wlk_prev_addr;
		} else {
			break;
		}
	}

	/* Not found, deleted (len == 0), or fewer generations than requested */
	if (((!prev_found) || (wlk_ate.id != id)) || (wlk_ate.len == 0U) || (cnt_his < cnt)) {
		return -ENOENT;
	}

	if (wlk_ate.len <= ZMS_DATA_IN_ATE_SIZE) {
		/* data is stored in the ATE */
		if (data) {
			memcpy(data, &wlk_ate.data, MIN(len, wlk_ate.len));
		}
	} else {
		/* data is stored separately: compute its flash address */
		rd_addr &= ADDR_SECT_MASK;
		rd_addr += wlk_ate.offset;
		/* do not read or copy data if pointer is NULL */
		if (data) {
			rc = zms_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len));
			if (rc) {
				goto err;
			}
		}
#ifdef CONFIG_ZMS_DATA_CRC
		/* Do not compute CRC for partial reads as CRC won't match */
		if (len >= wlk_ate.len) {
			computed_data_crc = crc32_ieee(data, wlk_ate.len);
			if (computed_data_crc != wlk_ate.data_crc) {
				LOG_ERR("Invalid data CRC: ATE_CRC=0x%08X, "
					"computed_data_crc=0x%08X",
					wlk_ate.data_crc, computed_data_crc);
				return -EIO;
			}
		}
#endif
	}

	return wlk_ate.len;

err:
	return rc;
}
1636 
zms_read(struct zms_fs * fs,uint32_t id,void * data,size_t len)1637 ssize_t zms_read(struct zms_fs *fs, uint32_t id, void *data, size_t len)
1638 {
1639 	int rc;
1640 
1641 	rc = zms_read_hist(fs, id, data, len, 0);
1642 	if (rc < 0) {
1643 		return rc;
1644 	}
1645 
1646 	/* returns the minimum between ATE data length and requested length */
1647 	return MIN(rc, len);
1648 }
1649 
zms_get_data_length(struct zms_fs * fs,uint32_t id)1650 ssize_t zms_get_data_length(struct zms_fs *fs, uint32_t id)
1651 {
1652 	int rc;
1653 
1654 	rc = zms_read_hist(fs, id, NULL, 0, 0);
1655 
1656 	return rc;
1657 }
1658 
zms_calc_free_space(struct zms_fs * fs)1659 ssize_t zms_calc_free_space(struct zms_fs *fs)
1660 {
1661 	int rc;
1662 	int previous_sector_num = ZMS_INVALID_SECTOR_NUM;
1663 	int prev_found = 0;
1664 	int sec_closed;
1665 	struct zms_ate step_ate;
1666 	struct zms_ate wlk_ate;
1667 	struct zms_ate empty_ate;
1668 	struct zms_ate close_ate;
1669 	uint64_t step_addr;
1670 	uint64_t wlk_addr;
1671 	uint64_t step_prev_addr;
1672 	uint64_t wlk_prev_addr;
1673 	uint64_t data_wra = 0U;
1674 	uint8_t current_cycle;
1675 	ssize_t free_space = 0;
1676 	const uint32_t second_to_last_offset = (2 * fs->ate_size);
1677 
1678 	if (!fs->ready) {
1679 		LOG_ERR("zms not initialized");
1680 		return -EACCES;
1681 	}
1682 
1683 	/*
1684 	 * There is always a closing ATE , an empty ATE, a GC_done ATE and a reserved ATE for
1685 	 * deletion in each sector.
1686 	 * And there is always one reserved Sector for garbage collection operations
1687 	 */
1688 	free_space = (fs->sector_count - 1) * (fs->sector_size - 4 * fs->ate_size);
1689 
1690 	step_addr = fs->ate_wra;
1691 
1692 	do {
1693 		step_prev_addr = step_addr;
1694 		rc = zms_prev_ate(fs, &step_addr, &step_ate);
1695 		if (rc) {
1696 			return rc;
1697 		}
1698 
1699 		/* When changing the sector let's get the new cycle counter */
1700 		rc = zms_get_cycle_on_sector_change(fs, step_prev_addr, previous_sector_num,
1701 						    &current_cycle);
1702 		if (rc) {
1703 			return rc;
1704 		}
1705 		previous_sector_num = SECTOR_NUM(step_prev_addr);
1706 
1707 		/* Invalid and deleted ATEs are free spaces.
1708 		 * Header ATEs are already retrieved from free space
1709 		 */
1710 		if (!zms_ate_valid_different_sector(fs, &step_ate, current_cycle) ||
1711 		    (step_ate.id == ZMS_HEAD_ID) || (step_ate.len == 0)) {
1712 			continue;
1713 		}
1714 
1715 		wlk_addr = step_addr;
1716 		/* Try to find if there is a previous valid ATE with same ID */
1717 		prev_found = zms_find_ate_with_id(fs, step_ate.id, wlk_addr, step_addr, &wlk_ate,
1718 						  &wlk_prev_addr);
1719 		if (prev_found < 0) {
1720 			return prev_found;
1721 		}
1722 
1723 		/* If no previous ATE is found, then this is a valid ATE that cannot be
1724 		 * Garbage Collected
1725 		 */
1726 		if (!prev_found || (wlk_prev_addr == step_prev_addr)) {
1727 			if (step_ate.len > ZMS_DATA_IN_ATE_SIZE) {
1728 				free_space -= zms_al_size(fs, step_ate.len);
1729 			}
1730 			free_space -= fs->ate_size;
1731 		}
1732 	} while (step_addr != fs->ate_wra);
1733 
1734 	/* we must keep the sector_cycle before we start looking into special cases */
1735 	current_cycle = fs->sector_cycle;
1736 
1737 	/* Let's look now for special cases where some sectors have only ATEs with
1738 	 * small data size.
1739 	 */
1740 
1741 	for (int i = 0; i < fs->sector_count; i++) {
1742 		step_addr = zms_close_ate_addr(fs, ((uint64_t)i << ADDR_SECT_SHIFT));
1743 
1744 		/* verify if the sector is closed */
1745 		sec_closed = zms_validate_closed_sector(fs, step_addr, &empty_ate, &close_ate);
1746 		if (sec_closed < 0) {
1747 			return sec_closed;
1748 		}
1749 
1750 		/* If the sector is closed and its offset is pointing to a position less than the
1751 		 * 3rd to last ATE position in a sector, it means that we need to leave the second
1752 		 * to last ATE empty.
1753 		 */
1754 		if ((sec_closed == 1) && (close_ate.offset <= second_to_last_offset)) {
1755 			free_space -= fs->ate_size;
1756 		} else if (!sec_closed) {
1757 			/* sector is open, let's recover the last ATE */
1758 			fs->sector_cycle = empty_ate.cycle_cnt;
1759 			rc = zms_recover_last_ate(fs, &step_addr, &data_wra);
1760 			if (rc) {
1761 				return rc;
1762 			}
1763 			if (SECTOR_OFFSET(step_addr) <= second_to_last_offset) {
1764 				free_space -= fs->ate_size;
1765 			}
1766 		}
1767 	}
1768 	/* restore sector cycle */
1769 	fs->sector_cycle = current_cycle;
1770 
1771 	return free_space;
1772 }
1773 
zms_active_sector_free_space(struct zms_fs * fs)1774 size_t zms_active_sector_free_space(struct zms_fs *fs)
1775 {
1776 	if (!fs->ready) {
1777 		LOG_ERR("ZMS not initialized");
1778 		return -EACCES;
1779 	}
1780 
1781 	return fs->ate_wra - fs->data_wra - fs->ate_size;
1782 }
1783 
zms_sector_use_next(struct zms_fs * fs)1784 int zms_sector_use_next(struct zms_fs *fs)
1785 {
1786 	int ret;
1787 
1788 	if (!fs->ready) {
1789 		LOG_ERR("ZMS not initialized");
1790 		return -EACCES;
1791 	}
1792 
1793 	k_mutex_lock(&fs->zms_lock, K_FOREVER);
1794 
1795 	ret = zms_sector_close(fs);
1796 	if (ret != 0) {
1797 		goto end;
1798 	}
1799 
1800 	ret = zms_gc(fs);
1801 
1802 end:
1803 	k_mutex_unlock(&fs->zms_lock);
1804 	return ret;
1805 }
1806