1 /*  NVS: non volatile storage in flash
2  *
3  * Copyright (c) 2018 Laczen
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 #include <zephyr/drivers/flash.h>
9 #include <string.h>
10 #include <errno.h>
11 #include <inttypes.h>
12 #include <zephyr/fs/nvs.h>
13 #include <zephyr/sys/crc.h>
14 #include "nvs_priv.h"
15 
16 #include <zephyr/logging/log.h>
17 LOG_MODULE_REGISTER(fs_nvs, CONFIG_NVS_LOG_LEVEL);
18 
19 static int nvs_prev_ate(struct nvs_fs *fs, uint32_t *addr, struct nvs_ate *ate);
20 static int nvs_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry);
21 
22 #ifdef CONFIG_NVS_LOOKUP_CACHE
23 
nvs_lookup_cache_pos(uint16_t id)24 static inline size_t nvs_lookup_cache_pos(uint16_t id)
25 {
26 	uint16_t hash;
27 
28 	/* 16-bit integer hash function found by https://github.com/skeeto/hash-prospector. */
29 	hash = id;
30 	hash ^= hash >> 8;
31 	hash *= 0x88b5U;
32 	hash ^= hash >> 7;
33 	hash *= 0xdb2dU;
34 	hash ^= hash >> 9;
35 
36 	return hash % CONFIG_NVS_LOOKUP_CACHE_SIZE;
37 }
38 
/* Rebuild the ID -> ATE-address lookup cache by walking the full ATE
 * list, newest to oldest, starting from the current ATE write address.
 *
 * Only the newest valid ATE per ID is recorded: each cache slot is
 * filled at most once per walk, and the walk visits newer entries first.
 *
 * Return 0 on success, negative error code from flash access otherwise.
 */
static int nvs_lookup_cache_rebuild(struct nvs_fs *fs)
{
	int rc;
	uint32_t addr, ate_addr;
	uint32_t *cache_entry;
	struct nvs_ate ate;

	/* mark all slots unused (NVS_LOOKUP_CACHE_NO_ADDR is all-ones) */
	memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache));
	addr = fs->ate_wra;

	while (true) {
		/* Make a copy of 'addr' as it will be advanced by nvs_prev_ate() */
		ate_addr = addr;
		rc = nvs_prev_ate(fs, &addr, &ate);

		if (rc) {
			return rc;
		}

		cache_entry = &fs->lookup_cache[nvs_lookup_cache_pos(ate.id)];

		/* 0xFFFF is reserved (close/gc-done ATEs); keep only the
		 * first (i.e. newest) valid hit for each slot.
		 */
		if (ate.id != 0xFFFF && *cache_entry == NVS_LOOKUP_CACHE_NO_ADDR &&
		    nvs_ate_valid(fs, &ate)) {
			*cache_entry = ate_addr;
		}

		/* nvs_prev_ate() wraps back to fs->ate_wra at the end of
		 * the ATE history: the walk is complete.
		 */
		if (addr == fs->ate_wra) {
			break;
		}
	}

	return 0;
}
72 
nvs_lookup_cache_invalidate(struct nvs_fs * fs,uint32_t sector)73 static void nvs_lookup_cache_invalidate(struct nvs_fs *fs, uint32_t sector)
74 {
75 	uint32_t *cache_entry = fs->lookup_cache;
76 	uint32_t *const cache_end = &fs->lookup_cache[CONFIG_NVS_LOOKUP_CACHE_SIZE];
77 
78 	for (; cache_entry < cache_end; ++cache_entry) {
79 		if ((*cache_entry >> ADDR_SECT_SHIFT) == sector) {
80 			*cache_entry = NVS_LOOKUP_CACHE_NO_ADDR;
81 		}
82 	}
83 }
84 
85 #endif /* CONFIG_NVS_LOOKUP_CACHE */
86 
87 /* basic routines */
88 /* nvs_al_size returns size aligned to fs->write_block_size */
nvs_al_size(struct nvs_fs * fs,size_t len)89 static inline size_t nvs_al_size(struct nvs_fs *fs, size_t len)
90 {
91 	uint8_t write_block_size = fs->flash_parameters->write_block_size;
92 
93 	if (write_block_size <= 1U) {
94 		return len;
95 	}
96 	return (len + (write_block_size - 1U)) & ~(write_block_size - 1U);
97 }
98 /* end basic routines */
99 
100 /* flash routines */
101 /* basic aligned flash write to nvs address */
/* basic aligned flash write to nvs address */
/* Write 'len' bytes from 'data' at NVS address 'addr'. The part of the
 * data that fills whole write blocks is written straight from 'data';
 * the remaining tail is copied into a stack buffer, padded with the
 * flash erase value up to a full write block, and written as one block.
 * Return 0 on success or a negative flash driver error code.
 */
static int nvs_flash_al_wrt(struct nvs_fs *fs, uint32_t addr, const void *data,
			     size_t len)
{
	const uint8_t *data8 = (const uint8_t *)data;
	int rc = 0;
	off_t offset;
	size_t blen;
	uint8_t buf[NVS_BLOCK_SIZE];

	if (!len) {
		/* Nothing to write, avoid changing the flash protection */
		return 0;
	}

	/* translate the NVS address (sector index + offset) to an absolute
	 * flash offset
	 */
	offset = fs->offset;
	offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT);
	offset += addr & ADDR_OFFS_MASK;

	/* blen: length rounded down to a whole number of write blocks
	 * (write_block_size is a power of two)
	 */
	blen = len & ~(fs->flash_parameters->write_block_size - 1U);
	if (blen > 0) {
		rc = flash_write(fs->flash_device, offset, data8, blen);
		if (rc) {
			/* flash write error */
			goto end;
		}
		len -= blen;
		offset += blen;
		data8 += blen;
	}
	if (len) {
		/* pad the tail with the erase value so the unused bytes of
		 * the last write block stay in the erased state
		 */
		memcpy(buf, data8, len);
		(void)memset(buf + len, fs->flash_parameters->erase_value,
			fs->flash_parameters->write_block_size - len);

		rc = flash_write(fs->flash_device, offset, buf,
				 fs->flash_parameters->write_block_size);
	}

end:
	return rc;
}
143 
144 /* basic flash read from nvs address */
nvs_flash_rd(struct nvs_fs * fs,uint32_t addr,void * data,size_t len)145 static int nvs_flash_rd(struct nvs_fs *fs, uint32_t addr, void *data,
146 			 size_t len)
147 {
148 	int rc;
149 	off_t offset;
150 
151 	offset = fs->offset;
152 	offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT);
153 	offset += addr & ADDR_OFFS_MASK;
154 
155 	rc = flash_read(fs->flash_device, offset, data, len);
156 	return rc;
157 }
158 
159 /* allocation entry write */
/* allocation entry write */
/* Write 'entry' at the current ATE write address (fs->ate_wra), update
 * the lookup cache for the entry id (when enabled), then move
 * fs->ate_wra down by one aligned ATE size. Returns the flash write
 * result.
 */
static int nvs_flash_ate_wrt(struct nvs_fs *fs, const struct nvs_ate *entry)
{
	int rc;

	rc = nvs_flash_al_wrt(fs, fs->ate_wra, entry,
			       sizeof(struct nvs_ate));
#ifdef CONFIG_NVS_LOOKUP_CACHE
	/* 0xFFFF is a special-purpose identifier. Exclude it from the cache */
	if (entry->id != 0xFFFF) {
		fs->lookup_cache[nvs_lookup_cache_pos(entry->id)] = fs->ate_wra;
	}
#endif
	/* ATEs grow downward from the end of the sector */
	fs->ate_wra -= nvs_al_size(fs, sizeof(struct nvs_ate));

	return rc;
}
176 
177 /* data write */
nvs_flash_data_wrt(struct nvs_fs * fs,const void * data,size_t len)178 static int nvs_flash_data_wrt(struct nvs_fs *fs, const void *data, size_t len)
179 {
180 	int rc;
181 
182 	rc = nvs_flash_al_wrt(fs, fs->data_wra, data, len);
183 	fs->data_wra += nvs_al_size(fs, len);
184 
185 	return rc;
186 }
187 
188 /* flash ate read */
nvs_flash_ate_rd(struct nvs_fs * fs,uint32_t addr,struct nvs_ate * entry)189 static int nvs_flash_ate_rd(struct nvs_fs *fs, uint32_t addr,
190 			     struct nvs_ate *entry)
191 {
192 	return nvs_flash_rd(fs, addr, entry, sizeof(struct nvs_ate));
193 }
194 
195 /* end of basic flash routines */
196 
197 /* advanced flash routines */
198 
199 /* nvs_flash_block_cmp compares the data in flash at addr to data
200  * in blocks of size NVS_BLOCK_SIZE aligned to fs->write_block_size
201  * returns 0 if equal, 1 if not equal, errcode if error
202  */
nvs_flash_block_cmp(struct nvs_fs * fs,uint32_t addr,const void * data,size_t len)203 static int nvs_flash_block_cmp(struct nvs_fs *fs, uint32_t addr, const void *data,
204 				size_t len)
205 {
206 	const uint8_t *data8 = (const uint8_t *)data;
207 	int rc;
208 	size_t bytes_to_cmp, block_size;
209 	uint8_t buf[NVS_BLOCK_SIZE];
210 
211 	block_size =
212 		NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);
213 
214 	while (len) {
215 		bytes_to_cmp = MIN(block_size, len);
216 		rc = nvs_flash_rd(fs, addr, buf, bytes_to_cmp);
217 		if (rc) {
218 			return rc;
219 		}
220 		rc = memcmp(data8, buf, bytes_to_cmp);
221 		if (rc) {
222 			return 1;
223 		}
224 		len -= bytes_to_cmp;
225 		addr += bytes_to_cmp;
226 		data8 += bytes_to_cmp;
227 	}
228 	return 0;
229 }
230 
231 /* nvs_flash_cmp_const compares the data in flash at addr to a constant
232  * value. returns 0 if all data in flash is equal to value, 1 if not equal,
233  * errcode if error
234  */
nvs_flash_cmp_const(struct nvs_fs * fs,uint32_t addr,uint8_t value,size_t len)235 static int nvs_flash_cmp_const(struct nvs_fs *fs, uint32_t addr, uint8_t value,
236 				size_t len)
237 {
238 	int rc;
239 	size_t bytes_to_cmp, block_size;
240 	uint8_t cmp[NVS_BLOCK_SIZE];
241 
242 	block_size =
243 		NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);
244 
245 	(void)memset(cmp, value, block_size);
246 	while (len) {
247 		bytes_to_cmp = MIN(block_size, len);
248 		rc = nvs_flash_block_cmp(fs, addr, cmp, bytes_to_cmp);
249 		if (rc) {
250 			return rc;
251 		}
252 		len -= bytes_to_cmp;
253 		addr += bytes_to_cmp;
254 	}
255 	return 0;
256 }
257 
258 /* flash block move: move a block at addr to the current data write location
259  * and updates the data write location.
260  */
nvs_flash_block_move(struct nvs_fs * fs,uint32_t addr,size_t len)261 static int nvs_flash_block_move(struct nvs_fs *fs, uint32_t addr, size_t len)
262 {
263 	int rc;
264 	size_t bytes_to_copy, block_size;
265 	uint8_t buf[NVS_BLOCK_SIZE];
266 
267 	block_size =
268 		NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);
269 
270 	while (len) {
271 		bytes_to_copy = MIN(block_size, len);
272 		rc = nvs_flash_rd(fs, addr, buf, bytes_to_copy);
273 		if (rc) {
274 			return rc;
275 		}
276 		rc = nvs_flash_data_wrt(fs, buf, bytes_to_copy);
277 		if (rc) {
278 			return rc;
279 		}
280 		len -= bytes_to_copy;
281 		addr += bytes_to_copy;
282 	}
283 	return 0;
284 }
285 
286 /* erase a sector and verify erase was OK.
287  * return 0 if OK, errorcode on error.
288  */
/* erase a sector and verify erase was OK.
 * return 0 if OK, errorcode on error.
 */
static int nvs_flash_erase_sector(struct nvs_fs *fs, uint32_t addr)
{
	int rc;
	off_t offset;

	/* only the sector part of the address is relevant */
	addr &= ADDR_SECT_MASK;

	offset = fs->offset;
	offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT);

	LOG_DBG("Erasing flash at %lx, len %d", (long int) offset,
		fs->sector_size);

#ifdef CONFIG_NVS_LOOKUP_CACHE
	/* drop cache entries pointing into the sector about to be erased */
	nvs_lookup_cache_invalidate(fs, addr >> ADDR_SECT_SHIFT);
#endif
	rc = flash_erase(fs->flash_device, offset, fs->sector_size);

	if (rc) {
		return rc;
	}

	/* read back and verify the whole sector holds the erase value */
	if (nvs_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value,
			fs->sector_size)) {
		rc = -ENXIO;
	}

	return rc;
}
318 
319 /* crc update on allocation entry */
nvs_ate_crc8_update(struct nvs_ate * entry)320 static void nvs_ate_crc8_update(struct nvs_ate *entry)
321 {
322 	uint8_t crc8;
323 
324 	crc8 = crc8_ccitt(0xff, entry, offsetof(struct nvs_ate, crc8));
325 	entry->crc8 = crc8;
326 }
327 
328 /* crc check on allocation entry
329  * returns 0 if OK, 1 on crc fail
330  */
nvs_ate_crc8_check(const struct nvs_ate * entry)331 static int nvs_ate_crc8_check(const struct nvs_ate *entry)
332 {
333 	uint8_t crc8;
334 
335 	crc8 = crc8_ccitt(0xff, entry, offsetof(struct nvs_ate, crc8));
336 	if (crc8 == entry->crc8) {
337 		return 0;
338 	}
339 	return 1;
340 }
341 
342 /* nvs_ate_cmp_const compares an ATE to a constant value. returns 0 if
343  * the whole ATE is equal to value, 1 if not equal.
344  */
345 
nvs_ate_cmp_const(const struct nvs_ate * entry,uint8_t value)346 static int nvs_ate_cmp_const(const struct nvs_ate *entry, uint8_t value)
347 {
348 	const uint8_t *data8 = (const uint8_t *)entry;
349 	int i;
350 
351 	for (i = 0; i < sizeof(struct nvs_ate); i++) {
352 		if (data8[i] != value) {
353 			return 1;
354 		}
355 	}
356 
357 	return 0;
358 }
359 
360 /* nvs_ate_valid validates an ate:
361  *     return 1 if crc8 and offset valid,
362  *            0 otherwise
363  */
/* nvs_ate_valid validates an ate:
 *     return 1 if crc8 and offset valid,
 *            0 otherwise
 */
static int nvs_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry)
{
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	/* the data offset must leave room for at least the closing ATE
	 * at the end of the sector
	 */
	if ((nvs_ate_crc8_check(entry)) ||
	    (entry->offset >= (fs->sector_size - ate_size))) {
		return 0;
	}

	return 1;
}
377 
378 /* nvs_close_ate_valid validates an sector close ate: a valid sector close ate:
379  * - valid ate
380  * - len = 0 and id = 0xFFFF
381  * - offset points to location at ate multiple from sector size
382  * return 1 if valid, 0 otherwise
383  */
/* nvs_close_ate_valid validates an sector close ate: a valid sector close ate:
 * - valid ate
 * - len = 0 and id = 0xFFFF
 * - offset points to location at ate multiple from sector size
 * return 1 if valid, 0 otherwise
 */
static int nvs_close_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry)
{
	size_t ate_size;

	if ((!nvs_ate_valid(fs, entry)) || (entry->len != 0U) ||
	    (entry->id != 0xFFFF)) {
		return 0;
	}

	/* the stored offset must be a whole number of ATE slots below the
	 * sector end, since it points at the last ATE written in the sector
	 */
	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
	if ((fs->sector_size - entry->offset) % ate_size) {
		return 0;
	}

	return 1;
}
400 
401 /* store an entry in flash */
nvs_flash_wrt_entry(struct nvs_fs * fs,uint16_t id,const void * data,size_t len)402 static int nvs_flash_wrt_entry(struct nvs_fs *fs, uint16_t id, const void *data,
403 				size_t len)
404 {
405 	int rc;
406 	struct nvs_ate entry;
407 
408 	entry.id = id;
409 	entry.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK);
410 	entry.len = (uint16_t)len;
411 	entry.part = 0xff;
412 
413 	nvs_ate_crc8_update(&entry);
414 
415 	rc = nvs_flash_data_wrt(fs, data, len);
416 	if (rc) {
417 		return rc;
418 	}
419 	rc = nvs_flash_ate_wrt(fs, &entry);
420 	if (rc) {
421 		return rc;
422 	}
423 
424 	return 0;
425 }
426 /* end of flash routines */
427 
428 /* If the closing ate is invalid, its offset cannot be trusted and
429  * the last valid ate of the sector should instead try to be recovered by going
430  * through all ate's.
431  *
432  * addr should point to the faulty closing ate and will be updated to the last
433  * valid ate. If no valid ate is found it will be left untouched.
434  */
/* Scan ATE slots from just below the (invalid) closing ATE down toward
 * the data end of the sector; *addr is moved to the newest valid ATE
 * that is found. data_end_addr tracks where valid data ends, so the
 * scan stops once the ATE area would overlap it.
 */
static int nvs_recover_last_ate(struct nvs_fs *fs, uint32_t *addr)
{
	uint32_t data_end_addr, ate_end_addr;
	struct nvs_ate end_ate;
	size_t ate_size;
	int rc;

	LOG_DBG("Recovering last ate from sector %d",
		(*addr >> ADDR_SECT_SHIFT));

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	/* skip the closing ate slot itself */
	*addr -= ate_size;
	ate_end_addr = *addr;
	data_end_addr = *addr & ADDR_SECT_MASK;
	while (ate_end_addr > data_end_addr) {
		rc = nvs_flash_ate_rd(fs, ate_end_addr, &end_ate);
		if (rc) {
			return rc;
		}
		if (nvs_ate_valid(fs, &end_ate)) {
			/* found a valid ate, update data_end_addr and *addr */
			data_end_addr &= ADDR_SECT_MASK;
			data_end_addr += end_ate.offset + end_ate.len;
			*addr = ate_end_addr;
		}
		ate_end_addr -= ate_size;
	}

	return 0;
}
466 
467 /* walking through allocation entry list, from newest to oldest entries
468  * read ate from addr, modify addr to the previous ate
469  */
/* walking through allocation entry list, from newest to oldest entries
 * read ate from addr, modify addr to the previous ate
 */
static int nvs_prev_ate(struct nvs_fs *fs, uint32_t *addr, struct nvs_ate *ate)
{
	int rc;
	struct nvs_ate close_ate;
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	rc = nvs_flash_ate_rd(fs, *addr, ate);
	if (rc) {
		return rc;
	}

	/* ATEs are written downward, so the previous (older) ATE sits one
	 * slot above the current one
	 */
	*addr += ate_size;
	if (((*addr) & ADDR_OFFS_MASK) != (fs->sector_size - ate_size)) {
		return 0;
	}

	/* last ate in sector, do jump to previous sector */
	if (((*addr) >> ADDR_SECT_SHIFT) == 0U) {
		*addr += ((fs->sector_count - 1) << ADDR_SECT_SHIFT);
	} else {
		*addr -= (1 << ADDR_SECT_SHIFT);
	}

	rc = nvs_flash_ate_rd(fs, *addr, &close_ate);
	if (rc) {
		return rc;
	}

	rc = nvs_ate_cmp_const(&close_ate, fs->flash_parameters->erase_value);
	/* at the end of filesystem */
	if (!rc) {
		*addr = fs->ate_wra;
		return 0;
	}

	/* Update the address if the close ate is valid.
	 */
	if (nvs_close_ate_valid(fs, &close_ate)) {
		(*addr) &= ADDR_SECT_MASK;
		(*addr) += close_ate.offset;
		return 0;
	}

	/* The close_ate was invalid, let's find out the last valid ate
	 * and point the address to this found ate.
	 *
	 * remark: if there was absolutely no valid data in the sector *addr
	 * is kept at sector_end - 2*ate_size, the next read will contain
	 * invalid data and continue with a sector jump
	 */
	return nvs_recover_last_ate(fs, addr);
}
524 
nvs_sector_advance(struct nvs_fs * fs,uint32_t * addr)525 static void nvs_sector_advance(struct nvs_fs *fs, uint32_t *addr)
526 {
527 	*addr += (1 << ADDR_SECT_SHIFT);
528 	if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) {
529 		*addr -= (fs->sector_count << ADDR_SECT_SHIFT);
530 	}
531 }
532 
533 /* allocation entry close (this closes the current sector) by writing offset
534  * of last ate to the sector end.
535  */
/* allocation entry close (this closes the current sector) by writing offset
 * of last ate to the sector end.
 */
static int nvs_sector_close(struct nvs_fs *fs)
{
	struct nvs_ate close_ate;
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	/* the close ate carries id 0xFFFF / len 0 as its marker; its offset
	 * points at the last ATE written in this sector (one slot above the
	 * current write address)
	 */
	close_ate.id = 0xFFFF;
	close_ate.len = 0U;
	close_ate.offset = (uint16_t)((fs->ate_wra + ate_size) & ADDR_OFFS_MASK);
	close_ate.part = 0xff;

	/* the close ate occupies the last ATE slot of the sector */
	fs->ate_wra &= ADDR_SECT_MASK;
	fs->ate_wra += (fs->sector_size - ate_size);

	nvs_ate_crc8_update(&close_ate);

	(void)nvs_flash_ate_wrt(fs, &close_ate);

	/* continue writing at the start of the next sector */
	nvs_sector_advance(fs, &fs->ate_wra);

	fs->data_wra = fs->ate_wra & ADDR_SECT_MASK;

	return 0;
}
561 
nvs_add_gc_done_ate(struct nvs_fs * fs)562 static int nvs_add_gc_done_ate(struct nvs_fs *fs)
563 {
564 	struct nvs_ate gc_done_ate;
565 
566 	LOG_DBG("Adding gc done ate at %x", fs->ate_wra & ADDR_OFFS_MASK);
567 	gc_done_ate.id = 0xffff;
568 	gc_done_ate.len = 0U;
569 	gc_done_ate.part = 0xff;
570 	gc_done_ate.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK);
571 	nvs_ate_crc8_update(&gc_done_ate);
572 
573 	return nvs_flash_ate_wrt(fs, &gc_done_ate);
574 }
575 
576 /* garbage collection: the address ate_wra has been updated to the new sector
577  * that has just been started. The data to gc is in the sector after this new
578  * sector.
579  */
/* garbage collection: the address ate_wra has been updated to the new sector
 * that has just been started. The data to gc is in the sector after this new
 * sector.
 *
 * For every valid ATE in the gc sector, a walk from the newest ATE
 * determines whether it is still the latest entry for its id; if so
 * (and it is not a delete), its data is copied to the current write
 * sector. Finally the gc'ed sector is erased.
 */
static int nvs_gc(struct nvs_fs *fs)
{
	int rc;
	struct nvs_ate close_ate, gc_ate, wlk_ate;
	uint32_t sec_addr, gc_addr, gc_prev_addr, wlk_addr, wlk_prev_addr,
	      data_addr, stop_addr;
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	/* sec_addr: the sector to garbage collect (the one following the
	 * sector that was just opened for writing)
	 */
	sec_addr = (fs->ate_wra & ADDR_SECT_MASK);
	nvs_sector_advance(fs, &sec_addr);
	gc_addr = sec_addr + fs->sector_size - ate_size;

	/* if the sector is not closed don't do gc */
	rc = nvs_flash_ate_rd(fs, gc_addr, &close_ate);
	if (rc < 0) {
		/* flash error */
		return rc;
	}

	rc = nvs_ate_cmp_const(&close_ate, fs->flash_parameters->erase_value);
	if (!rc) {
		goto gc_done;
	}

	/* stop_addr: the oldest ATE slot in the gc sector */
	stop_addr = gc_addr - ate_size;

	/* position gc_addr at the newest (last written) ATE of the sector */
	if (nvs_close_ate_valid(fs, &close_ate)) {
		gc_addr &= ADDR_SECT_MASK;
		gc_addr += close_ate.offset;
	} else {
		rc = nvs_recover_last_ate(fs, &gc_addr);
		if (rc) {
			return rc;
		}
	}

	do {
		gc_prev_addr = gc_addr;
		rc = nvs_prev_ate(fs, &gc_addr, &gc_ate);
		if (rc) {
			return rc;
		}

		if (!nvs_ate_valid(fs, &gc_ate)) {
			continue;
		}

#ifdef CONFIG_NVS_LOOKUP_CACHE
		/* start the walk from the cached newest ATE for this id,
		 * falling back to the global newest on a cache miss
		 */
		wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(gc_ate.id)];

		if (wlk_addr == NVS_LOOKUP_CACHE_NO_ADDR) {
			wlk_addr = fs->ate_wra;
		}
#else
		wlk_addr = fs->ate_wra;
#endif
		do {
			wlk_prev_addr = wlk_addr;
			rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
			if (rc) {
				return rc;
			}
			/* if ate with same id is reached we might need to copy.
			 * only consider valid wlk_ate's. Something wrong might
			 * have been written that has the same ate but is
			 * invalid, don't consider these as a match.
			 */
			if ((wlk_ate.id == gc_ate.id) &&
			    (nvs_ate_valid(fs, &wlk_ate))) {
				break;
			}
		} while (wlk_addr != fs->ate_wra);

		/* if walk has reached the same address as gc_addr copy is
		 * needed unless it is a deleted item.
		 */
		if ((wlk_prev_addr == gc_prev_addr) && gc_ate.len) {
			/* copy needed */
			LOG_DBG("Moving %d, len %d", gc_ate.id, gc_ate.len);

			data_addr = (gc_prev_addr & ADDR_SECT_MASK);
			data_addr += gc_ate.offset;

			/* the data is rewritten at the current data write
			 * address; the ATE offset must follow it
			 */
			gc_ate.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK);
			nvs_ate_crc8_update(&gc_ate);

			rc = nvs_flash_block_move(fs, data_addr, gc_ate.len);
			if (rc) {
				return rc;
			}

			rc = nvs_flash_ate_wrt(fs, &gc_ate);
			if (rc) {
				return rc;
			}
		}
	} while (gc_prev_addr != stop_addr);

gc_done:

	/* Make it possible to detect that gc has finished by writing a
	 * gc done ate to the sector. In the field we might have nvs systems
	 * that do not have sufficient space to add this ate, so for these
	 * situations avoid adding the gc done ate.
	 */

	if (fs->ate_wra >= (fs->data_wra + ate_size)) {
		rc = nvs_add_gc_done_ate(fs);
		if (rc) {
			return rc;
		}
	}

	/* Erase the gc'ed sector */
	rc = nvs_flash_erase_sector(fs, sec_addr);
	if (rc) {
		return rc;
	}
	return 0;
}
702 
/* Scan the file system at mount time to locate the current write
 * positions (fs->ate_wra and fs->data_wra) and recover from an
 * interrupted garbage collection or interrupted write.
 * Returns 0 on success, negative errno on failure.
 */
static int nvs_startup(struct nvs_fs *fs)
{
	int rc;
	struct nvs_ate last_ate;
	size_t ate_size, empty_len;
	/* Initialize addr to 0 for the case fs->sector_count == 0. This
	 * should never happen as this is verified in nvs_mount() but both
	 * Coverity and GCC believe the contrary.
	 */
	uint32_t addr = 0U;
	uint16_t i, closed_sectors = 0;
	uint8_t erase_value = fs->flash_parameters->erase_value;

	k_mutex_lock(&fs->nvs_lock, K_FOREVER);

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
	/* step through the sectors to find a open sector following
	 * a closed sector, this is where NVS can write.
	 */
	for (i = 0; i < fs->sector_count; i++) {
		/* addr: location of the close ATE slot of sector i */
		addr = (i << ADDR_SECT_SHIFT) +
		       (uint16_t)(fs->sector_size - ate_size);
		rc = nvs_flash_cmp_const(fs, addr, erase_value,
					 sizeof(struct nvs_ate));
		if (rc) {
			/* closed sector */
			closed_sectors++;
			nvs_sector_advance(fs, &addr);
			rc = nvs_flash_cmp_const(fs, addr, erase_value,
						 sizeof(struct nvs_ate));
			if (!rc) {
				/* open sector */
				break;
			}
		}
	}
	/* all sectors are closed, this is not a nvs fs */
	if (closed_sectors == fs->sector_count) {
		rc = -EDEADLK;
		goto end;
	}

	if (i == fs->sector_count) {
		/* none of the sectors where closed, in most cases we can set
		 * the address to the first sector, except when there are only
		 * two sectors. Then we can only set it to the first sector if
		 * the last sector contains no ate's. So we check this first
		 */
		rc = nvs_flash_cmp_const(fs, addr - ate_size, erase_value,
				sizeof(struct nvs_ate));
		if (!rc) {
			/* empty ate */
			nvs_sector_advance(fs, &addr);
		}
	}

	/* addr contains address of closing ate in the most recent sector,
	 * search for the last valid ate using the recover_last_ate routine
	 */

	rc = nvs_recover_last_ate(fs, &addr);
	if (rc) {
		goto end;
	}

	/* addr contains address of the last valid ate in the most recent sector
	 * search for the first ate containing all cells erased, in the process
	 * also update fs->data_wra.
	 */
	fs->ate_wra = addr;
	fs->data_wra = addr & ADDR_SECT_MASK;

	while (fs->ate_wra >= fs->data_wra) {
		rc = nvs_flash_ate_rd(fs, fs->ate_wra, &last_ate);
		if (rc) {
			goto end;
		}

		rc = nvs_ate_cmp_const(&last_ate, erase_value);

		if (!rc) {
			/* found ff empty location */
			break;
		}

		if (nvs_ate_valid(fs, &last_ate)) {
			/* complete write of ate was performed */
			fs->data_wra = addr & ADDR_SECT_MASK;
			/* Align the data write address to the current
			 * write block size so that it is possible to write to
			 * the sector even if the block size has changed after
			 * a software upgrade (unless the physical ATE size
			 * will change).
			 */
			fs->data_wra += nvs_al_size(fs, last_ate.offset + last_ate.len);

			/* ate on the last position within the sector is
			 * reserved for deleting an entry
			 */
			if (fs->ate_wra == fs->data_wra && last_ate.len) {
				/* not a delete ate */
				rc = -ESPIPE;
				goto end;
			}
		}

		fs->ate_wra -= ate_size;
	}

	/* if the sector after the write sector is not empty gc was interrupted
	 * we might need to restart gc if it has not yet finished. Otherwise
	 * just erase the sector.
	 * When gc needs to be restarted, first erase the sector otherwise the
	 * data might not fit into the sector.
	 */
	addr = fs->ate_wra & ADDR_SECT_MASK;
	nvs_sector_advance(fs, &addr);
	rc = nvs_flash_cmp_const(fs, addr, erase_value, fs->sector_size);
	if (rc < 0) {
		goto end;
	}
	if (rc) {
		/* the sector after fs->ate_wrt is not empty, look for a marker
		 * (gc_done_ate) that indicates that gc was finished.
		 */
		bool gc_done_marker = false;
		struct nvs_ate gc_done_ate;

		/* scan the ATE slots written after the last valid ATE for
		 * a gc done marker (id 0xffff, len 0)
		 */
		addr = fs->ate_wra + ate_size;
		while ((addr & ADDR_OFFS_MASK) < (fs->sector_size - ate_size)) {
			rc = nvs_flash_ate_rd(fs, addr, &gc_done_ate);
			if (rc) {
				goto end;
			}
			if (nvs_ate_valid(fs, &gc_done_ate) &&
			    (gc_done_ate.id == 0xffff) &&
			    (gc_done_ate.len == 0U)) {
				gc_done_marker = true;
				break;
			}
			addr += ate_size;
		}

		if (gc_done_marker) {
			/* erase the next sector */
			LOG_INF("GC Done marker found");
			addr = fs->ate_wra & ADDR_SECT_MASK;
			nvs_sector_advance(fs, &addr);
			rc = nvs_flash_erase_sector(fs, addr);
			goto end;
		}
		LOG_INF("No GC Done marker found: restarting gc");
		rc = nvs_flash_erase_sector(fs, fs->ate_wra);
		if (rc) {
			goto end;
		}
		/* restart the current sector from scratch and redo gc */
		fs->ate_wra &= ADDR_SECT_MASK;
		fs->ate_wra += (fs->sector_size - 2 * ate_size);
		fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK);
#ifdef CONFIG_NVS_LOOKUP_CACHE
		/* At this point, the lookup cache wasn't built but the gc
		 * function needs to use it. So, temporarily, we set the
		 * lookup cache to the end of the fs. The cache will be
		 * rebuilt afterwards.
		 */
		for (i = 0; i < CONFIG_NVS_LOOKUP_CACHE_SIZE; i++) {
			fs->lookup_cache[i] = fs->ate_wra;
		}
#endif
		rc = nvs_gc(fs);
		goto end;
	}

	/* possible data write after last ate write, update data_wra */
	while (fs->ate_wra > fs->data_wra) {
		empty_len = fs->ate_wra - fs->data_wra;

		rc = nvs_flash_cmp_const(fs, fs->data_wra, erase_value,
				empty_len);
		if (rc < 0) {
			goto end;
		}
		if (!rc) {
			break;
		}

		fs->data_wra += fs->flash_parameters->write_block_size;
	}

	/* If the ate_wra is pointing to the first ate write location in a
	 * sector and data_wra is not 0, erase the sector as it contains no
	 * valid data (this also avoids closing a sector without any data).
	 */
	if (((fs->ate_wra + 2 * ate_size) == fs->sector_size) &&
	    (fs->data_wra != (fs->ate_wra & ADDR_SECT_MASK))) {
		rc = nvs_flash_erase_sector(fs, fs->ate_wra);
		if (rc) {
			goto end;
		}
		fs->data_wra = fs->ate_wra & ADDR_SECT_MASK;
	}

end:

#ifdef CONFIG_NVS_LOOKUP_CACHE
	if (!rc) {
		rc = nvs_lookup_cache_rebuild(fs);
	}
#endif
	/* If the sector is empty add a gc done ate to avoid having insufficient
	 * space when doing gc.
	 */
	if ((!rc) && ((fs->ate_wra & ADDR_OFFS_MASK) ==
		      (fs->sector_size - 2 * ate_size))) {

		rc = nvs_add_gc_done_ate(fs);
	}
	k_mutex_unlock(&fs->nvs_lock);
	return rc;
}
923 
nvs_clear(struct nvs_fs * fs)924 int nvs_clear(struct nvs_fs *fs)
925 {
926 	int rc;
927 	uint32_t addr;
928 
929 	if (!fs->ready) {
930 		LOG_ERR("NVS not initialized");
931 		return -EACCES;
932 	}
933 
934 	for (uint16_t i = 0; i < fs->sector_count; i++) {
935 		addr = i << ADDR_SECT_SHIFT;
936 		rc = nvs_flash_erase_sector(fs, addr);
937 		if (rc) {
938 			return rc;
939 		}
940 	}
941 
942 	/* nvs needs to be reinitialized after clearing */
943 	fs->ready = false;
944 
945 	return 0;
946 }
947 
/* Mount an NVS file system on fs->flash_device: validate the flash
 * write block size and the sector size/count configuration, then run
 * the startup/recovery procedure. Returns 0 on success, negative errno
 * otherwise.
 */
int nvs_mount(struct nvs_fs *fs)
{

	int rc;
	struct flash_pages_info info;
	size_t write_block_size;

	k_mutex_init(&fs->nvs_lock);

	fs->flash_parameters = flash_get_parameters(fs->flash_device);
	if (fs->flash_parameters == NULL) {
		LOG_ERR("Could not obtain flash parameters");
		return -EINVAL;
	}

	write_block_size = flash_get_write_block_size(fs->flash_device);

	/* check that the write block size is supported */
	if (write_block_size > NVS_BLOCK_SIZE || write_block_size == 0) {
		LOG_ERR("Unsupported write block size");
		return -EINVAL;
	}

	/* check that sector size is a multiple of pagesize */
	rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info);
	if (rc) {
		LOG_ERR("Unable to get page info");
		return -EINVAL;
	}
	if (!fs->sector_size || fs->sector_size % info.size) {
		LOG_ERR("Invalid sector size");
		return -EINVAL;
	}

	/* check the number of sectors, it should be at least 2 */
	if (fs->sector_count < 2) {
		LOG_ERR("Configuration error - sector count");
		return -EINVAL;
	}

	rc = nvs_startup(fs);
	if (rc) {
		return rc;
	}

	/* nvs is ready for use */
	fs->ready = true;

	LOG_INF("%d Sectors of %d bytes", fs->sector_count, fs->sector_size);
	LOG_INF("alloc wra: %d, %x",
		(fs->ate_wra >> ADDR_SECT_SHIFT),
		(fs->ate_wra & ADDR_OFFS_MASK));
	LOG_INF("data wra: %d, %x",
		(fs->data_wra >> ADDR_SECT_SHIFT),
		(fs->data_wra & ADDR_OFFS_MASK));

	return 0;
}
1006 
/* Write an entry with the given id. A delete is requested with
 * data == NULL and len == 0. If the newest stored entry for this id
 * already contains the same data, nothing is written. Returns the
 * number of bytes written (len), 0 when nothing needed to be written,
 * or a negative errno on failure.
 */
ssize_t nvs_write(struct nvs_fs *fs, uint16_t id, const void *data, size_t len)
{
	int rc, gc_count;
	size_t ate_size, data_size;
	struct nvs_ate wlk_ate;
	uint32_t wlk_addr, rd_addr;
	uint16_t required_space = 0U; /* no space, appropriate for delete ate */
	bool prev_found = false;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
	data_size = nvs_al_size(fs, len);

	/* The maximum data size is sector size - 4 ate
	 * where: 1 ate for data, 1 ate for sector close, 1 ate for gc done,
	 * and 1 ate to always allow a delete.
	 */
	if ((len > (fs->sector_size - 4 * ate_size)) ||
	    ((len > 0) && (data == NULL))) {
		return -EINVAL;
	}

	/* find latest entry with same id */
#ifdef CONFIG_NVS_LOOKUP_CACHE
	wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(id)];

	/* a cache miss means there is no entry with this id at all */
	if (wlk_addr == NVS_LOOKUP_CACHE_NO_ADDR) {
		goto no_cached_entry;
	}
#else
	wlk_addr = fs->ate_wra;
#endif
	rd_addr = wlk_addr;

	/* walk the ATE list, newest to oldest; rd_addr keeps the address
	 * of the ATE held in wlk_ate when the loop exits
	 */
	while (1) {
		rd_addr = wlk_addr;
		rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
		if (rc) {
			return rc;
		}
		if ((wlk_ate.id == id) && (nvs_ate_valid(fs, &wlk_ate))) {
			prev_found = true;
			break;
		}
		if (wlk_addr == fs->ate_wra) {
			break;
		}
	}

#ifdef CONFIG_NVS_LOOKUP_CACHE
no_cached_entry:
#endif

	if (prev_found) {
		/* previous entry found */
		rd_addr &= ADDR_SECT_MASK;
		rd_addr += wlk_ate.offset;

		if (len == 0) {
			/* do not try to compare with empty data */
			if (wlk_ate.len == 0U) {
				/* skip delete entry as it is already the
				 * last one
				 */
				return 0;
			}
		} else if (len == wlk_ate.len) {
			/* do not try to compare if lengths are not equal */
			/* compare the data and if equal return 0 */
			rc = nvs_flash_block_cmp(fs, rd_addr, data, len);
			if (rc <= 0) {
				return rc;
			}
		}
	} else {
		/* skip delete entry for non-existing entry */
		if (len == 0) {
			return 0;
		}
	}

	/* calculate required space if the entry contains data */
	if (data_size) {
		/* Leave space for delete ate */
		required_space = data_size + ate_size;
	}

	k_mutex_lock(&fs->nvs_lock, K_FOREVER);

	gc_count = 0;
	while (1) {
		if (gc_count == fs->sector_count) {
			/* gc'ed all sectors, no extra space will be created
			 * by extra gc.
			 */
			rc = -ENOSPC;
			goto end;
		}

		if (fs->ate_wra >= (fs->data_wra + required_space)) {

			rc = nvs_flash_wrt_entry(fs, id, data, len);
			if (rc) {
				goto end;
			}
			break;
		}

		/* not enough space in the current sector: close it,
		 * garbage collect the next one, and retry
		 */
		rc = nvs_sector_close(fs);
		if (rc) {
			goto end;
		}

		rc = nvs_gc(fs);
		if (rc) {
			goto end;
		}
		gc_count++;
	}
	rc = len;
end:
	k_mutex_unlock(&fs->nvs_lock);
	return rc;
}
1136 
/* Delete an entry: a delete is expressed as a zero-length write, for
 * which nvs_write() stores a dedicated delete ATE (len == 0).
 * Returns 0 on success or a negative errno from nvs_write().
 */
int nvs_delete(struct nvs_fs *fs, uint16_t id)
{
	int rc = nvs_write(fs, id, NULL, 0);

	return rc;
}
1141 
/* Read the cnt'th most recent value stored under id.
 *
 * cnt == 0 selects the latest entry, cnt == 1 the one written before it,
 * and so on. The ATE chain is walked backwards from the current allocation
 * ATE (or from a lookup-cache hit when CONFIG_NVS_LOOKUP_CACHE is enabled),
 * counting valid ATEs whose id matches.
 *
 * Returns the stored entry length on success — note this may be larger than
 * len; only MIN(len, entry length) bytes are copied into data. Returns
 * -EACCES when the filesystem is not mounted, -EINVAL when len cannot fit in
 * a sector, -ENOENT when the requested history entry does not exist (or is a
 * delete marker), or a negative errno propagated from the flash layer.
 */
ssize_t nvs_read_hist(struct nvs_fs *fs, uint16_t id, void *data, size_t len,
		      uint16_t cnt)
{
	int rc;
	uint32_t wlk_addr, rd_addr;
	uint16_t cnt_his;
	struct nvs_ate wlk_ate;
	size_t ate_size;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	/* An entry needs its data ATE plus the sector-close ATE; anything
	 * larger than that can never have been written.
	 */
	if (len > (fs->sector_size - 2 * ate_size)) {
		return -EINVAL;
	}

	cnt_his = 0U;

#ifdef CONFIG_NVS_LOOKUP_CACHE
	/* Start the walk at the cached ATE for this id; an empty cache slot
	 * means the id was never written, so there is nothing to read.
	 */
	wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(id)];

	if (wlk_addr == NVS_LOOKUP_CACHE_NO_ADDR) {
		rc = -ENOENT;
		goto err;
	}
#else
	wlk_addr = fs->ate_wra;
#endif
	rd_addr = wlk_addr;

	/* Walk backwards until the (cnt+1)'th matching valid ATE is found or
	 * the walk wraps around to the allocation ATE (end of history).
	 * rd_addr keeps the address of the ATE just examined, because
	 * nvs_prev_ate() advances wlk_addr past it.
	 */
	while (cnt_his <= cnt) {
		rd_addr = wlk_addr;
		rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
		if (rc) {
			goto err;
		}
		if ((wlk_ate.id == id) &&  (nvs_ate_valid(fs, &wlk_ate))) {
			cnt_his++;
		}
		if (wlk_addr == fs->ate_wra) {
			break;
		}
	}

	/* Not found if the walk ended on a non-matching ATE, the entry is a
	 * delete marker (len == 0), or fewer than cnt older versions exist.
	 */
	if (((wlk_addr == fs->ate_wra) && (wlk_ate.id != id)) ||
	    (wlk_ate.len == 0U) || (cnt_his < cnt)) {
		return -ENOENT;
	}

	/* Translate the ATE's sector-relative data offset to an absolute
	 * address and copy out at most len bytes.
	 */
	rd_addr &= ADDR_SECT_MASK;
	rd_addr += wlk_ate.offset;
	rc = nvs_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len));
	if (rc) {
		goto err;
	}

	/* Return the full stored length so callers can detect truncation. */
	return wlk_ate.len;

err:
	return rc;
}
1207 
nvs_read(struct nvs_fs * fs,uint16_t id,void * data,size_t len)1208 ssize_t nvs_read(struct nvs_fs *fs, uint16_t id, void *data, size_t len)
1209 {
1210 	int rc;
1211 
1212 	rc = nvs_read_hist(fs, id, data, len, 0);
1213 	return rc;
1214 }
1215 
nvs_calc_free_space(struct nvs_fs * fs)1216 ssize_t nvs_calc_free_space(struct nvs_fs *fs)
1217 {
1218 
1219 	int rc;
1220 	struct nvs_ate step_ate, wlk_ate;
1221 	uint32_t step_addr, wlk_addr;
1222 	size_t ate_size, free_space;
1223 
1224 	if (!fs->ready) {
1225 		LOG_ERR("NVS not initialized");
1226 		return -EACCES;
1227 	}
1228 
1229 	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
1230 
1231 	free_space = 0;
1232 	for (uint16_t i = 1; i < fs->sector_count; i++) {
1233 		free_space += (fs->sector_size - ate_size);
1234 	}
1235 
1236 	step_addr = fs->ate_wra;
1237 
1238 	while (1) {
1239 		rc = nvs_prev_ate(fs, &step_addr, &step_ate);
1240 		if (rc) {
1241 			return rc;
1242 		}
1243 
1244 		wlk_addr = fs->ate_wra;
1245 
1246 		while (1) {
1247 			rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
1248 			if (rc) {
1249 				return rc;
1250 			}
1251 			if ((wlk_ate.id == step_ate.id) ||
1252 			    (wlk_addr == fs->ate_wra)) {
1253 				break;
1254 			}
1255 		}
1256 
1257 		if ((wlk_addr == step_addr) && step_ate.len &&
1258 		    (nvs_ate_valid(fs, &step_ate))) {
1259 			/* count needed */
1260 			free_space -= nvs_al_size(fs, step_ate.len);
1261 			free_space -= ate_size;
1262 		}
1263 
1264 		if (step_addr == fs->ate_wra) {
1265 			break;
1266 		}
1267 	}
1268 	return free_space;
1269 }
1270