/*  NVS: non volatile storage in flash
 *
 * Copyright (c) 2018 Laczen
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/drivers/flash.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <zephyr/fs/nvs.h>
#include <zephyr/sys/crc.h>
#include "nvs_priv.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(fs_nvs, CONFIG_NVS_LOG_LEVEL);

static int nvs_prev_ate(struct nvs_fs *fs, uint32_t *addr, struct nvs_ate *ate);
static int nvs_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry);

#ifdef CONFIG_NVS_LOOKUP_CACHE

static inline size_t nvs_lookup_cache_pos(uint16_t id)
{
	uint16_t hash;

	/* 16-bit integer hash function found by https://github.com/skeeto/hash-prospector. */
	hash = id;
	hash ^= hash >> 8;
	hash *= 0x88b5U;
	hash ^= hash >> 7;
	hash *= 0xdb2dU;
	hash ^= hash >> 9;

	return hash % CONFIG_NVS_LOOKUP_CACHE_SIZE;
}
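
/* The lookup cache maps a hashed ID to the flash address of the most recently
 * written ATE whose ID hashes to that slot. Because different IDs can share a
 * slot, a cached address is only a starting point: callers that consult the
 * cache (see nvs_gc() and nvs_write()) still verify the ID while walking the
 * ATEs from the cached address, and fall back to a full walk from fs->ate_wra
 * when the slot holds NVS_LOOKUP_CACHE_NO_ADDR.
 */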

static int nvs_lookup_cache_rebuild(struct nvs_fs *fs)
{
	int rc;
	uint32_t addr, ate_addr;
	uint32_t *cache_entry;
	struct nvs_ate ate;

	memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache));
	addr = fs->ate_wra;

	while (true) {
		/* Make a copy of 'addr' as it will be advanced by nvs_prev_ate() */
		ate_addr = addr;
		rc = nvs_prev_ate(fs, &addr, &ate);

		if (rc) {
			return rc;
		}

		cache_entry = &fs->lookup_cache[nvs_lookup_cache_pos(ate.id)];

		if (ate.id != 0xFFFF && *cache_entry == NVS_LOOKUP_CACHE_NO_ADDR &&
		    nvs_ate_valid(fs, &ate)) {
			*cache_entry = ate_addr;
		}

		if (addr == fs->ate_wra) {
			break;
		}
	}

	return 0;
}

static void nvs_lookup_cache_invalidate(struct nvs_fs *fs, uint32_t sector)
{
	uint32_t *cache_entry = fs->lookup_cache;
	uint32_t *const cache_end = &fs->lookup_cache[CONFIG_NVS_LOOKUP_CACHE_SIZE];

	for (; cache_entry < cache_end; ++cache_entry) {
		if ((*cache_entry >> ADDR_SECT_SHIFT) == sector) {
			*cache_entry = NVS_LOOKUP_CACHE_NO_ADDR;
		}
	}
}

#endif /* CONFIG_NVS_LOOKUP_CACHE */

/* basic routines */
/* nvs_al_size returns size aligned to fs->write_block_size */
static inline size_t nvs_al_size(struct nvs_fs *fs, size_t len)
{
	size_t write_block_size = fs->flash_parameters->write_block_size;

	if (write_block_size <= 1U) {
		return len;
	}
	return (len + (write_block_size - 1U)) & ~(write_block_size - 1U);
}
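/* For example, with write_block_size == 8 a request of len == 5 is padded up
 * to 8, while a len that is already a multiple of the block size is returned
 * unchanged; with write_block_size == 1 no padding is ever added.
 */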
/* end basic routines */

/* flash routines */
/* basic aligned flash write to nvs address */
static int nvs_flash_al_wrt(struct nvs_fs *fs, uint32_t addr, const void *data,
			     size_t len)
{
	const uint8_t *data8 = (const uint8_t *)data;
	int rc = 0;
	off_t offset;
	size_t blen;
	uint8_t buf[NVS_BLOCK_SIZE];

	if (!len) {
		/* Nothing to write, avoid changing the flash protection */
		return 0;
	}

	offset = fs->offset;
	offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT);
	offset += addr & ADDR_OFFS_MASK;

	blen = len & ~(fs->flash_parameters->write_block_size - 1U);
	if (blen > 0) {
		rc = flash_write(fs->flash_device, offset, data8, blen);
		if (rc) {
			/* flash write error */
			goto end;
		}
		len -= blen;
		offset += blen;
		data8 += blen;
	}
	if (len) {
		memcpy(buf, data8, len);
		(void)memset(buf + len, fs->flash_parameters->erase_value,
			fs->flash_parameters->write_block_size - len);

		rc = flash_write(fs->flash_device, offset, buf,
				 fs->flash_parameters->write_block_size);
	}

end:
	return rc;
}

/* basic flash read from nvs address */
static int nvs_flash_rd(struct nvs_fs *fs, uint32_t addr, void *data,
			 size_t len)
{
	int rc;
	off_t offset;

	offset = fs->offset;
	offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT);
	offset += addr & ADDR_OFFS_MASK;

	rc = flash_read(fs->flash_device, offset, data, len);
	return rc;
}

/* allocation entry write */
static int nvs_flash_ate_wrt(struct nvs_fs *fs, const struct nvs_ate *entry)
{
	int rc;

	rc = nvs_flash_al_wrt(fs, fs->ate_wra, entry,
			       sizeof(struct nvs_ate));
#ifdef CONFIG_NVS_LOOKUP_CACHE
	/* 0xFFFF is a special-purpose identifier. Exclude it from the cache */
	if (entry->id != 0xFFFF) {
		fs->lookup_cache[nvs_lookup_cache_pos(entry->id)] = fs->ate_wra;
	}
#endif
	fs->ate_wra -= nvs_al_size(fs, sizeof(struct nvs_ate));

	return rc;
}

/* data write */
static int nvs_flash_data_wrt(struct nvs_fs *fs, const void *data, size_t len, bool compute_crc)
{
	int rc;

	/* Only add the CRC if required (ignore deletion requests, i.e. when len is 0) */
	if (IS_ENABLED(CONFIG_NVS_DATA_CRC) && compute_crc && (len > 0)) {
		size_t aligned_len, data_len = len;
		uint8_t *data8 = (uint8_t *)data, buf[NVS_BLOCK_SIZE + NVS_DATA_CRC_SIZE], *pbuf;
		uint32_t data_crc;

		/* Write as much aligned data as possible, so the CRC can be concatenated at
		 * the end of the unaligned data later
		 */
		aligned_len = len & ~(fs->flash_parameters->write_block_size - 1U);
		rc = nvs_flash_al_wrt(fs, fs->data_wra, data8, aligned_len);
		fs->data_wra += aligned_len;
		if (rc) {
			return rc;
		}
		data8 += aligned_len;
		len -= aligned_len;

		/* Create a buffer with the unaligned data if any */
		pbuf = buf;
		if (len) {
			memcpy(pbuf, data8, len);
			pbuf += len;
		}

		/* Append the CRC */
		data_crc = crc32_ieee(data, data_len);
		memcpy(pbuf, &data_crc, sizeof(data_crc));
		len += sizeof(data_crc);

		rc = nvs_flash_al_wrt(fs, fs->data_wra, buf, len);
	} else {
		rc = nvs_flash_al_wrt(fs, fs->data_wra, data, len);
	}
	fs->data_wra += nvs_al_size(fs, len);

	return rc;
}
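
/* When CONFIG_NVS_DATA_CRC is enabled, the 32-bit data CRC is stored in flash
 * directly after the element data and the ATE length accounts for it (see
 * nvs_flash_wrt_entry() and nvs_read_hist()), so readers subtract
 * NVS_DATA_CRC_SIZE to obtain the user data length.
 */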

/* flash ate read */
static int nvs_flash_ate_rd(struct nvs_fs *fs, uint32_t addr,
			     struct nvs_ate *entry)
{
	return nvs_flash_rd(fs, addr, entry, sizeof(struct nvs_ate));
}

/* end of basic flash routines */

/* advanced flash routines */

/* nvs_flash_block_cmp compares the data in flash at addr to data
 * in blocks of size NVS_BLOCK_SIZE aligned to fs->write_block_size
 * returns 0 if equal, 1 if not equal, errcode if error
 */
static int nvs_flash_block_cmp(struct nvs_fs *fs, uint32_t addr, const void *data,
				size_t len)
{
	const uint8_t *data8 = (const uint8_t *)data;
	int rc;
	size_t bytes_to_cmp, block_size;
	uint8_t buf[NVS_BLOCK_SIZE];

	block_size =
		NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);

	while (len) {
		bytes_to_cmp = MIN(block_size, len);
		rc = nvs_flash_rd(fs, addr, buf, bytes_to_cmp);
		if (rc) {
			return rc;
		}
		rc = memcmp(data8, buf, bytes_to_cmp);
		if (rc) {
			return 1;
		}
		len -= bytes_to_cmp;
		addr += bytes_to_cmp;
		data8 += bytes_to_cmp;
	}
	return 0;
}

/* nvs_flash_cmp_const compares the data in flash at addr to a constant
 * value. returns 0 if all data in flash is equal to value, 1 if not equal,
 * errcode if error
 */
static int nvs_flash_cmp_const(struct nvs_fs *fs, uint32_t addr, uint8_t value,
				size_t len)
{
	int rc;
	size_t bytes_to_cmp, block_size;
	uint8_t buf[NVS_BLOCK_SIZE];

	block_size =
		NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);

	while (len) {
		bytes_to_cmp = MIN(block_size, len);
		rc = nvs_flash_rd(fs, addr, buf, bytes_to_cmp);
		if (rc) {
			return rc;
		}

		for (size_t i = 0; i < bytes_to_cmp; i++) {
			if (buf[i] != value) {
				return 1;
			}
		}

		len -= bytes_to_cmp;
		addr += bytes_to_cmp;
	}
	return 0;
}

/* flash block move: moves a block at addr to the current data write location
 * and updates the data write location.
 */
static int nvs_flash_block_move(struct nvs_fs *fs, uint32_t addr, size_t len)
{
	int rc;
	size_t bytes_to_copy, block_size;
	uint8_t buf[NVS_BLOCK_SIZE];

	block_size =
		NVS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U);

	while (len) {
		bytes_to_copy = MIN(block_size, len);
		rc = nvs_flash_rd(fs, addr, buf, bytes_to_copy);
		if (rc) {
			return rc;
		}
		/* Just rewrite the whole record, no need to recompute the CRC as the data
		 * did not change
		 */
		rc = nvs_flash_data_wrt(fs, buf, bytes_to_copy, false);
		if (rc) {
			return rc;
		}
		len -= bytes_to_copy;
		addr += bytes_to_copy;
	}
	return 0;
}

/* erase a sector and verify erase was OK.
 * return 0 if OK, error code on error.
 */
static int nvs_flash_erase_sector(struct nvs_fs *fs, uint32_t addr)
{
	int rc;
	off_t offset;

	addr &= ADDR_SECT_MASK;

	offset = fs->offset;
	offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT);

	LOG_DBG("Erasing flash at %lx, len %d", (long int) offset,
		fs->sector_size);

#ifdef CONFIG_NVS_LOOKUP_CACHE
	nvs_lookup_cache_invalidate(fs, addr >> ADDR_SECT_SHIFT);
#endif
	rc = flash_flatten(fs->flash_device, offset, fs->sector_size);

	if (rc) {
		return rc;
	}

	if (nvs_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value,
			fs->sector_size)) {
		rc = -ENXIO;
	}

	return rc;
}

/* crc update on allocation entry */
static void nvs_ate_crc8_update(struct nvs_ate *entry)
{
	uint8_t crc8;

	crc8 = crc8_ccitt(0xff, entry, offsetof(struct nvs_ate, crc8));
	entry->crc8 = crc8;
}

/* crc check on allocation entry
 * returns 0 if OK, 1 on crc fail
 */
static int nvs_ate_crc8_check(const struct nvs_ate *entry)
{
	uint8_t crc8;

	crc8 = crc8_ccitt(0xff, entry, offsetof(struct nvs_ate, crc8));
	if (crc8 == entry->crc8) {
		return 0;
	}
	return 1;
}

/* nvs_ate_cmp_const compares an ATE to a constant value. returns 0 if
 * the whole ATE is equal to value, 1 if not equal.
 */
static int nvs_ate_cmp_const(const struct nvs_ate *entry, uint8_t value)
{
	const uint8_t *data8 = (const uint8_t *)entry;
	int i;

	for (i = 0; i < sizeof(struct nvs_ate); i++) {
		if (data8[i] != value) {
			return 1;
		}
	}

	return 0;
}

/* nvs_ate_valid validates an ate:
 *     return 1 if crc8, offset and length are valid,
 *            0 otherwise
 */
static int nvs_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry)
{
	size_t ate_size;
	uint32_t position;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
	position = entry->offset + entry->len;

	if ((nvs_ate_crc8_check(entry)) ||
	    (position >= (fs->sector_size - ate_size))) {
		return 0;
	}

	return 1;
}

/* nvs_close_ate_valid validates a sector close ate. A valid sector close ate:
 * - is a valid ate
 * - has len = 0 and id = 0xFFFF
 * - has an offset located at an ate-size multiple from the sector end
 * return 1 if valid, 0 otherwise
 */
static int nvs_close_ate_valid(struct nvs_fs *fs, const struct nvs_ate *entry)
{
	size_t ate_size;

	if ((!nvs_ate_valid(fs, entry)) || (entry->len != 0U) ||
	    (entry->id != 0xFFFF)) {
		return 0;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
	if ((fs->sector_size - entry->offset) % ate_size) {
		return 0;
	}

	return 1;
}

/* store an entry in flash */
static int nvs_flash_wrt_entry(struct nvs_fs *fs, uint16_t id, const void *data,
				size_t len)
{
	int rc;
	struct nvs_ate entry;

	entry.id = id;
	entry.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK);
	entry.len = (uint16_t)len;
	entry.part = 0xff;

	rc = nvs_flash_data_wrt(fs, data, len, true);
	if (rc) {
		return rc;
	}

#ifdef CONFIG_NVS_DATA_CRC
	/* No CRC has been added if this is a deletion write request */
	if (len > 0) {
		entry.len += NVS_DATA_CRC_SIZE;
	}
#endif
	nvs_ate_crc8_update(&entry);

	rc = nvs_flash_ate_wrt(fs, &entry);

	return rc;
}
/* end of flash routines */

/* If the closing ate is invalid, its offset cannot be trusted and
 * the last valid ate of the sector should instead be recovered by going
 * through all ate's.
 *
 * addr should point to the faulty closing ate and will be updated to the last
 * valid ate. If no valid ate is found it will be left untouched.
 */
static int nvs_recover_last_ate(struct nvs_fs *fs, uint32_t *addr)
{
	uint32_t data_end_addr, ate_end_addr;
	struct nvs_ate end_ate;
	size_t ate_size;
	int rc;

	LOG_DBG("Recovering last ate from sector %d",
		(*addr >> ADDR_SECT_SHIFT));

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	*addr -= ate_size;
	ate_end_addr = *addr;
	data_end_addr = *addr & ADDR_SECT_MASK;
	while (ate_end_addr > data_end_addr) {
		rc = nvs_flash_ate_rd(fs, ate_end_addr, &end_ate);
		if (rc) {
			return rc;
		}
		if (nvs_ate_valid(fs, &end_ate)) {
			/* found a valid ate, update data_end_addr and *addr */
			data_end_addr &= ADDR_SECT_MASK;
			data_end_addr += end_ate.offset + end_ate.len;
			*addr = ate_end_addr;
		}
		ate_end_addr -= ate_size;
	}

	return 0;
}

/* walk through the allocation entry list, from newest to oldest entries:
 * read the ate at addr, and modify addr to point to the previous ate.
 */
static int nvs_prev_ate(struct nvs_fs *fs, uint32_t *addr, struct nvs_ate *ate)
{
	int rc;
	struct nvs_ate close_ate;
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	rc = nvs_flash_ate_rd(fs, *addr, ate);
	if (rc) {
		return rc;
	}

	*addr += ate_size;
	if (((*addr) & ADDR_OFFS_MASK) != (fs->sector_size - ate_size)) {
		return 0;
	}

	/* last ate in sector, jump to the previous sector */
	if (((*addr) >> ADDR_SECT_SHIFT) == 0U) {
		*addr += ((fs->sector_count - 1) << ADDR_SECT_SHIFT);
	} else {
		*addr -= (1 << ADDR_SECT_SHIFT);
	}

	rc = nvs_flash_ate_rd(fs, *addr, &close_ate);
	if (rc) {
		return rc;
	}

	rc = nvs_ate_cmp_const(&close_ate, fs->flash_parameters->erase_value);
	/* at the end of filesystem */
	if (!rc) {
		*addr = fs->ate_wra;
		return 0;
	}

	/* Update the address if the close ate is valid.
	 */
	if (nvs_close_ate_valid(fs, &close_ate)) {
		(*addr) &= ADDR_SECT_MASK;
		(*addr) += close_ate.offset;
		return 0;
	}

	/* The close_ate was invalid, so find the last valid ate
	 * and point the address to this found ate.
	 *
	 * remark: if there was absolutely no valid data in the sector *addr
	 * is kept at sector_end - 2*ate_size, the next read will contain
	 * invalid data and continue with a sector jump
	 */
	return nvs_recover_last_ate(fs, addr);
}

static void nvs_sector_advance(struct nvs_fs *fs, uint32_t *addr)
{
	*addr += (1 << ADDR_SECT_SHIFT);
	if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) {
		*addr -= (fs->sector_count << ADDR_SECT_SHIFT);
	}
}

/* allocation entry close (this closes the current sector) by writing the
 * offset of the last ate to the sector end.
 */
static int nvs_sector_close(struct nvs_fs *fs)
{
	struct nvs_ate close_ate;
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	close_ate.id = 0xFFFF;
	close_ate.len = 0U;
	close_ate.offset = (uint16_t)((fs->ate_wra + ate_size) & ADDR_OFFS_MASK);
	close_ate.part = 0xff;

	fs->ate_wra &= ADDR_SECT_MASK;
	fs->ate_wra += (fs->sector_size - ate_size);

	nvs_ate_crc8_update(&close_ate);

	(void)nvs_flash_ate_wrt(fs, &close_ate);

	nvs_sector_advance(fs, &fs->ate_wra);

	fs->data_wra = fs->ate_wra & ADDR_SECT_MASK;

	return 0;
}

static int nvs_add_gc_done_ate(struct nvs_fs *fs)
{
	struct nvs_ate gc_done_ate;

	LOG_DBG("Adding gc done ate at %x", fs->ate_wra & ADDR_OFFS_MASK);
	gc_done_ate.id = 0xffff;
	gc_done_ate.len = 0U;
	gc_done_ate.part = 0xff;
	gc_done_ate.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK);
	nvs_ate_crc8_update(&gc_done_ate);

	return nvs_flash_ate_wrt(fs, &gc_done_ate);
}

/* garbage collection: the address ate_wra has been updated to the new sector
 * that has just been started. The data to gc is in the sector after this new
 * sector.
 */
static int nvs_gc(struct nvs_fs *fs)
{
	int rc;
	struct nvs_ate close_ate, gc_ate, wlk_ate;
	uint32_t sec_addr, gc_addr, gc_prev_addr, wlk_addr, wlk_prev_addr,
	      data_addr, stop_addr;
	size_t ate_size;

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	sec_addr = (fs->ate_wra & ADDR_SECT_MASK);
	nvs_sector_advance(fs, &sec_addr);
	gc_addr = sec_addr + fs->sector_size - ate_size;

	/* if the sector is not closed don't do gc */
	rc = nvs_flash_ate_rd(fs, gc_addr, &close_ate);
	if (rc < 0) {
		/* flash error */
		return rc;
	}

	rc = nvs_ate_cmp_const(&close_ate, fs->flash_parameters->erase_value);
	if (!rc) {
		goto gc_done;
	}

	stop_addr = gc_addr - ate_size;

	if (nvs_close_ate_valid(fs, &close_ate)) {
		gc_addr &= ADDR_SECT_MASK;
		gc_addr += close_ate.offset;
	} else {
		rc = nvs_recover_last_ate(fs, &gc_addr);
		if (rc) {
			return rc;
		}
	}

	do {
		gc_prev_addr = gc_addr;
		rc = nvs_prev_ate(fs, &gc_addr, &gc_ate);
		if (rc) {
			return rc;
		}

		if (!nvs_ate_valid(fs, &gc_ate)) {
			continue;
		}

#ifdef CONFIG_NVS_LOOKUP_CACHE
		wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(gc_ate.id)];

		if (wlk_addr == NVS_LOOKUP_CACHE_NO_ADDR) {
			wlk_addr = fs->ate_wra;
		}
#else
		wlk_addr = fs->ate_wra;
#endif
		do {
			wlk_prev_addr = wlk_addr;
			rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
			if (rc) {
				return rc;
			}
			/* if an ate with the same id is reached we might need
			 * to copy. Only consider valid wlk_ate's. Something
			 * wrong might have been written that has the same id
			 * but is invalid, don't consider these as a match.
			 */
			if ((wlk_ate.id == gc_ate.id) &&
			    (nvs_ate_valid(fs, &wlk_ate))) {
				break;
			}
		} while (wlk_addr != fs->ate_wra);

		/* if the walk has reached the same ate as the gc walk, a copy
		 * is needed unless it is a deleted item.
		 */
		if ((wlk_prev_addr == gc_prev_addr) && gc_ate.len) {
			/* copy needed */
			LOG_DBG("Moving %d, len %d", gc_ate.id, gc_ate.len);

			data_addr = (gc_prev_addr & ADDR_SECT_MASK);
			data_addr += gc_ate.offset;

			gc_ate.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK);
			nvs_ate_crc8_update(&gc_ate);

			rc = nvs_flash_block_move(fs, data_addr, gc_ate.len);
			if (rc) {
				return rc;
			}

			rc = nvs_flash_ate_wrt(fs, &gc_ate);
			if (rc) {
				return rc;
			}
		}
	} while (gc_prev_addr != stop_addr);

gc_done:

	/* Make it possible to detect that gc has finished by writing a
	 * gc done ate to the sector. In the field we might have nvs systems
	 * that do not have sufficient space to add this ate, so for these
	 * situations avoid adding the gc done ate.
	 */

	if (fs->ate_wra >= (fs->data_wra + ate_size)) {
		rc = nvs_add_gc_done_ate(fs);
		if (rc) {
			return rc;
		}
	}

	/* Erase the gc'ed sector */
	rc = nvs_flash_erase_sector(fs, sec_addr);

	return rc;
}

static int nvs_startup(struct nvs_fs *fs)
{
	int rc;
	struct nvs_ate last_ate;
	size_t ate_size, empty_len;
	/* Initialize addr to 0 for the case fs->sector_count == 0. This
	 * should never happen as this is verified in nvs_mount() but both
	 * Coverity and GCC believe the contrary.
	 */
	uint32_t addr = 0U;
	uint16_t i, closed_sectors = 0;
	uint8_t erase_value = fs->flash_parameters->erase_value;

	k_mutex_lock(&fs->nvs_lock, K_FOREVER);

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
	/* step through the sectors to find an open sector following
	 * a closed sector, this is where NVS can write.
	 */
	for (i = 0; i < fs->sector_count; i++) {
		addr = (i << ADDR_SECT_SHIFT) +
		       (uint16_t)(fs->sector_size - ate_size);
		rc = nvs_flash_cmp_const(fs, addr, erase_value,
					 sizeof(struct nvs_ate));
		if (rc) {
			/* closed sector */
			closed_sectors++;
			nvs_sector_advance(fs, &addr);
			rc = nvs_flash_cmp_const(fs, addr, erase_value,
						 sizeof(struct nvs_ate));
			if (!rc) {
				/* open sector */
				break;
			}
		}
	}
	/* all sectors are closed, this is not an nvs fs or it is irreparably corrupted */
	if (closed_sectors == fs->sector_count) {
#ifdef CONFIG_NVS_INIT_BAD_MEMORY_REGION
		LOG_WRN("All sectors closed, erasing all sectors...");
		rc = flash_flatten(fs->flash_device, fs->offset,
				   fs->sector_size * fs->sector_count);
		if (rc) {
			goto end;
		}

		i = fs->sector_count;
		addr = ((fs->sector_count - 1) << ADDR_SECT_SHIFT) +
		       (uint16_t)(fs->sector_size - ate_size);
#else
		rc = -EDEADLK;
		goto end;
#endif
	}

	if (i == fs->sector_count) {
		/* none of the sectors were closed, in most cases we can set
		 * the address to the first sector, except when there are only
		 * two sectors. Then we can only set it to the first sector if
		 * the last sector contains no ate's. So we check this first
		 */
		rc = nvs_flash_cmp_const(fs, addr - ate_size, erase_value,
				sizeof(struct nvs_ate));
		if (!rc) {
			/* empty ate */
			nvs_sector_advance(fs, &addr);
		}
	}

	/* addr contains address of closing ate in the most recent sector,
	 * search for the last valid ate using the recover_last_ate routine
	 */

	rc = nvs_recover_last_ate(fs, &addr);
	if (rc) {
		goto end;
	}

	/* addr contains address of the last valid ate in the most recent sector
	 * search for the first ate containing all cells erased, in the process
	 * also update fs->data_wra.
	 */
	fs->ate_wra = addr;
	fs->data_wra = addr & ADDR_SECT_MASK;

	while (fs->ate_wra >= fs->data_wra) {
		rc = nvs_flash_ate_rd(fs, fs->ate_wra, &last_ate);
		if (rc) {
			goto end;
		}

		rc = nvs_ate_cmp_const(&last_ate, erase_value);

		if (!rc) {
			/* found ff empty location */
			break;
		}

		if (nvs_ate_valid(fs, &last_ate)) {
			/* complete write of ate was performed */
			fs->data_wra = addr & ADDR_SECT_MASK;
			/* Align the data write address to the current
			 * write block size so that it is possible to write to
			 * the sector even if the block size has changed after
			 * a software upgrade (unless the physical ATE size
			 * will change).
			 */
			fs->data_wra += nvs_al_size(fs, last_ate.offset + last_ate.len);

			/* the ate on the last position within the sector is
			 * reserved for deleting an entry
			 */
			if (fs->ate_wra == fs->data_wra && last_ate.len) {
				/* not a delete ate */
				rc = -ESPIPE;
				goto end;
			}
		}

		fs->ate_wra -= ate_size;
	}

	/* if the sector after the write sector is not empty, gc was interrupted
	 * and we might need to restart gc if it has not yet finished. Otherwise
	 * just erase the sector.
	 * When gc needs to be restarted, first erase the sector, otherwise the
	 * data might not fit into the sector.
	 */
	addr = fs->ate_wra & ADDR_SECT_MASK;
	nvs_sector_advance(fs, &addr);
	rc = nvs_flash_cmp_const(fs, addr, erase_value, fs->sector_size);
	if (rc < 0) {
		goto end;
	}
	if (rc) {
		/* the sector after fs->ate_wra is not empty, look for a marker
		 * (gc_done_ate) that indicates that gc was finished.
		 */
		bool gc_done_marker = false;
		struct nvs_ate gc_done_ate;

		addr = fs->ate_wra + ate_size;
		while ((addr & ADDR_OFFS_MASK) < (fs->sector_size - ate_size)) {
			rc = nvs_flash_ate_rd(fs, addr, &gc_done_ate);
			if (rc) {
				goto end;
			}
			if (nvs_ate_valid(fs, &gc_done_ate) &&
			    (gc_done_ate.id == 0xffff) &&
			    (gc_done_ate.len == 0U)) {
				gc_done_marker = true;
				break;
			}
			addr += ate_size;
		}

		if (gc_done_marker) {
			/* erase the next sector */
			LOG_INF("GC Done marker found");
			addr = fs->ate_wra & ADDR_SECT_MASK;
			nvs_sector_advance(fs, &addr);
			rc = nvs_flash_erase_sector(fs, addr);
			goto end;
		}
		LOG_INF("No GC Done marker found: restarting gc");
		rc = nvs_flash_erase_sector(fs, fs->ate_wra);
		if (rc) {
			goto end;
		}
		fs->ate_wra &= ADDR_SECT_MASK;
		fs->ate_wra += (fs->sector_size - 2 * ate_size);
		fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK);
#ifdef CONFIG_NVS_LOOKUP_CACHE
		/* At this point the lookup cache hasn't been built yet, but the
		 * gc function needs to use it. So, temporarily, set all lookup
		 * cache entries to the end of the fs. The cache will be rebuilt
		 * afterwards.
		 */
		for (i = 0; i < CONFIG_NVS_LOOKUP_CACHE_SIZE; i++) {
			fs->lookup_cache[i] = fs->ate_wra;
		}
#endif
		rc = nvs_gc(fs);
		goto end;
	}

	/* possible data write after last ate write, update data_wra */
	while (fs->ate_wra > fs->data_wra) {
		empty_len = fs->ate_wra - fs->data_wra;

		rc = nvs_flash_cmp_const(fs, fs->data_wra, erase_value,
				empty_len);
		if (rc < 0) {
			goto end;
		}
		if (!rc) {
			break;
		}

		fs->data_wra += fs->flash_parameters->write_block_size;
	}

	/* If the ate_wra is pointing to the first ate write location in a
	 * sector and data_wra is not 0, erase the sector as it contains no
	 * valid data (this also avoids closing a sector without any data).
	 */
	if (((fs->ate_wra + 2 * ate_size) == fs->sector_size) &&
	    (fs->data_wra != (fs->ate_wra & ADDR_SECT_MASK))) {
		rc = nvs_flash_erase_sector(fs, fs->ate_wra);
		if (rc) {
			goto end;
		}
		fs->data_wra = fs->ate_wra & ADDR_SECT_MASK;
	}

end:

#ifdef CONFIG_NVS_LOOKUP_CACHE
	if (!rc) {
		rc = nvs_lookup_cache_rebuild(fs);
	}
#endif
	/* If the sector is empty add a gc done ate to avoid having insufficient
	 * space when doing gc.
	 */
	if ((!rc) && ((fs->ate_wra & ADDR_OFFS_MASK) ==
		      (fs->sector_size - 2 * ate_size))) {

		rc = nvs_add_gc_done_ate(fs);
	}
	k_mutex_unlock(&fs->nvs_lock);
	return rc;
}

int nvs_clear(struct nvs_fs *fs)
{
	int rc;
	uint32_t addr;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	for (uint16_t i = 0; i < fs->sector_count; i++) {
		addr = i << ADDR_SECT_SHIFT;
		rc = nvs_flash_erase_sector(fs, addr);
		if (rc) {
			return rc;
		}
	}

	/* nvs needs to be reinitialized after clearing */
	fs->ready = false;

	return 0;
}

int nvs_mount(struct nvs_fs *fs)
{
	int rc;
	struct flash_pages_info info;
	size_t write_block_size;

	k_mutex_init(&fs->nvs_lock);

	fs->flash_parameters = flash_get_parameters(fs->flash_device);
	if (fs->flash_parameters == NULL) {
		LOG_ERR("Could not obtain flash parameters");
		return -EINVAL;
	}

	write_block_size = flash_get_write_block_size(fs->flash_device);

	/* check that the write block size is supported */
	if (write_block_size > NVS_BLOCK_SIZE || write_block_size == 0) {
		LOG_ERR("Unsupported write block size");
		return -EINVAL;
	}

	/* check that sector size is a multiple of pagesize */
	rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info);
	if (rc) {
		LOG_ERR("Unable to get page info");
		return -EINVAL;
	}
	if (!fs->sector_size || fs->sector_size % info.size) {
		LOG_ERR("Invalid sector size");
		return -EINVAL;
	}

	/* check the number of sectors, it should be at least 2 */
	if (fs->sector_count < 2) {
		LOG_ERR("Configuration error - sector count");
		return -EINVAL;
	}

	rc = nvs_startup(fs);
	if (rc) {
		return rc;
	}

	/* nvs is ready for use */
	fs->ready = true;

	LOG_INF("%d Sectors of %d bytes", fs->sector_count, fs->sector_size);
	LOG_INF("alloc wra: %d, %x",
		(fs->ate_wra >> ADDR_SECT_SHIFT),
		(fs->ate_wra & ADDR_OFFS_MASK));
	LOG_INF("data wra: %d, %x",
		(fs->data_wra >> ADDR_SECT_SHIFT),
		(fs->data_wra & ADDR_OFFS_MASK));

	return 0;
}
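
/* Illustrative usage sketch (not part of this driver): a caller fills in
 * flash_device, offset, sector_size and sector_count before calling
 * nvs_mount(), then stores and retrieves elements by 16-bit ID. The
 * "storage_partition" label and the single-page sector size below are
 * assumptions made for the example only.
 *
 *	static struct nvs_fs fs = {
 *		.flash_device = FIXED_PARTITION_DEVICE(storage_partition),
 *		.offset = FIXED_PARTITION_OFFSET(storage_partition),
 *	};
 *	struct flash_pages_info info;
 *	uint32_t boot_count = 0U;
 *
 *	(void)flash_get_page_info_by_offs(fs.flash_device, fs.offset, &info);
 *	fs.sector_size = info.size;
 *	fs.sector_count = 3U;
 *
 *	if (nvs_mount(&fs) == 0) {
 *		(void)nvs_read(&fs, 1, &boot_count, sizeof(boot_count));
 *		boot_count++;
 *		(void)nvs_write(&fs, 1, &boot_count, sizeof(boot_count));
 *	}
 */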

ssize_t nvs_write(struct nvs_fs *fs, uint16_t id, const void *data, size_t len)
{
	int rc, gc_count;
	size_t ate_size, data_size;
	struct nvs_ate wlk_ate;
	uint32_t wlk_addr, rd_addr;
	uint16_t required_space = 0U; /* no space, appropriate for delete ate */
	bool prev_found = false;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));
	data_size = nvs_al_size(fs, len);

	/* The maximum data size is sector size - 4 ate
	 * where: 1 ate for data, 1 ate for sector close, 1 ate for gc done,
	 * and 1 ate to always allow a delete.
	 * Also take into account the data CRC that is appended at the end of the data field,
	 * if any.
	 */
	if ((len > (fs->sector_size - 4 * ate_size - NVS_DATA_CRC_SIZE)) ||
	    ((len > 0) && (data == NULL))) {
		return -EINVAL;
	}

	/* find latest entry with same id */
#ifdef CONFIG_NVS_LOOKUP_CACHE
	wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(id)];

	if (wlk_addr == NVS_LOOKUP_CACHE_NO_ADDR) {
		goto no_cached_entry;
	}
#else
	wlk_addr = fs->ate_wra;
#endif
	rd_addr = wlk_addr;

	while (1) {
		rd_addr = wlk_addr;
		rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
		if (rc) {
			return rc;
		}
		if ((wlk_ate.id == id) && (nvs_ate_valid(fs, &wlk_ate))) {
			prev_found = true;
			break;
		}
		if (wlk_addr == fs->ate_wra) {
			break;
		}
	}

#ifdef CONFIG_NVS_LOOKUP_CACHE
no_cached_entry:
#endif

	if (prev_found) {
		/* previous entry found */
		rd_addr &= ADDR_SECT_MASK;
		rd_addr += wlk_ate.offset;

		if (len == 0) {
			/* do not try to compare with empty data */
			if (wlk_ate.len == 0U) {
				/* skip delete entry as it is already the
				 * last one
				 */
				return 0;
			}
		} else if (len + NVS_DATA_CRC_SIZE == wlk_ate.len) {
			/* do not try to compare if lengths are not equal */
			/* compare the data and if equal return 0 */
			rc = nvs_flash_block_cmp(fs, rd_addr, data, len + NVS_DATA_CRC_SIZE);
			if (rc <= 0) {
				return rc;
			}
		}
	} else {
		/* skip delete entry for non-existing entry */
		if (len == 0) {
			return 0;
		}
	}

	/* calculate required space if the entry contains data */
	if (data_size) {
		/* Leave space for delete ate */
		required_space = data_size + ate_size + NVS_DATA_CRC_SIZE;
	}

	k_mutex_lock(&fs->nvs_lock, K_FOREVER);

	gc_count = 0;
	while (1) {
		if (gc_count == fs->sector_count) {
			/* gc'ed all sectors, no extra space will be created
			 * by extra gc.
			 */
			rc = -ENOSPC;
			goto end;
		}

		if (fs->ate_wra >= (fs->data_wra + required_space)) {

			rc = nvs_flash_wrt_entry(fs, id, data, len);
			if (rc) {
				goto end;
			}
			break;
		}

		rc = nvs_sector_close(fs);
		if (rc) {
			goto end;
		}

		rc = nvs_gc(fs);
		if (rc) {
			goto end;
		}
		gc_count++;
	}
	rc = len;
end:
	k_mutex_unlock(&fs->nvs_lock);
	return rc;
}

int nvs_delete(struct nvs_fs *fs, uint16_t id)
{
	return nvs_write(fs, id, NULL, 0);
}
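
/* Note: a delete is recorded as a zero-length write; the resulting ATE with
 * len == 0 marks the ID as deleted, and the superseded data is not copied
 * forward when its sector is garbage collected.
 */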

ssize_t nvs_read_hist(struct nvs_fs *fs, uint16_t id, void *data, size_t len,
		      uint16_t cnt)
{
	int rc;
	uint32_t wlk_addr, rd_addr;
	uint16_t cnt_his;
	struct nvs_ate wlk_ate;
	size_t ate_size;
#ifdef CONFIG_NVS_DATA_CRC
	uint32_t read_data_crc, computed_data_crc;
#endif

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	if (len > (fs->sector_size - 2 * ate_size)) {
		return -EINVAL;
	}

	cnt_his = 0U;

#ifdef CONFIG_NVS_LOOKUP_CACHE
	wlk_addr = fs->lookup_cache[nvs_lookup_cache_pos(id)];

	if (wlk_addr == NVS_LOOKUP_CACHE_NO_ADDR) {
		rc = -ENOENT;
		goto err;
	}
#else
	wlk_addr = fs->ate_wra;
#endif
	rd_addr = wlk_addr;

	while (cnt_his <= cnt) {
		rd_addr = wlk_addr;
		rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
		if (rc) {
			goto err;
		}
		if ((wlk_ate.id == id) && (nvs_ate_valid(fs, &wlk_ate))) {
			cnt_his++;
		}
		if (wlk_addr == fs->ate_wra) {
			break;
		}
	}

	if (((wlk_addr == fs->ate_wra) && (wlk_ate.id != id)) ||
	    (wlk_ate.len == 0U) || (cnt_his < cnt)) {
		return -ENOENT;
	}

#ifdef CONFIG_NVS_DATA_CRC
	/* When data CRC is enabled, there should be at least the CRC stored in the data field */
	if (wlk_ate.len < NVS_DATA_CRC_SIZE) {
		return -ENOENT;
	}
#endif

	rd_addr &= ADDR_SECT_MASK;
	rd_addr += wlk_ate.offset;
	rc = nvs_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len - NVS_DATA_CRC_SIZE));
	if (rc) {
		goto err;
	}

	/* Check data CRC (only if the whole element data has been read) */
#ifdef CONFIG_NVS_DATA_CRC
	if (len >= (wlk_ate.len - NVS_DATA_CRC_SIZE)) {
		rd_addr += wlk_ate.len - NVS_DATA_CRC_SIZE;
		rc = nvs_flash_rd(fs, rd_addr, &read_data_crc, sizeof(read_data_crc));
		if (rc) {
			goto err;
		}

		computed_data_crc = crc32_ieee(data, wlk_ate.len - NVS_DATA_CRC_SIZE);
		if (read_data_crc != computed_data_crc) {
			LOG_ERR("Invalid data CRC: read_data_crc=0x%08X, computed_data_crc=0x%08X",
				read_data_crc, computed_data_crc);
			rc = -EIO;
			goto err;
		}
	}
#endif

	return wlk_ate.len - NVS_DATA_CRC_SIZE;

err:
	return rc;
}

ssize_t nvs_read(struct nvs_fs *fs, uint16_t id, void *data, size_t len)
{
	int rc;

	rc = nvs_read_hist(fs, id, data, len, 0);
	return rc;
}

ssize_t nvs_calc_free_space(struct nvs_fs *fs)
{
	int rc;
	struct nvs_ate step_ate, wlk_ate;
	uint32_t step_addr, wlk_addr;
	size_t ate_size, free_space;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	/*
	 * There is always a closing ATE and a reserved ATE for
	 * deletion in each sector.
	 * Take into account one less sector because it is reserved for the
	 * garbage collection.
	 */
	free_space = (fs->sector_count - 1) * (fs->sector_size - (2 * ate_size));

	step_addr = fs->ate_wra;

	while (1) {
		rc = nvs_prev_ate(fs, &step_addr, &step_ate);
		if (rc) {
			return rc;
		}

		wlk_addr = fs->ate_wra;

		while (1) {
			rc = nvs_prev_ate(fs, &wlk_addr, &wlk_ate);
			if (rc) {
				return rc;
			}
			if ((wlk_ate.id == step_ate.id) ||
			    (wlk_addr == fs->ate_wra)) {
				break;
			}
		}

		if (nvs_ate_valid(fs, &step_ate)) {
			/* Take into account the GC done ATE if it is present */
			if (step_ate.len == 0) {
				if (step_ate.id == 0xFFFF) {
					free_space -= ate_size;
				}
			} else if (wlk_addr == step_addr) {
				/* count needed */
				free_space -= nvs_al_size(fs, step_ate.len);
				free_space -= ate_size;
			}
		}

		if (step_addr == fs->ate_wra) {
			break;
		}
	}
	return free_space;
}

size_t nvs_sector_max_data_size(struct nvs_fs *fs)
{
	size_t ate_size;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	ate_size = nvs_al_size(fs, sizeof(struct nvs_ate));

	return fs->ate_wra - fs->data_wra - ate_size - NVS_DATA_CRC_SIZE;
}

int nvs_sector_use_next(struct nvs_fs *fs)
{
	int ret;

	if (!fs->ready) {
		LOG_ERR("NVS not initialized");
		return -EACCES;
	}

	k_mutex_lock(&fs->nvs_lock, K_FOREVER);

	ret = nvs_sector_close(fs);
	if (ret != 0) {
		goto end;
	}

	ret = nvs_gc(fs);

end:
	k_mutex_unlock(&fs->nvs_lock);
	return ret;
}