1 // Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14 #include "nvs_page.hpp"
15 #if defined(LINUX_TARGET)
16 #include "crc.h"
17 #else
18 #include <esp_rom_crc.h>
19 #endif
20 #include <cstdio>
21 #include <cstring>
22
23 namespace nvs
24 {
25
Page()26 Page::Page() : mPartition(nullptr) { }
27
// Compute the CRC32 of the header fields from mSeqNumber up to (but not
// including) mCrc32, i.e. everything except the page-state word and the CRC
// field itself, seeded with 0xffffffff to match the on-flash format.
uint32_t Page::Header::calculateCrc32()
{
    return esp_rom_crc32_le(0xffffffff,
                            reinterpret_cast<uint8_t*>(this) + offsetof(Header, mSeqNumber),
                            offsetof(Header, mCrc32) - offsetof(Header, mSeqNumber));
}
34
load(Partition * partition,uint32_t sectorNumber)35 esp_err_t Page::load(Partition *partition, uint32_t sectorNumber)
36 {
37 if (partition == nullptr) {
38 return ESP_ERR_INVALID_ARG;
39 }
40
41 mPartition = partition;
42 mBaseAddress = sectorNumber * SEC_SIZE;
43 mUsedEntryCount = 0;
44 mErasedEntryCount = 0;
45
46 Header header;
47 auto rc = mPartition->read_raw(mBaseAddress, &header, sizeof(header));
48 if (rc != ESP_OK) {
49 mState = PageState::INVALID;
50 return rc;
51 }
52 if (header.mState == PageState::UNINITIALIZED) {
53 mState = header.mState;
54 // check if the whole page is really empty
55 // reading the whole page takes ~40 times less than erasing it
56 const int BLOCK_SIZE = 128;
57 uint32_t* block = new (std::nothrow) uint32_t[BLOCK_SIZE];
58
59 if (!block) return ESP_ERR_NO_MEM;
60
61 for (uint32_t i = 0; i < SPI_FLASH_SEC_SIZE; i += 4 * BLOCK_SIZE) {
62 rc = mPartition->read_raw(mBaseAddress + i, block, 4 * BLOCK_SIZE);
63 if (rc != ESP_OK) {
64 mState = PageState::INVALID;
65 delete[] block;
66 return rc;
67 }
68 if (std::any_of(block, block + BLOCK_SIZE, [](uint32_t val) -> bool { return val != 0xffffffff; })) {
69 // page isn't as empty after all, mark it as corrupted
70 mState = PageState::CORRUPT;
71 break;
72 }
73 }
74 delete[] block;
75 } else if (header.mCrc32 != header.calculateCrc32()) {
76 header.mState = PageState::CORRUPT;
77 } else {
78 mState = header.mState;
79 mSeqNumber = header.mSeqNumber;
80 if(header.mVersion < NVS_VERSION) {
81 return ESP_ERR_NVS_NEW_VERSION_FOUND;
82 } else {
83 mVersion = header.mVersion;
84 }
85 }
86
87 switch (mState) {
88 case PageState::UNINITIALIZED:
89 break;
90
91 case PageState::FULL:
92 case PageState::ACTIVE:
93 case PageState::FREEING:
94 mLoadEntryTable();
95 break;
96
97 default:
98 mState = PageState::CORRUPT;
99 break;
100 }
101
102 return ESP_OK;
103 }
104
writeEntry(const Item & item)105 esp_err_t Page::writeEntry(const Item& item)
106 {
107 esp_err_t err;
108
109 err = mPartition->write(getEntryAddress(mNextFreeEntry), &item, sizeof(item));
110
111 if (err != ESP_OK) {
112 mState = PageState::INVALID;
113 return err;
114 }
115
116 err = alterEntryState(mNextFreeEntry, EntryState::WRITTEN);
117 if (err != ESP_OK) {
118 return err;
119 }
120
121 if (mFirstUsedEntry == INVALID_ENTRY) {
122 mFirstUsedEntry = mNextFreeEntry;
123 }
124
125 ++mUsedEntryCount;
126 ++mNextFreeEntry;
127
128 return ESP_OK;
129 }
130
// Write `size` bytes of variable-length payload starting at the next free
// entry. `size` must be a whole multiple of ENTRY_SIZE, and the item's
// header entry must already have been written (mFirstUsedEntry valid).
// Marks all payload entries WRITTEN and advances the free/used counters.
esp_err_t Page::writeEntryData(const uint8_t* data, size_t size)
{
    assert(size % ENTRY_SIZE == 0);
    assert(mNextFreeEntry != INVALID_ENTRY);
    assert(mFirstUsedEntry != INVALID_ENTRY);
    // number of entries this payload occupies
    const uint16_t count = size / ENTRY_SIZE;

    const uint8_t* buf = data;

#if !defined LINUX_TARGET
    // TODO: check whether still necessary with esp_partition* API
    /* On the ESP32, data can come from DROM, which is not accessible by spi_flash_write
     * function. To work around this, we copy the data to heap if it came from DROM.
     * Hopefully this won't happen very often in practice. For data from DRAM, we should
     * still be able to write it to flash directly.
     * TODO: figure out how to make this platform-specific check nicer (probably by introducing
     * a platform-specific flash layer).
     */
    // NOTE(review): 0x3ff00000 appears to be the DRAM base address boundary
    // on ESP32 — addresses below it are assumed not directly writable to
    // flash; confirm against the target memory map.
    if ((uint32_t) data < 0x3ff00000) {
        buf = (uint8_t*) malloc(size);
        if (!buf) {
            return ESP_ERR_NO_MEM;
        }
        memcpy((void*)buf, data, size);
    }
#endif // ! LINUX_TARGET

    auto rc = mPartition->write(getEntryAddress(mNextFreeEntry), buf, size);

#if !defined LINUX_TARGET
    // free the temporary heap copy, if one was made above
    if (buf != data) {
        free((void*)buf);
    }
#endif // ! LINUX_TARGET
    if (rc != ESP_OK) {
        mState = PageState::INVALID;
        return rc;
    }
    // mark the whole payload range WRITTEN in one pass over the entry table
    auto err = alterEntryRangeState(mNextFreeEntry, mNextFreeEntry + count, EntryState::WRITTEN);
    if (err != ESP_OK) {
        return err;
    }
    mUsedEntryCount += count;
    mNextFreeEntry += count;
    return ESP_OK;
}
177
// Write a complete item: one header entry plus, for variable-length types,
// the payload entries. Initializes the page on first write.
//
// Returns ESP_ERR_NVS_INVALID_STATE for an INVALID page,
// ESP_ERR_NVS_PAGE_FULL when the item does not fit,
// ESP_ERR_NVS_KEY_TOO_LONG / ESP_ERR_NVS_VALUE_TOO_LONG for oversized
// key/data, or a propagated flash error.
esp_err_t Page::writeItem(uint8_t nsIndex, ItemType datatype, const char* key, const void* data, size_t dataSize, uint8_t chunkIdx)
{
    Item item;
    esp_err_t err;

    if (mState == PageState::INVALID) {
        return ESP_ERR_NVS_INVALID_STATE;
    }

    // lazily turn a blank page ACTIVE on the first write
    if (mState == PageState::UNINITIALIZED) {
        err = initialize();
        if (err != ESP_OK) {
            return err;
        }
    }

    if (mState == PageState::FULL) {
        return ESP_ERR_NVS_PAGE_FULL;
    }

    const size_t keySize = strlen(key);
    if (keySize > Item::MAX_KEY_LENGTH) {
        return ESP_ERR_NVS_KEY_TOO_LONG;
    }

    if (dataSize > Page::CHUNK_MAX_SIZE) {
        return ESP_ERR_NVS_VALUE_TOO_LONG;
    }

    // footprint: one header entry, plus payload rounded up to whole entries
    // for variable-length types
    size_t totalSize = ENTRY_SIZE;
    size_t entriesCount = 1;
    if (isVariableLengthType(datatype)) {
        size_t roundedSize = (dataSize + ENTRY_SIZE - 1) & ~(ENTRY_SIZE - 1);
        totalSize += roundedSize;
        entriesCount += roundedSize / ENTRY_SIZE;
    }

    // primitive types should fit into one entry
    assert(totalSize == ENTRY_SIZE ||
           isVariableLengthType(datatype));

    if (mNextFreeEntry == INVALID_ENTRY || mNextFreeEntry + entriesCount > ENTRY_COUNT) {
        // page will not fit this amount of data
        return ESP_ERR_NVS_PAGE_FULL;
    }

    // write first item
    size_t span = (totalSize + ENTRY_SIZE - 1) / ENTRY_SIZE;
    item = Item(nsIndex, datatype, span, key, chunkIdx);
    // register in the in-RAM hash index before touching flash
    err = mHashList.insert(item, mNextFreeEntry);

    if (err != ESP_OK) {
        return err;
    }

    if (!isVariableLengthType(datatype)) {
        // primitive value is stored inline in the header entry itself
        memcpy(item.data, data, dataSize);
        item.crc32 = item.calculateCrc32();
        err = writeEntry(item);
        if (err != ESP_OK) {
            return err;
        }
    } else {
        // variable-length: header entry records payload size and payload CRC
        const uint8_t* src = reinterpret_cast<const uint8_t*>(data);
        item.varLength.dataCrc32 = Item::calculateCrc32(src, dataSize);
        item.varLength.dataSize = dataSize;
        item.varLength.reserved = 0xffff;
        item.crc32 = item.calculateCrc32();
        err = writeEntry(item);
        if (err != ESP_OK) {
            return err;
        }

        // write the whole-entry-sized prefix of the payload in one go
        size_t left = dataSize / ENTRY_SIZE * ENTRY_SIZE;
        if (left > 0) {
            err = writeEntryData(static_cast<const uint8_t*>(data), left);
            if (err != ESP_OK) {
                return err;
            }
        }

        // remaining tail (< ENTRY_SIZE) goes into a final 0xff-padded entry
        size_t tail = dataSize - left;
        if (tail > 0) {
            std::fill_n(item.rawData, ENTRY_SIZE, 0xff);
            memcpy(item.rawData, static_cast<const uint8_t*>(data) + left, tail);
            err = writeEntry(item);
            if (err != ESP_OK) {
                return err;
            }
        }

    }
    return ESP_OK;
}
272
// Read the value of the item matching the given filters into `data`.
// For fixed-size types, dataSize must equal the type's size; for
// variable-length types it must be at least the stored length. If the
// stored payload fails its CRC check, the item is erased from the page and
// ESP_ERR_NVS_NOT_FOUND is returned.
esp_err_t Page::readItem(uint8_t nsIndex, ItemType datatype, const char* key, void* data, size_t dataSize, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;

    if (mState == PageState::INVALID) {
        return ESP_ERR_NVS_INVALID_STATE;
    }

    esp_err_t rc = findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
    if (rc != ESP_OK) {
        return rc;
    }

    if (!isVariableLengthType(datatype)) {
        // primitive value lives inline in the header entry
        if (dataSize != getAlignmentForType(datatype)) {
            return ESP_ERR_NVS_TYPE_MISMATCH;
        }

        memcpy(data, item.data, dataSize);
        return ESP_OK;
    }

    if (dataSize < static_cast<size_t>(item.varLength.dataSize)) {
        return ESP_ERR_NVS_INVALID_LENGTH;
    }

    // reassemble the payload from the span of entries after the header
    uint8_t* dst = reinterpret_cast<uint8_t*>(data);
    size_t left = item.varLength.dataSize;
    for (size_t i = index + 1; i < index + item.span; ++i) {
        Item ditem;
        rc = readEntry(i, ditem);
        if (rc != ESP_OK) {
            return rc;
        }
        // the last entry may carry fewer than ENTRY_SIZE payload bytes
        size_t willCopy = ENTRY_SIZE;
        willCopy = (left < willCopy)?left:willCopy;
        memcpy(dst, ditem.rawData, willCopy);
        left -= willCopy;
        dst += willCopy;
    }
    // verify payload integrity; on mismatch, drop the item entirely
    if (Item::calculateCrc32(reinterpret_cast<uint8_t*>(data), item.varLength.dataSize) != item.varLength.dataCrc32) {
        rc = eraseEntryAndSpan(index);
        if (rc != ESP_OK) {
            return rc;
        }
        return ESP_ERR_NVS_NOT_FOUND;
    }
    return ESP_OK;
}
323
// Compare the stored value of the matching item with `data` without copying
// it out. Returns ESP_ERR_NVS_CONTENT_DIFFERS when any byte differs,
// ESP_ERR_NVS_NOT_FOUND when the stored payload CRC does not match (unlike
// readItem, the item is NOT erased here), otherwise ESP_OK.
esp_err_t Page::cmpItem(uint8_t nsIndex, ItemType datatype, const char* key, const void* data, size_t dataSize, uint8_t chunkIdx, VerOffset chunkStart)
{
    size_t index = 0;
    Item item;

    if (mState == PageState::INVALID) {
        return ESP_ERR_NVS_INVALID_STATE;
    }

    esp_err_t rc = findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
    if (rc != ESP_OK) {
        return rc;
    }

    if (!isVariableLengthType(datatype)) {
        // primitive value lives inline in the header entry
        if (dataSize != getAlignmentForType(datatype)) {
            return ESP_ERR_NVS_TYPE_MISMATCH;
        }

        if (memcmp(data, item.data, dataSize)) {
            return ESP_ERR_NVS_CONTENT_DIFFERS;
        }
        return ESP_OK;
    }

    if (dataSize < static_cast<size_t>(item.varLength.dataSize)) {
        return ESP_ERR_NVS_INVALID_LENGTH;
    }

    // compare the payload entry by entry against the caller's buffer
    const uint8_t* dst = reinterpret_cast<const uint8_t*>(data);
    size_t left = item.varLength.dataSize;
    for (size_t i = index + 1; i < index + item.span; ++i) {
        Item ditem;
        rc = readEntry(i, ditem);
        if (rc != ESP_OK) {
            return rc;
        }
        // the last entry may carry fewer than ENTRY_SIZE payload bytes
        size_t willCopy = ENTRY_SIZE;
        willCopy = (left < willCopy)?left:willCopy;
        if (memcmp(dst, ditem.rawData, willCopy)) {
            return ESP_ERR_NVS_CONTENT_DIFFERS;
        }
        left -= willCopy;
        dst += willCopy;
    }
    // NOTE: CRC is computed over the CALLER's buffer; since all bytes matched
    // above, a mismatch here means the stored checksum itself is inconsistent.
    if (Item::calculateCrc32(reinterpret_cast<const uint8_t*>(data), item.varLength.dataSize) != item.varLength.dataCrc32) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    return ESP_OK;
}
375
eraseItem(uint8_t nsIndex,ItemType datatype,const char * key,uint8_t chunkIdx,VerOffset chunkStart)376 esp_err_t Page::eraseItem(uint8_t nsIndex, ItemType datatype, const char* key, uint8_t chunkIdx, VerOffset chunkStart)
377 {
378 size_t index = 0;
379 Item item;
380 esp_err_t rc = findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
381 if (rc != ESP_OK) {
382 return rc;
383 }
384 return eraseEntryAndSpan(index);
385 }
386
findItem(uint8_t nsIndex,ItemType datatype,const char * key,uint8_t chunkIdx,VerOffset chunkStart)387 esp_err_t Page::findItem(uint8_t nsIndex, ItemType datatype, const char* key, uint8_t chunkIdx, VerOffset chunkStart)
388 {
389 size_t index = 0;
390 Item item;
391 return findItem(nsIndex, datatype, key, index, item, chunkIdx, chunkStart);
392 }
393
// Erase the entry at `index` and, when it is the header of a variable-length
// item with a valid CRC, all entries of its span. Keeps the used/erased
// counters, the hash index, and mFirstUsedEntry/mNextFreeEntry in sync.
esp_err_t Page::eraseEntryAndSpan(size_t index)
{
    auto state = mEntryTable.get(index);
    assert(state == EntryState::WRITTEN || state == EntryState::EMPTY);

    size_t span = 1;
    if (state == EntryState::WRITTEN) {
        Item item;
        auto rc = readEntry(index, item);
        if (rc != ESP_OK) {
            return rc;
        }
        if (item.calculateCrc32() != item.crc32) {
            // header CRC is bad: the span field can't be trusted, so erase
            // only this entry. NOTE(review): second arg `false` presumably
            // relaxes the hash-list removal for a corrupt entry — confirm
            // against HashList::erase.
            mHashList.erase(index, false);
            rc = alterEntryState(index, EntryState::ERASED);
            --mUsedEntryCount;
            ++mErasedEntryCount;
            if (rc != ESP_OK) {
                return rc;
            }
        } else {
            mHashList.erase(index);
            span = item.span;
            // walk the span backwards, adjusting counters per entry state
            // (payload entries may individually be non-WRITTEN)
            for (ptrdiff_t i = index + span - 1; i >= static_cast<ptrdiff_t>(index); --i) {
                if (mEntryTable.get(i) == EntryState::WRITTEN) {
                    --mUsedEntryCount;
                }
                ++mErasedEntryCount;
            }
            if (span == 1) {
                rc = alterEntryState(index, EntryState::ERASED);
            } else {
                rc = alterEntryRangeState(index, index + span, EntryState::ERASED);
            }
            if (rc != ESP_OK) {
                return rc;
            }
        }
    } else {
        // EMPTY entry: mark it ERASED so it is never handed out again
        auto rc = alterEntryState(index, EntryState::ERASED);
        if (rc != ESP_OK) {
            return rc;
        }
    }

    if (index == mFirstUsedEntry) {
        updateFirstUsedEntry(index, span);
    }

    // entries are never reused within a page: if the erased range reached
    // into the free area, advance the free pointer past it
    if (index + span > mNextFreeEntry) {
        mNextFreeEntry = index + span;
    }

    return ESP_OK;
}
449
updateFirstUsedEntry(size_t index,size_t span)450 void Page::updateFirstUsedEntry(size_t index, size_t span)
451 {
452 assert(index == mFirstUsedEntry);
453 mFirstUsedEntry = INVALID_ENTRY;
454 size_t end = mNextFreeEntry;
455 if (end > ENTRY_COUNT) {
456 end = ENTRY_COUNT;
457 }
458 for (size_t i = index + span; i < end; ++i) {
459 if (mEntryTable.get(i) == EntryState::WRITTEN) {
460 mFirstUsedEntry = i;
461 break;
462 }
463 }
464 }
465
copyItems(Page & other)466 esp_err_t Page::copyItems(Page& other)
467 {
468 if (mFirstUsedEntry == INVALID_ENTRY) {
469 return ESP_ERR_NVS_NOT_FOUND;
470 }
471
472 if (other.mState == PageState::UNINITIALIZED) {
473 auto err = other.initialize();
474 if (err != ESP_OK) {
475 return err;
476 }
477 }
478
479 Item entry;
480 size_t readEntryIndex = mFirstUsedEntry;
481
482 while (readEntryIndex < ENTRY_COUNT) {
483
484 if (mEntryTable.get(readEntryIndex) != EntryState::WRITTEN) {
485 assert(readEntryIndex != mFirstUsedEntry);
486 readEntryIndex++;
487 continue;
488 }
489 auto err = readEntry(readEntryIndex, entry);
490 if (err != ESP_OK) {
491 return err;
492 }
493
494 err = other.mHashList.insert(entry, other.mNextFreeEntry);
495 if (err != ESP_OK) {
496 return err;
497 }
498
499 err = other.writeEntry(entry);
500 if (err != ESP_OK) {
501 return err;
502 }
503 size_t span = entry.span;
504 size_t end = readEntryIndex + span;
505
506 assert(end <= ENTRY_COUNT);
507
508 for (size_t i = readEntryIndex + 1; i < end; ++i) {
509 readEntry(i, entry);
510 err = other.writeEntry(entry);
511 if (err != ESP_OK) {
512 return err;
513 }
514 }
515 readEntryIndex = end;
516
517 }
518 return ESP_OK;
519 }
520
// Rebuild the in-RAM view of the page from flash: the entry state table, the
// used/erased counters, mFirstUsedEntry/mNextFreeEntry, and the hash index.
// Also performs power-failure recovery: half-written entries, torn
// variable-length spans, and duplicate keys left over from an interrupted
// rewrite are erased as they are discovered.
esp_err_t Page::mLoadEntryTable()
{
    // for states where we actually care about data in the page, read entry state table
    if (mState == PageState::ACTIVE ||
            mState == PageState::FULL ||
            mState == PageState::FREEING) {
        auto rc = mPartition->read_raw(mBaseAddress + ENTRY_TABLE_OFFSET, mEntryTable.data(),
                                       mEntryTable.byteSize());
        if (rc != ESP_OK) {
            mState = PageState::INVALID;
            return rc;
        }
    }

    // first pass: count used/erased entries and find the first used one
    mErasedEntryCount = 0;
    mUsedEntryCount = 0;
    for (size_t i = 0; i < ENTRY_COUNT; ++i) {
        auto s = mEntryTable.get(i);
        if (s == EntryState::WRITTEN) {
            if (mFirstUsedEntry == INVALID_ENTRY) {
                mFirstUsedEntry = i;
            }
            ++mUsedEntryCount;
        } else if (s == EntryState::ERASED) {
            ++mErasedEntryCount;
        }
    }

    // for PageState::ACTIVE, we may have more data written to this page
    // as such, we need to figure out where the first unused entry is
    if (mState == PageState::ACTIVE) {
        for (size_t i = 0; i < ENTRY_COUNT; ++i) {
            if (mEntryTable.get(i) == EntryState::EMPTY) {
                mNextFreeEntry = i;
                break;
            }
        }

        // however, if power failed after some data was written into the entry.
        // but before the entry state table was altered, the entry located via
        // entry state table may actually be half-written.
        // this is easy to check by reading EntryHeader (i.e. first word)
        while (mNextFreeEntry < ENTRY_COUNT) {
            uint32_t entryAddress = getEntryAddress(mNextFreeEntry);
            uint32_t header;
            auto rc = mPartition->read_raw(entryAddress, &header, sizeof(header));
            if (rc != ESP_OK) {
                mState = PageState::INVALID;
                return rc;
            }
            if (header != 0xffffffff) {
                // first word not erased-flash pattern: entry was (partially)
                // written without its table bit — mark it ERASED and move on
                auto oldState = mEntryTable.get(mNextFreeEntry);
                auto err = alterEntryState(mNextFreeEntry, EntryState::ERASED);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                ++mNextFreeEntry;
                if (oldState == EntryState::WRITTEN) {
                    --mUsedEntryCount;
                }
                ++mErasedEntryCount;
            }
            else {
                break;
            }
        }

        // check that all variable-length items are written or erased fully
        Item item;
        size_t lastItemIndex = INVALID_ENTRY;
        size_t end = mNextFreeEntry;
        if (end > ENTRY_COUNT) {
            end = ENTRY_COUNT;
        }
        size_t span;
        for (size_t i = 0; i < end; i += span) {
            span = 1;
            if (mEntryTable.get(i) == EntryState::ERASED) {
                lastItemIndex = INVALID_ENTRY;
                continue;
            }

            lastItemIndex = i;

            auto err = readEntry(i, item);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            // corrupted header entry: drop it (span comes from the entry)
            if (item.crc32 != item.calculateCrc32()) {
                err = eraseEntryAndSpan(i);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                continue;
            }

            err = mHashList.insert(item, i);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            // search for potential duplicate item
            size_t duplicateIndex = mHashList.find(0, item);

            if (isVariableLengthType(item.datatype)) {
                span = item.span;
                // every payload entry of the span must be WRITTEN, otherwise
                // the item was torn by a power failure and must be erased
                bool needErase = false;
                for (size_t j = i; j < i + span; ++j) {
                    if (mEntryTable.get(j) != EntryState::WRITTEN) {
                        needErase = true;
                        lastItemIndex = INVALID_ENTRY;
                        break;
                    }
                }
                if (needErase) {
                    eraseEntryAndSpan(i);
                    continue;
                }
            }

            /* Note that logic for duplicate detections works fine even
             * when old-format blob is present along with new-format blob-index
             * for same key on active page. Since datatype is not used in hash calculation,
             * old-format blob will be removed.*/
            if (duplicateIndex < i) {
                eraseEntryAndSpan(duplicateIndex);
            }
        }

        // check that last item is not duplicate
        if (lastItemIndex != INVALID_ENTRY) {
            size_t findItemIndex = 0;
            Item dupItem;
            if (findItem(item.nsIndex, item.datatype, item.key, findItemIndex, dupItem) == ESP_OK) {
                if (findItemIndex < lastItemIndex) {
                    // an older copy of the same key precedes the last item —
                    // remove the older one
                    auto err = eraseEntryAndSpan(findItemIndex);
                    if (err != ESP_OK) {
                        mState = PageState::INVALID;
                        return err;
                    }
                }
            }
        }
    } else if (mState == PageState::FULL || mState == PageState::FREEING) {
        // We have already filled mHashList for page in active state.
        // Do the same for the case when page is in full or freeing state.
        Item item;
        for (size_t i = mFirstUsedEntry; i < ENTRY_COUNT; ++i) {
            if (mEntryTable.get(i) != EntryState::WRITTEN) {
                continue;
            }

            auto err = readEntry(i, item);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            // corrupted header entry: drop it and keep scanning
            if (item.crc32 != item.calculateCrc32()) {
                err = eraseEntryAndSpan(i);
                if (err != ESP_OK) {
                    mState = PageState::INVALID;
                    return err;
                }
                continue;
            }

            assert(item.span > 0);

            err = mHashList.insert(item, i);
            if (err != ESP_OK) {
                mState = PageState::INVALID;
                return err;
            }

            size_t span = item.span;

            // torn variable-length span: erase the whole item
            if (isVariableLengthType(item.datatype)) {
                for (size_t j = i + 1; j < i + span; ++j) {
                    if (mEntryTable.get(j) != EntryState::WRITTEN) {
                        eraseEntryAndSpan(i);
                        break;
                    }
                }
            }

            // skip over the payload entries of this item
            i += span - 1;
        }

    }

    return ESP_OK;
}
719
720
initialize()721 esp_err_t Page::initialize()
722 {
723 assert(mState == PageState::UNINITIALIZED);
724 mState = PageState::ACTIVE;
725 Header header;
726 header.mState = mState;
727 header.mSeqNumber = mSeqNumber;
728 header.mVersion = mVersion;
729 header.mCrc32 = header.calculateCrc32();
730
731 auto rc = mPartition->write_raw(mBaseAddress, &header, sizeof(header));
732 if (rc != ESP_OK) {
733 mState = PageState::INVALID;
734 return rc;
735 }
736
737 mNextFreeEntry = 0;
738 std::fill_n(mEntryTable.data(), mEntryTable.byteSize() / sizeof(uint32_t), 0xffffffff);
739 return ESP_OK;
740 }
741
alterEntryState(size_t index,EntryState state)742 esp_err_t Page::alterEntryState(size_t index, EntryState state)
743 {
744 assert(index < ENTRY_COUNT);
745 mEntryTable.set(index, state);
746 size_t wordToWrite = mEntryTable.getWordIndex(index);
747 uint32_t word = mEntryTable.data()[wordToWrite];
748 auto rc = mPartition->write_raw(mBaseAddress + ENTRY_TABLE_OFFSET + static_cast<uint32_t>(wordToWrite) * 4,
749 &word, sizeof(word));
750 if (rc != ESP_OK) {
751 mState = PageState::INVALID;
752 return rc;
753 }
754 return ESP_OK;
755 }
756
// Set entries [begin, end) to `state`, flushing each modified entry-table
// word to flash exactly once. Iterates backwards, writing a word only when
// the next iteration would move to a different word (or the range ends).
esp_err_t Page::alterEntryRangeState(size_t begin, size_t end, EntryState state)
{
    assert(end <= ENTRY_COUNT);
    assert(end > begin);
    size_t wordIndex = mEntryTable.getWordIndex(end - 1);
    for (ptrdiff_t i = end - 1; i >= static_cast<ptrdiff_t>(begin); --i) {
        mEntryTable.set(i, state);
        size_t nextWordIndex;
        if (i == static_cast<ptrdiff_t>(begin)) {
            // sentinel: no further entries — force a flush of the current word
            nextWordIndex = (size_t) -1;
        } else {
            nextWordIndex = mEntryTable.getWordIndex(i - 1);
        }
        if (nextWordIndex != wordIndex) {
            // leaving this word: persist it to the on-flash entry table
            uint32_t word = mEntryTable.data()[wordIndex];
            auto rc = mPartition->write_raw(mBaseAddress + ENTRY_TABLE_OFFSET + static_cast<uint32_t>(wordIndex) * 4,
                                            &word, 4);
            if (rc != ESP_OK) {
                return rc;
            }
        }
        wordIndex = nextWordIndex;
    }
    return ESP_OK;
}
782
alterPageState(PageState state)783 esp_err_t Page::alterPageState(PageState state)
784 {
785 uint32_t state_val = static_cast<uint32_t>(state);
786 auto rc = mPartition->write_raw(mBaseAddress, &state_val, sizeof(state));
787 if (rc != ESP_OK) {
788 mState = PageState::INVALID;
789 return rc;
790 }
791 mState = (PageState) state;
792 return ESP_OK;
793 }
794
readEntry(size_t index,Item & dst) const795 esp_err_t Page::readEntry(size_t index, Item& dst) const
796 {
797 auto rc = mPartition->read(getEntryAddress(index), &dst, sizeof(dst));
798 if (rc != ESP_OK) {
799 return rc;
800 }
801 return ESP_OK;
802 }
803
// Scan the page for an entry matching the given filters, starting at
// itemIndex (or mFirstUsedEntry, whichever is later). NS_ANY, ItemType::ANY,
// a null key, CHUNK_ANY and VER_ANY act as wildcards. On success fills
// itemIndex and item. Side effect: entries whose CRC fails are erased during
// the scan. Returns ESP_ERR_NVS_NOT_FOUND when nothing matches and
// ESP_ERR_NVS_TYPE_MISMATCH when the key exists with a different type.
esp_err_t Page::findItem(uint8_t nsIndex, ItemType datatype, const char* key, size_t &itemIndex, Item& item, uint8_t chunkIdx, VerOffset chunkStart)
{
    if (mState == PageState::CORRUPT || mState == PageState::INVALID || mState == PageState::UNINITIALIZED) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    size_t findBeginIndex = itemIndex;
    if (findBeginIndex >= ENTRY_COUNT) {
        return ESP_ERR_NVS_NOT_FOUND;
    }

    size_t start = mFirstUsedEntry;
    if (findBeginIndex > mFirstUsedEntry && findBeginIndex < ENTRY_COUNT) {
        start = findBeginIndex;
    }

    size_t end = mNextFreeEntry;
    if (end > ENTRY_COUNT) {
        end = ENTRY_COUNT;
    }

    // fully-specified lookups can use the hash index to jump straight to the
    // candidate entry (or bail out early when the key is absent)
    if (nsIndex != NS_ANY && datatype != ItemType::ANY && key != NULL) {
        size_t cachedIndex = mHashList.find(start, Item(nsIndex, datatype, 0, key, chunkIdx));
        if (cachedIndex < ENTRY_COUNT) {
            start = cachedIndex;
        } else {
            return ESP_ERR_NVS_NOT_FOUND;
        }
    }

    size_t next;
    for (size_t i = start; i < end; i = next) {
        next = i + 1;
        if (mEntryTable.get(i) != EntryState::WRITTEN) {
            continue;
        }

        auto rc = readEntry(i, item);
        if (rc != ESP_OK) {
            mState = PageState::INVALID;
            return rc;
        }

        // drop corrupted entries as we encounter them
        auto crc32 = item.calculateCrc32();
        if (item.crc32 != crc32) {
            rc = eraseEntryAndSpan(i);
            if (rc != ESP_OK) {
                mState = PageState::INVALID;
                return rc;
            }
            continue;
        }

        // variable-length items: jump over the payload entries next iteration
        if (isVariableLengthType(item.datatype)) {
            next = i + item.span;
        }

        if (nsIndex != NS_ANY && item.nsIndex != nsIndex) {
            continue;
        }

        if (key != nullptr && strncmp(key, item.key, Item::MAX_KEY_LENGTH) != 0) {
            continue;
        }
        /* For blob data, chunkIndex should match*/
        if (chunkIdx != CHUNK_ANY
                && datatype == ItemType::BLOB_DATA
                && item.chunkIndex != chunkIdx) {
            continue;
        }
        /* Blob-index will match the <ns,key> with blob data.
         * Skip data chunks when searching for blob index*/
        if (datatype == ItemType::BLOB_IDX
                && item.chunkIndex != CHUNK_ANY) {
            continue;
        }
        /* Match the version for blob-index*/
        if (datatype == ItemType::BLOB_IDX
                && chunkStart != VerOffset::VER_ANY
                && item.blobIndex.chunkStart != chunkStart) {
            continue;
        }


        // key matched but type did not: report a mismatch (except during
        // wildcard scans, which keep searching)
        if (datatype != ItemType::ANY && item.datatype != datatype) {
            if (key == nullptr && nsIndex == NS_ANY && chunkIdx == CHUNK_ANY) {
                continue; // continue for bruteforce search on blob indices.
            }
            itemIndex = i;
            return ESP_ERR_NVS_TYPE_MISMATCH;
        }

        itemIndex = i;

        return ESP_OK;
    }

    return ESP_ERR_NVS_NOT_FOUND;
}
903
getSeqNumber(uint32_t & seqNumber) const904 esp_err_t Page::getSeqNumber(uint32_t& seqNumber) const
905 {
906 if (mState != PageState::UNINITIALIZED && mState != PageState::INVALID && mState != PageState::CORRUPT) {
907 seqNumber = mSeqNumber;
908 return ESP_OK;
909 }
910 return ESP_ERR_NVS_NOT_INITIALIZED;
911 }
912
913
setSeqNumber(uint32_t seqNumber)914 esp_err_t Page::setSeqNumber(uint32_t seqNumber)
915 {
916 if (mState != PageState::UNINITIALIZED) {
917 return ESP_ERR_NVS_INVALID_STATE;
918 }
919 mSeqNumber = seqNumber;
920 return ESP_OK;
921 }
922
setVersion(uint8_t ver)923 esp_err_t Page::setVersion(uint8_t ver)
924 {
925 if (mState != PageState::UNINITIALIZED) {
926 return ESP_ERR_NVS_INVALID_STATE;
927 }
928 mVersion = ver;
929 return ESP_OK;
930 }
931
erase()932 esp_err_t Page::erase()
933 {
934 auto rc = mPartition->erase_range(mBaseAddress, SPI_FLASH_SEC_SIZE);
935 if (rc != ESP_OK) {
936 mState = PageState::INVALID;
937 return rc;
938 }
939 mUsedEntryCount = 0;
940 mErasedEntryCount = 0;
941 mFirstUsedEntry = INVALID_ENTRY;
942 mNextFreeEntry = INVALID_ENTRY;
943 mState = PageState::UNINITIALIZED;
944 mHashList.clear();
945 return ESP_OK;
946 }
947
markFreeing()948 esp_err_t Page::markFreeing()
949 {
950 if (mState != PageState::FULL && mState != PageState::ACTIVE) {
951 return ESP_ERR_NVS_INVALID_STATE;
952 }
953 return alterPageState(PageState::FREEING);
954 }
955
markFull()956 esp_err_t Page::markFull()
957 {
958 if (mState != PageState::ACTIVE) {
959 return ESP_ERR_NVS_INVALID_STATE;
960 }
961 return alterPageState(PageState::FULL);
962 }
963
getVarDataTailroom() const964 size_t Page::getVarDataTailroom() const
965 {
966 if (mState == PageState::UNINITIALIZED) {
967 return CHUNK_MAX_SIZE;
968 } else if (mState == PageState::FULL) {
969 return 0;
970 }
971 /* Skip one entry for blob data item precessing the data */
972 return ((mNextFreeEntry < (ENTRY_COUNT-1)) ? ((ENTRY_COUNT - mNextFreeEntry - 1) * ENTRY_SIZE): 0);
973 }
974
pageStateToName(PageState ps)975 const char* Page::pageStateToName(PageState ps)
976 {
977 switch (ps) {
978 case PageState::CORRUPT:
979 return "CORRUPT";
980
981 case PageState::ACTIVE:
982 return "ACTIVE";
983
984 case PageState::FREEING:
985 return "FREEING";
986
987 case PageState::FULL:
988 return "FULL";
989
990 case PageState::INVALID:
991 return "INVALID";
992
993 case PageState::UNINITIALIZED:
994 return "UNINITIALIZED";
995
996 default:
997 assert(0 && "invalid state value");
998 return "";
999 }
1000 }
1001
// Print a human-readable dump of the page header and every entry to stdout.
// Debug aid only; performs a flash read for each WRITTEN entry.
void Page::debugDump() const
{
    printf("state=%x (%s) addr=%x seq=%d\nfirstUsed=%d nextFree=%d used=%d erased=%d\n", (uint32_t) mState, pageStateToName(mState), mBaseAddress, mSeqNumber, static_cast<int>(mFirstUsedEntry), static_cast<int>(mNextFreeEntry), mUsedEntryCount, mErasedEntryCount);
    // number of payload entries remaining in the current variable-length span
    size_t skip = 0;
    for (size_t i = 0; i < ENTRY_COUNT; ++i) {
        printf("%3d: ", static_cast<int>(i));
        EntryState state = mEntryTable.get(i);
        if (state == EntryState::EMPTY) {
            printf("E\n");
        } else if (state == EntryState::ERASED) {
            printf("X\n");
        } else if (state == EntryState::WRITTEN) {
            Item item;
            readEntry(i, item);
            if (skip == 0) {
                // header entry: print metadata; len is -1 for single-entry items
                printf("W ns=%2u type=%2u span=%3u key=\"%s\" chunkIdx=%d len=%d\n", item.nsIndex, static_cast<unsigned>(item.datatype), item.span, item.key, item.chunkIndex, (item.span != 1)?((int)item.varLength.dataSize):-1);
                if (item.span > 0 && item.span <= ENTRY_COUNT - i) {
                    skip = item.span - 1;
                } else {
                    // implausible span value — don't skip past the table
                    skip = 0;
                }
            } else {
                // payload entry inside a span
                printf("D\n");
                skip--;
            }
        }
    }
}
1030
calcEntries(nvs_stats_t & nvsStats)1031 esp_err_t Page::calcEntries(nvs_stats_t &nvsStats)
1032 {
1033 assert(mState != PageState::FREEING);
1034
1035 nvsStats.total_entries += ENTRY_COUNT;
1036
1037 switch (mState) {
1038 case PageState::UNINITIALIZED:
1039 case PageState::CORRUPT:
1040 nvsStats.free_entries += ENTRY_COUNT;
1041 break;
1042
1043 case PageState::FULL:
1044 case PageState::ACTIVE:
1045 nvsStats.used_entries += mUsedEntryCount;
1046 nvsStats.free_entries += ENTRY_COUNT - mUsedEntryCount; // it's equivalent free + erase entries.
1047 break;
1048
1049 case PageState::INVALID:
1050 return ESP_ERR_INVALID_STATE;
1051 break;
1052
1053 default:
1054 assert(false && "Unhandled state");
1055 break;
1056 }
1057 return ESP_OK;
1058 }
1059
1060 } // namespace nvs
1061