/*
 * Copyright (c) 2016 Intel Corporation.
 * Copyright (c) 2022-2024 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <string.h>
#include <zephyr/types.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/util.h>
#include <zephyr/drivers/disk.h>
#include <errno.h>
#include <zephyr/init.h>
#include <zephyr/device.h>
#include <zephyr/drivers/flash.h>
#include <zephyr/storage/flash_map.h>

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(flashdisk, CONFIG_FLASHDISK_LOG_LEVEL);

#if defined(CONFIG_FLASH_HAS_EXPLICIT_ERASE) &&	\
	defined(CONFIG_FLASH_HAS_NO_EXPLICIT_ERASE)
#define DISK_ERASE_RUNTIME_CHECK
#endif

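/*
 * Runtime state of a single flash-backed disk. The cache buffer holds one
 * block (an erase page, or DEFAULT_BLOCK_SIZE bytes on devices without
 * explicit erase) and is written back when a different block is loaded or
 * on an explicit sync/deinit IOCTL.
 */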
struct flashdisk_data {
	struct disk_info info;
	struct k_mutex lock;
	const unsigned int area_id;
	const off_t offset;
	uint8_t *const cache;
	const size_t cache_size;
	const size_t size;
	const size_t sector_size;
	size_t page_size;
	off_t cached_addr;
	bool cache_valid;
	bool cache_dirty;
	bool erase_required;
};

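/* Number of bytes from start up to the next block_size boundary;
 * block_size must be a power of two.
 */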
#define GET_SIZE_TO_BOUNDARY(start, block_size) \
	(block_size - (start & (block_size - 1)))

/*
 * The default block size is used for devices not requiring erase.
 * It defaults to 512 as this is the most widely used sector size
 * on storage devices.
 */
#define DEFAULT_BLOCK_SIZE	512

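/* True when the backing flash device needs an explicit erase before write.
 * Resolved at compile time unless both capabilities are enabled in the
 * build, in which case the value probed at init time is used.
 */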
static inline bool flashdisk_with_erase(const struct flashdisk_data *ctx)
{
	ARG_UNUSED(ctx);
#if CONFIG_FLASH_HAS_EXPLICIT_ERASE
#if CONFIG_FLASH_HAS_NO_EXPLICIT_ERASE
	return ctx->erase_required;
#else
	return true;
#endif
#endif
	return false;
}

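/* Probe the flash driver for its erase capability when the decision cannot
 * be made at compile time.
 */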
static inline void flashdisk_probe_erase(struct flashdisk_data *ctx)
{
#if defined(DISK_ERASE_RUNTIME_CHECK)
	ctx->erase_required =
		flash_params_get_erase_cap(flash_get_parameters(ctx->info.dev)) &
			FLASH_ERASE_C_EXPLICIT;
#else
	ARG_UNUSED(ctx);
#endif
}

static int disk_flash_access_status(struct disk_info *disk)
{
	LOG_DBG("status : %s", disk->dev ? "okay" : "no media");
	if (!disk->dev) {
		return DISK_STATUS_NOMEDIA;
	}

	return DISK_STATUS_OK;
}

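/* Pick the block size used by the cache (erase page size for devices with
 * explicit erase, DEFAULT_BLOCK_SIZE otherwise) and, with
 * CONFIG_FLASHDISK_VERIFY_PAGE_LAYOUT, verify that the partition is aligned
 * to uniformly sized erase pages.
 */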
static int flashdisk_init_runtime(struct flashdisk_data *ctx,
				  const struct flash_area *fap)
{
	int rc;
	struct flash_pages_info page;
	off_t offset;

	flashdisk_probe_erase(ctx);

	if (IS_ENABLED(CONFIG_FLASHDISK_VERIFY_PAGE_LAYOUT) && flashdisk_with_erase(ctx)) {
		rc = flash_get_page_info_by_offs(ctx->info.dev, ctx->offset, &page);
		if (rc < 0) {
			LOG_ERR("Error %d while getting page info", rc);
			return rc;
		}

		ctx->page_size = page.size;
	} else {
		ctx->page_size = DEFAULT_BLOCK_SIZE;
	}

	LOG_INF("Initialize device %s", ctx->info.name);
	LOG_INF("offset %lx, sector size %zu, page size %zu, volume size %zu",
		(long)ctx->offset, ctx->sector_size, ctx->page_size, ctx->size);

	if (ctx->cache_size == 0) {
		/* Read-only flashdisk, no flash partition constraints */
		LOG_INF("%s is read-only", ctx->info.name);
		return 0;
	}

	if (IS_ENABLED(CONFIG_FLASHDISK_VERIFY_PAGE_LAYOUT) && flashdisk_with_erase(ctx)) {
		if (ctx->offset != page.start_offset) {
			LOG_ERR("Disk %s does not start at page boundary",
				ctx->info.name);
			return -EINVAL;
		}

		offset = ctx->offset + page.size;
		while (offset < ctx->offset + ctx->size) {
			rc = flash_get_page_info_by_offs(ctx->info.dev, offset, &page);
			if (rc < 0) {
				LOG_ERR("Error %d getting page info at offset %lx", rc, (long)offset);
				return rc;
			}
			if (page.size != ctx->page_size) {
				LOG_ERR("Non-uniform page size is not supported");
				return -EINVAL;
			}
			offset += page.size;
		}

		if (offset != ctx->offset + ctx->size) {
			LOG_ERR("Last page crosses disk %s boundary",
				ctx->info.name);
			return -EINVAL;
		}
	}

	if (ctx->page_size > ctx->cache_size) {
		LOG_ERR("Cache too small (%zu needs %zu)",
			ctx->cache_size, ctx->page_size);
		return -ENOMEM;
	}

	return 0;
}

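/* Open the backing flash area, bind its flash device to the disk and
 * validate the runtime configuration.
 */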
static int disk_flash_access_init(struct disk_info *disk)
{
	struct flashdisk_data *ctx;
	const struct flash_area *fap;
	int rc;

	ctx = CONTAINER_OF(disk, struct flashdisk_data, info);

	rc = flash_area_open(ctx->area_id, &fap);
	if (rc < 0) {
		LOG_ERR("Flash area %u open error %d", ctx->area_id, rc);
		return rc;
	}

	k_mutex_lock(&ctx->lock, K_FOREVER);

	disk->dev = flash_area_get_device(fap);

	rc = flashdisk_init_runtime(ctx, fap);
	if (rc < 0) {
		flash_area_close(fap);
	}
	k_mutex_unlock(&ctx->lock);

	return rc;
}

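/* Check that the requested sector range lies entirely within the flash
 * partition backing this disk.
 */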
static bool sectors_in_range(struct flashdisk_data *ctx,
			     uint32_t start_sector, uint32_t sector_count)
{
	uint32_t start, end;

	start = ctx->offset + (start_sector * ctx->sector_size);
	end = start + (sector_count * ctx->sector_size);

	if ((end >= start) && (start >= ctx->offset) && (end <= ctx->offset + ctx->size)) {
		return true;
	}

	LOG_ERR("sector start %" PRIu32 " count %" PRIu32
		" outside partition boundary", start_sector, sector_count);
	return false;
}

static int disk_flash_access_read(struct disk_info *disk, uint8_t *buff,
				uint32_t start_sector, uint32_t sector_count)
{
	struct flashdisk_data *ctx;
	off_t fl_addr;
	uint32_t remaining;
	uint32_t offset;
	uint32_t len;
	int rc = 0;

	ctx = CONTAINER_OF(disk, struct flashdisk_data, info);

	if (!sectors_in_range(ctx, start_sector, sector_count)) {
		return -EINVAL;
	}

	fl_addr = ctx->offset + start_sector * ctx->sector_size;
	remaining = (sector_count * ctx->sector_size);

	k_mutex_lock(&ctx->lock, K_FOREVER);

	/* Operate on page addresses to easily check for cached data */
	offset = fl_addr & (ctx->page_size - 1);
	fl_addr = ROUND_DOWN(fl_addr, ctx->page_size);

	/* Read up to page boundary on first iteration */
	len = ctx->page_size - offset;
	while (remaining) {
		if (remaining < len) {
			len = remaining;
		}

		if (ctx->cache_valid && ctx->cached_addr == fl_addr) {
			memcpy(buff, &ctx->cache[offset], len);
		} else if (flash_read(disk->dev, fl_addr + offset, buff, len) < 0) {
			rc = -EIO;
			goto end;
		}

		fl_addr += ctx->page_size;
		remaining -= len;
		buff += len;

		/* Try to read whole page on next iteration */
		len = ctx->page_size;
		offset = 0;
	}

end:
	k_mutex_unlock(&ctx->lock);

	return rc;
}

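/* Write the cached block back to flash if it has been modified, erasing
 * the page first on devices that require explicit erase.
 */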
static int flashdisk_cache_commit(struct flashdisk_data *ctx)
{
	if (!ctx->cache_valid || !ctx->cache_dirty) {
		/* Either no cached data or cache matches flash data */
		return 0;
	}

	if (flashdisk_with_erase(ctx)) {
		if (flash_erase(ctx->info.dev, ctx->cached_addr, ctx->page_size) < 0) {
			return -EIO;
		}
	}

	/* write data to flash */
	if (flash_write(ctx->info.dev, ctx->cached_addr, ctx->cache, ctx->page_size) < 0) {
		return -EIO;
	}

	ctx->cache_dirty = false;
	return 0;
}

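/* Ensure the block starting at fl_addr is in the cache, committing any
 * previously cached dirty block first.
 */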
static int flashdisk_cache_load(struct flashdisk_data *ctx, off_t fl_addr)
{
	int rc;

	__ASSERT_NO_MSG((fl_addr & (ctx->page_size - 1)) == 0);

	if (ctx->cache_valid) {
		if (ctx->cached_addr == fl_addr) {
			/* Page is already cached */
			return 0;
		}
		/* Different page is in cache, commit it first */
		rc = flashdisk_cache_commit(ctx);
		if (rc < 0) {
			/* Failed to commit dirty page, abort */
			return rc;
		}
	}

	/* Load page into cache */
	ctx->cache_valid = false;
	ctx->cache_dirty = false;
	ctx->cached_addr = fl_addr;
	rc = flash_read(ctx->info.dev, fl_addr, ctx->cache, ctx->page_size);
	if (rc == 0) {
		/* Successfully loaded into cache, mark as valid */
		ctx->cache_valid = true;
		return 0;
	}

	return -EIO;
}

/* Input size is less than or equal to the block size (ctx->page_size)
 * and write data never spans adjacent blocks.
 */
static int flashdisk_cache_write(struct flashdisk_data *ctx, off_t start_addr,
				uint32_t size, const void *buff)
{
	int rc;
	off_t fl_addr;
	uint32_t offset;

	/* adjust offset if starting address is not erase-aligned address */
	offset = start_addr & (ctx->page_size - 1);

	/* always align starting address for flash cache operations */
	fl_addr = ROUND_DOWN(start_addr, ctx->page_size);

	/* when writing full page the address must be page aligned
	 * when writing partial page user data must be within a single page
	 */
	__ASSERT_NO_MSG(fl_addr + ctx->page_size >= start_addr + size);

	rc = flashdisk_cache_load(ctx, fl_addr);
	if (rc < 0) {
		return rc;
	}

	/* Do not mark cache as dirty if data to be written matches cache.
	 * If cache is already dirty, copy data to cache without compare.
	 */
	if (ctx->cache_dirty || memcmp(&ctx->cache[offset], buff, size)) {
		/* Update cache and mark it as dirty */
		memcpy(&ctx->cache[offset], buff, size);
		ctx->cache_dirty = true;
	}

	return 0;
}

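/* Write sectors through the block cache: an unaligned head fragment first,
 * then whole blocks, then any partial tail, each routed through
 * flashdisk_cache_write().
 */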
static int disk_flash_access_write(struct disk_info *disk, const uint8_t *buff,
				 uint32_t start_sector, uint32_t sector_count)
{
	struct flashdisk_data *ctx;
	off_t fl_addr;
	uint32_t remaining;
	uint32_t size;
	int rc = 0;

	ctx = CONTAINER_OF(disk, struct flashdisk_data, info);

	if (ctx->cache_size == 0) {
		return -ENOTSUP;
	}

	if (!sectors_in_range(ctx, start_sector, sector_count)) {
		return -EINVAL;
	}

	fl_addr = ctx->offset + start_sector * ctx->sector_size;
	remaining = (sector_count * ctx->sector_size);

	k_mutex_lock(&ctx->lock, K_FOREVER);

	/* check if start address is erase-aligned address */
	if (fl_addr & (ctx->page_size - 1)) {
		off_t block_bnd;

		/* not aligned */
		/* check if the size goes over flash block boundary */
		block_bnd = fl_addr + ctx->page_size;
		block_bnd = block_bnd & ~(ctx->page_size - 1);
		if ((fl_addr + remaining) <= block_bnd) {
			/* not over block boundary (a partial block also) */
			if (flashdisk_cache_write(ctx, fl_addr, remaining, buff) < 0) {
				rc = -EIO;
			}
			goto end;
		}

		/* write goes over block boundary */
		size = GET_SIZE_TO_BOUNDARY(fl_addr, ctx->page_size);

		/* write first partial block */
		if (flashdisk_cache_write(ctx, fl_addr, size, buff) < 0) {
			rc = -EIO;
			goto end;
		}

		fl_addr += size;
		remaining -= size;
		buff += size;
	}

	/* start is an erase-aligned address */
	while (remaining) {
		if (remaining < ctx->page_size) {
			break;
		}

		if (flashdisk_cache_write(ctx, fl_addr, ctx->page_size, buff) < 0) {
			rc = -EIO;
			goto end;
		}

		fl_addr += ctx->page_size;
		remaining -= ctx->page_size;
		buff += ctx->page_size;
	}

	/* remaining partial block */
	if (remaining) {
		if (flashdisk_cache_write(ctx, fl_addr, remaining, buff) < 0) {
			rc = -EIO;
			goto end;
		}
	}

end:
	k_mutex_unlock(&ctx->lock);

	return rc;
}

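/* Disk IOCTLs: sync/deinit flush the cache, init (re)initializes the disk,
 * the geometry queries report sector count, sector size and erase block
 * size (in sectors).
 */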
static int disk_flash_access_ioctl(struct disk_info *disk, uint8_t cmd, void *buff)
{
	int rc;
	struct flashdisk_data *ctx;

	ctx = CONTAINER_OF(disk, struct flashdisk_data, info);

	switch (cmd) {
	case DISK_IOCTL_CTRL_DEINIT:
	case DISK_IOCTL_CTRL_SYNC:
		k_mutex_lock(&ctx->lock, K_FOREVER);
		rc = flashdisk_cache_commit(ctx);
		k_mutex_unlock(&ctx->lock);
		return rc;
	case DISK_IOCTL_GET_SECTOR_COUNT:
		*(uint32_t *)buff = ctx->size / ctx->sector_size;
		return 0;
	case DISK_IOCTL_GET_SECTOR_SIZE:
		*(uint32_t *)buff = ctx->sector_size;
		return 0;
	case DISK_IOCTL_GET_ERASE_BLOCK_SZ: /* in sectors */
		k_mutex_lock(&ctx->lock, K_FOREVER);
		*(uint32_t *)buff = ctx->page_size / ctx->sector_size;
		k_mutex_unlock(&ctx->lock);
		return 0;
	case DISK_IOCTL_CTRL_INIT:
		return disk_flash_access_init(disk);
	default:
		break;
	}

	return -EINVAL;
}

static const struct disk_operations flash_disk_ops = {
	.init = disk_flash_access_init,
	.status = disk_flash_access_status,
	.read = disk_flash_access_read,
	.write = disk_flash_access_write,
	.ioctl = disk_flash_access_ioctl,
};

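/* Instantiate one flashdisk per enabled zephyr,flash-disk devicetree node.
 * The cache buffer is sized from the cache-size property and forced to 0
 * for read-only partitions.
 */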
#define DT_DRV_COMPAT zephyr_flash_disk

#define PARTITION_PHANDLE(n) DT_PHANDLE_BY_IDX(DT_DRV_INST(n), partition, 0)
/* Force cache size to 0 if partition is read-only */
#define CACHE_SIZE(n) (DT_INST_PROP(n, cache_size) * !DT_PROP(PARTITION_PHANDLE(n), read_only))

#define DEFINE_FLASHDISKS_CACHE(n) \
	static uint8_t __aligned(4) flashdisk##n##_cache[CACHE_SIZE(n)];
DT_INST_FOREACH_STATUS_OKAY(DEFINE_FLASHDISKS_CACHE)

#define DEFINE_FLASHDISKS_DEVICE(n)						\
{										\
	.info = {								\
		.ops = &flash_disk_ops,						\
		.name = DT_INST_PROP(n, disk_name),				\
	},									\
	.area_id = DT_FIXED_PARTITION_ID(PARTITION_PHANDLE(n)),			\
	.offset = DT_REG_ADDR(PARTITION_PHANDLE(n)),				\
	.cache = flashdisk##n##_cache,						\
	.cache_size = sizeof(flashdisk##n##_cache),				\
	.size = DT_REG_SIZE(PARTITION_PHANDLE(n)),				\
	.sector_size = DT_INST_PROP(n, sector_size),				\
},

static struct flashdisk_data flash_disks[] = {
	DT_INST_FOREACH_STATUS_OKAY(DEFINE_FLASHDISKS_DEVICE)
};

#define VERIFY_CACHE_SIZE_IS_NOT_ZERO_IF_NOT_READ_ONLY(n)			\
	COND_CODE_1(DT_PROP(PARTITION_PHANDLE(n), read_only),			\
		(/* cache-size is not used for read-only disks */),		\
		(BUILD_ASSERT(DT_INST_PROP(n, cache_size) != 0,			\
		"Devicetree node " DT_NODE_PATH(DT_DRV_INST(n))			\
		" must have non-zero cache-size");))
DT_INST_FOREACH_STATUS_OKAY(VERIFY_CACHE_SIZE_IS_NOT_ZERO_IF_NOT_READ_ONLY)

#define VERIFY_CACHE_SIZE_IS_MULTIPLY_OF_SECTOR_SIZE(n)					\
	BUILD_ASSERT(DT_INST_PROP(n, cache_size) % DT_INST_PROP(n, sector_size) == 0,	\
		"Devicetree node " DT_NODE_PATH(DT_DRV_INST(n))				\
		" has cache size which is not a multiple of its sector size");
DT_INST_FOREACH_STATUS_OKAY(VERIFY_CACHE_SIZE_IS_MULTIPLY_OF_SECTOR_SIZE)

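/* Register every flash disk with the disk access subsystem at boot. */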
static int disk_flash_init(void)
{
	int err = 0;

	for (int i = 0; i < ARRAY_SIZE(flash_disks); i++) {
		int rc;

		k_mutex_init(&flash_disks[i].lock);

		rc = disk_access_register(&flash_disks[i].info);
		if (rc < 0) {
			LOG_ERR("Failed to register disk %s error %d",
				flash_disks[i].info.name, rc);
			err = rc;
		}
	}

	return err;
}

SYS_INIT(disk_flash_init, POST_KERNEL, CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);