// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Direct MTD block device access
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>

/*
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request.  To avoid over-erasing flash
 * sectors and to speed things up, we locally cache a whole flash sector
 * while it is being written to, until a different sector is required.
 */
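
/*
 * Illustration (assuming, for example, a 64 KiB erase block): a 512-byte
 * block write at offset 0x12345 falls inside the erase block starting at
 * 0x10000.  That whole block is read into cache_data once, the 512 bytes
 * are patched in at offset 0x2345, and the cache is marked STATE_DIRTY.
 * The erase-and-rewrite of the full block is deferred until a different
 * block is written or the device is flushed or released.
 */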

/* Erase the flash region at 'pos' and write 'len' bytes from 'buf' into it. */
static int erase_write(struct mtd_info *mtd, unsigned long pos,
                       unsigned int len, const char *buf)
{
        struct erase_info erase;
        size_t retlen;
        int ret;

        erase.addr = pos;
        erase.len = len;

        ret = mtd_erase(mtd, &erase);
        if (ret) {
                printk(KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] on \"%s\" failed\n",
                       pos, len, mtd->name);
                return ret;
        }

        ret = mtd_write(mtd, pos, len, &retlen, buf);
        if (ret)
                return ret;
        if (retlen != len)
                return -EIO;
        return 0;
}
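
/* Flush the cached sector back to flash if it has been modified. */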
static int write_cached_data(struct mtdblk_dev *mtdblk)
{
        struct mtd_info *mtd = mtdblk->mbd.mtd;
        int ret;

        if (mtdblk->cache_state != STATE_DIRTY)
                return 0;

        pr_debug("mtdblock: writing cached data for \"%s\" at 0x%lx, size 0x%x\n",
                 mtd->name, mtdblk->cache_offset, mtdblk->cache_size);

        ret = erase_write(mtd, mtdblk->cache_offset,
                          mtdblk->cache_size, mtdblk->cache_data);

        /*
         * Declare the cache empty on success, and also on -EIO: if the
         * cached offset points at a bad block, rewriting it over and over
         * would be pointless.
         */
        if (ret == 0 || ret == -EIO)
                mtdblk->cache_state = STATE_EMPTY;
        return ret;
}
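
/*
 * Write 'len' bytes at device offset 'pos'.  Whole-sector writes bypass the
 * cache; partial-sector writes are merged into a cached copy of the sector.
 */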
static int do_cached_write(struct mtdblk_dev *mtdblk, unsigned long pos,
                           int len, const char *buf)
{
        struct mtd_info *mtd = mtdblk->mbd.mtd;
        unsigned int sect_size = mtdblk->cache_size;
        size_t retlen;
        int ret;

        pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
                 mtd->name, pos, len);

        if (!sect_size)
                return mtd_write(mtd, pos, len, &retlen, buf);

        while (len > 0) {
                unsigned long sect_start = (pos / sect_size) * sect_size;
                unsigned int offset = pos - sect_start;
                unsigned int size = sect_size - offset;

                if (size > len)
                        size = len;

                if (size == sect_size) {
                        /* Whole sector: no need to involve the cache. */
                        ret = erase_write(mtd, pos, size, buf);
                        if (ret)
                                return ret;
                } else {
                        /* Partial sector: flush any other dirty sector first. */
                        if (mtdblk->cache_state == STATE_DIRTY &&
                            mtdblk->cache_offset != sect_start) {
                                ret = write_cached_data(mtdblk);
                                if (ret)
                                        return ret;
                        }

                        if (mtdblk->cache_state == STATE_EMPTY ||
                            mtdblk->cache_offset != sect_start) {
                                /* Fill the cache with the current sector. */
                                mtdblk->cache_state = STATE_EMPTY;
                                ret = mtd_read(mtd, sect_start, sect_size,
                                               &retlen, mtdblk->cache_data);
                                if (ret)
                                        return ret;
                                if (retlen != sect_size)
                                        return -EIO;

                                mtdblk->cache_offset = sect_start;
                                mtdblk->cache_size = sect_size;
                                mtdblk->cache_state = STATE_CLEAN;
                        }

                        memcpy(mtdblk->cache_data + offset, buf, size);
                        mtdblk->cache_state = STATE_DIRTY;
                }

                buf += size;
                pos += size;
                len -= size;
        }

        return 0;
}
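
/*
 * Read 'len' bytes at device offset 'pos', serving the data from the cached
 * sector whenever it already holds the requested range.
 */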
static int do_cached_read(struct mtdblk_dev *mtdblk, unsigned long pos,
                          int len, char *buf)
{
        struct mtd_info *mtd = mtdblk->mbd.mtd;
        unsigned int sect_size = mtdblk->cache_size;
        size_t retlen;
        int ret;

        pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
                 mtd->name, pos, len);

        if (!sect_size)
                return mtd_read(mtd, pos, len, &retlen, buf);

        while (len > 0) {
                unsigned long sect_start = (pos / sect_size) * sect_size;
                unsigned int offset = pos - sect_start;
                unsigned int size = sect_size - offset;

                if (size > len)
                        size = len;

                if (mtdblk->cache_state != STATE_EMPTY &&
                    mtdblk->cache_offset == sect_start) {
                        memcpy(buf, mtdblk->cache_data + offset, size);
                } else {
                        ret = mtd_read(mtd, pos, size, &retlen, buf);
                        if (ret)
                                return ret;
                        if (retlen != size)
                                return -EIO;
                }

                buf += size;
                pos += size;
                len -= size;
        }

        return 0;
}
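
/* Per-sector write hook called by the mtd_blktrans layer. */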
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
                              unsigned long block, char *buf)
{
        struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

        if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
                mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
                if (!mtdblk->cache_data)
                        return -EINTR;
                /*
                 * -EINTR is not really correct, but it is the best match
                 * documented in man 2 write for all cases.  We could also
                 * return -EAGAIN sometimes, but why bother?
                 */
        }
        return do_cached_write(mtdblk, block << 9, 512, buf);
}
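
/*
 * mtdblock_open() below only records the erase size and leaves cache_data
 * NULL, so the cache buffer above is allocated lazily on the first write.
 */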
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
        struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

        if (mtdblk->count) {
                mtdblk->count++;
                return 0;
        }

        if (mtd_type_is_nand(mbd->mtd))
                pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
                        mbd->tr->name, mbd->mtd->name);

        /* First open: set up the cache for devices that need erase cycles. */
        mtdblk->count = 1;
        mutex_init(&mtdblk->cache_mutex);
        mtdblk->cache_state = STATE_EMPTY;
        if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
                mtdblk->cache_size = mbd->mtd->erasesize;
                mtdblk->cache_data = NULL;
        }

        return 0;
}
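
/*
 * On the last release, write back any dirty cached data, sync the device if
 * it was opened for writing, and free the cache buffer.
 */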
static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
        struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

        mutex_lock(&mtdblk->cache_mutex);
        write_cached_data(mtdblk);
        mutex_unlock(&mtdblk->cache_mutex);

        if (!--mtdblk->count) {
                if (mbd->file_mode & FMODE_WRITE)
                        mtd_sync(mbd->mtd);
                vfree(mtdblk->cache_data);
        }
}
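
/* Block-layer flush: write back the cache and sync the underlying MTD. */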
static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
        struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
        int ret;

        mutex_lock(&mtdblk->cache_mutex);
        ret = write_cached_data(mtdblk);
        mutex_unlock(&mtdblk->cache_mutex);
        mtd_sync(dev->mtd);
        return ret;
}
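
/*
 * Create a block translation device for a newly registered MTD device: its
 * size is expressed in 512-byte sectors, and a non-writeable MTD becomes a
 * read-only block device.
 */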
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
        struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return;

        dev->mbd.mtd = mtd;
        dev->mbd.devnum = mtd->index;

        dev->mbd.size = mtd->size >> 9;
        dev->mbd.tr = tr;

        if (!(mtd->flags & MTD_WRITEABLE))
                dev->mbd.readonly = 1;

        if (add_mtd_blktrans_dev(&dev->mbd))
                kfree(dev);
}
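
/* The translation-layer operations behind the mtdblock devices. */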
static struct mtd_blktrans_ops mtdblock_tr = {
        .name           = "mtdblock",
        /* ... remaining hooks: open, release, readsect, writesect, flush, add_mtd ... */
};

MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");