Lines Matching refs:mtdblk
78 static int write_cached_data (struct mtdblk_dev *mtdblk) in write_cached_data() argument
80 struct mtd_info *mtd = mtdblk->mbd.mtd; in write_cached_data()
83 if (mtdblk->cache_state != STATE_DIRTY) in write_cached_data()
88 mtdblk->cache_offset, mtdblk->cache_size); in write_cached_data()
90 ret = erase_write (mtd, mtdblk->cache_offset, in write_cached_data()
91 mtdblk->cache_size, mtdblk->cache_data); in write_cached_data()
105 mtdblk->cache_state = STATE_EMPTY; in write_cached_data()
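The write_cached_data() lines above flush a dirty erase-block cache back to the flash. Below is a minimal sketch of that function and of the struct mtdblk_dev fields it relies on, reconstructed around the matched lines; the struct layout, the erase_write() helper (referenced but not shown in the listing), and the exact error handling are assumptions based on the usual mtdblock.c structure, and the code presumes the driver's normal includes (<linux/mtd/mtd.h>, <linux/mtd/blktrans.h>, <linux/mutex.h>, <linux/vmalloc.h>).

/* Cache bookkeeping, assumed from the fields used throughout the listing. */
struct mtdblk_dev {
        struct mtd_blktrans_dev mbd;        /* embedded blktrans device */
        int count;                          /* open reference count */
        struct mutex cache_mutex;           /* serialises cache access */
        unsigned char *cache_data;          /* one erase block of data */
        unsigned long cache_offset;         /* flash offset of the cached block */
        unsigned int cache_size;            /* erase block size */
        enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
};

/* Write a dirty cached erase block back to flash.  erase_write() is
 * assumed to erase the enclosing block and rewrite it from the buffer. */
static int write_cached_data(struct mtdblk_dev *mtdblk)
{
        struct mtd_info *mtd = mtdblk->mbd.mtd;
        int ret;

        if (mtdblk->cache_state != STATE_DIRTY)
                return 0;                   /* nothing to flush */

        ret = erase_write(mtd, mtdblk->cache_offset,
                          mtdblk->cache_size, mtdblk->cache_data);
        if (ret)
                return ret;

        /*
         * Mark the cache empty rather than clean: the flash contents could
         * change behind the driver's back, so force a re-read next time.
         */
        mtdblk->cache_state = STATE_EMPTY;
        return 0;
}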
110 static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, in do_cached_write() argument
113 struct mtd_info *mtd = mtdblk->mbd.mtd; in do_cached_write()
114 unsigned int sect_size = mtdblk->cache_size; in do_cached_write()
143 if (mtdblk->cache_state == STATE_DIRTY && in do_cached_write()
144 mtdblk->cache_offset != sect_start) { in do_cached_write()
145 ret = write_cached_data(mtdblk); in do_cached_write()
150 if (mtdblk->cache_state == STATE_EMPTY || in do_cached_write()
151 mtdblk->cache_offset != sect_start) { in do_cached_write()
153 mtdblk->cache_state = STATE_EMPTY; in do_cached_write()
155 &retlen, mtdblk->cache_data); in do_cached_write()
161 mtdblk->cache_offset = sect_start; in do_cached_write()
162 mtdblk->cache_size = sect_size; in do_cached_write()
163 mtdblk->cache_state = STATE_CLEAN; in do_cached_write()
167 memcpy (mtdblk->cache_data + offset, buf, size); in do_cached_write()
168 mtdblk->cache_state = STATE_DIRTY; in do_cached_write()
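do_cached_write() is where the erase-block cache earns its keep: a write that covers only part of an erase block is merged into an in-memory copy of that block instead of triggering an erase per 512-byte sector. The sketch below is reconstructed around the matched lines; the whole-block fast path through erase_write(), the mtd_read()/mtd_write() calls, and the error codes are assumptions about the parts the listing does not show.

static int do_cached_write(struct mtdblk_dev *mtdblk, unsigned long pos,
                           int len, const char *buf)
{
        struct mtd_info *mtd = mtdblk->mbd.mtd;
        unsigned int sect_size = mtdblk->cache_size;
        size_t retlen;
        int ret;

        /* With no cache configured, write straight through to the MTD. */
        if (!sect_size)
                return mtd_write(mtd, pos, len, &retlen, buf);

        while (len > 0) {
                unsigned long sect_start = (pos / sect_size) * sect_size;
                unsigned int offset = pos - sect_start;
                unsigned int size = sect_size - offset;

                if (size > len)
                        size = len;

                if (size == sect_size) {
                        /* Whole erase block: bypass the cache entirely. */
                        ret = erase_write(mtd, pos, size, buf);
                        if (ret)
                                return ret;
                } else {
                        /* Partial block: the cache must hold this block. */
                        if (mtdblk->cache_state == STATE_DIRTY &&
                            mtdblk->cache_offset != sect_start) {
                                /* A different block is dirty; flush it first. */
                                ret = write_cached_data(mtdblk);
                                if (ret)
                                        return ret;
                        }

                        if (mtdblk->cache_state == STATE_EMPTY ||
                            mtdblk->cache_offset != sect_start) {
                                /* Fill the cache with the target block. */
                                mtdblk->cache_state = STATE_EMPTY;
                                ret = mtd_read(mtd, sect_start, sect_size,
                                               &retlen, mtdblk->cache_data);
                                if (ret)
                                        return ret;
                                if (retlen != sect_size)
                                        return -EIO;

                                mtdblk->cache_offset = sect_start;
                                mtdblk->cache_size = sect_size;
                                mtdblk->cache_state = STATE_CLEAN;
                        }

                        /* Modify the cached copy; it is flushed later on demand. */
                        memcpy(mtdblk->cache_data + offset, buf, size);
                        mtdblk->cache_state = STATE_DIRTY;
                }

                buf += size;
                pos += size;
                len -= size;
        }

        return 0;
}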
180 static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, in do_cached_read() argument
183 struct mtd_info *mtd = mtdblk->mbd.mtd; in do_cached_read()
184 unsigned int sect_size = mtdblk->cache_size; in do_cached_read()
211 if (mtdblk->cache_state != STATE_EMPTY && in do_cached_read()
212 mtdblk->cache_offset == sect_start) { in do_cached_read()
213 memcpy (buf, mtdblk->cache_data + offset, size); in do_cached_read()
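Reads follow the same geometry: each chunk is served from the cached block when the offsets line up, and straight from the flash otherwise. A sketch around the matched lines follows; the mtd_read() fallback path and its error handling are assumptions.

static int do_cached_read(struct mtdblk_dev *mtdblk, unsigned long pos,
                          int len, char *buf)
{
        struct mtd_info *mtd = mtdblk->mbd.mtd;
        unsigned int sect_size = mtdblk->cache_size;
        size_t retlen;
        int ret;

        if (!sect_size)
                return mtd_read(mtd, pos, len, &retlen, buf);

        while (len > 0) {
                unsigned long sect_start = (pos / sect_size) * sect_size;
                unsigned int offset = pos - sect_start;
                unsigned int size = sect_size - offset;

                if (size > len)
                        size = len;

                if (mtdblk->cache_state != STATE_EMPTY &&
                    mtdblk->cache_offset == sect_start) {
                        /* Cache hit: copy from the in-memory block. */
                        memcpy(buf, mtdblk->cache_data + offset, size);
                } else {
                        /* Cache miss: read this piece directly from flash. */
                        ret = mtd_read(mtd, pos, size, &retlen, buf);
                        if (ret)
                                return ret;
                        if (retlen != size)
                                return -EIO;
                }

                buf += size;
                pos += size;
                len -= size;
        }

        return 0;
}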
233 struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); in mtdblock_readsect() local
234 return do_cached_read(mtdblk, block<<9, 512, buf); in mtdblock_readsect()
240 struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); in mtdblock_writesect() local
241 if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) { in mtdblock_writesect()
242 mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize); in mtdblock_writesect()
243 if (!mtdblk->cache_data) in mtdblock_writesect()
250 return do_cached_write(mtdblk, block<<9, 512, buf); in mtdblock_writesect()
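mtdblock_readsect() and mtdblock_writesect() are the mtd_blktrans per-sector entry points: they translate 512-byte sector numbers into byte offsets and allocate the erase-block cache lazily on the first write. The sketch below fills in the prototypes and the allocation-failure return code, which the listing does not show.

static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
                             unsigned long block, char *buf)
{
        struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

        /* The block layer deals in 512-byte sectors: byte offset = block << 9. */
        return do_cached_read(mtdblk, block << 9, 512, buf);
}

static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
                              unsigned long block, char *buf)
{
        struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

        if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
                /* First write: allocate one erase block worth of cache. */
                mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
                if (!mtdblk->cache_data)
                        return -ENOMEM; /* error code not in the listing; chosen for the sketch */
        }
        return do_cached_write(mtdblk, block << 9, 512, buf);
}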
255 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); in mtdblock_open() local
259 if (mtdblk->count) { in mtdblock_open()
260 mtdblk->count++; in mtdblock_open()
269 mtdblk->count = 1; in mtdblock_open()
270 mutex_init(&mtdblk->cache_mutex); in mtdblock_open()
271 mtdblk->cache_state = STATE_EMPTY; in mtdblock_open()
273 mtdblk->cache_size = mbd->mtd->erasesize; in mtdblock_open()
274 mtdblk->cache_data = NULL; in mtdblock_open()
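mtdblock_open() reference-counts openers; only the first one initialises the mutex and the cache geometry, and the cache buffer itself stays NULL until mtdblock_writesect() allocates it. The listing skips a line between the state setup and the cache_size assignment, so any condition guarding that assignment is omitted from this sketch, and the prototype is an assumption.

static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
        struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

        if (mtdblk->count) {
                /* Already open elsewhere: just take another reference. */
                mtdblk->count++;
                return 0;
        }

        /* First opener: set up cache bookkeeping; no buffer allocated yet. */
        mtdblk->count = 1;
        mutex_init(&mtdblk->cache_mutex);
        mtdblk->cache_state = STATE_EMPTY;
        mtdblk->cache_size = mbd->mtd->erasesize;
        mtdblk->cache_data = NULL;

        return 0;
}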
284 struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); in mtdblock_release() local
288 mutex_lock(&mtdblk->cache_mutex); in mtdblock_release()
289 write_cached_data(mtdblk); in mtdblock_release()
290 mutex_unlock(&mtdblk->cache_mutex); in mtdblock_release()
292 if (!--mtdblk->count) { in mtdblock_release()
299 vfree(mtdblk->cache_data); in mtdblock_release()
307 struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); in mtdblock_flush() local
310 mutex_lock(&mtdblk->cache_mutex); in mtdblock_flush()
311 ret = write_cached_data(mtdblk); in mtdblock_flush()
312 mutex_unlock(&mtdblk->cache_mutex); in mtdblock_flush()
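mtdblock_release() and mtdblock_flush() both push a dirty cached block out under cache_mutex; release additionally drops the reference count and frees the vmalloc()ed buffer when the last user goes away. The prototypes, and whether the real driver also syncs the underlying MTD device here, are not visible in the listing, so treat this as a sketch.

static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
        struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

        mutex_lock(&mtdblk->cache_mutex);
        write_cached_data(mtdblk);          /* flush a dirty block, if any */
        mutex_unlock(&mtdblk->cache_mutex);

        if (!--mtdblk->count) {
                /* Last user gone: free the erase-block cache. */
                vfree(mtdblk->cache_data);
                mtdblk->cache_data = NULL;
        }
}

static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
        struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
        int ret;

        mutex_lock(&mtdblk->cache_mutex);
        ret = write_cached_data(mtdblk);
        mutex_unlock(&mtdblk->cache_mutex);

        return ret;
}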