/*
 * block2mtd.c - create an mtd from a block device
 *
 * Copyright (C) 2001,2002	Simon Evans <spse@secret.org.uk>
 * Copyright (C) 2004-2006	Joern Engel <joern@wh.fh-wedel.de>
 *
 * Licence: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * When the first attempt at device initialization fails, we may need to
 * wait a little bit and retry. This timeout, by default 3 seconds, gives
 * the device time to start up. Required on BCM2708 and a few other chipsets.
 */
#define MTD_DEFAULT_TIMEOUT	3

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/major.h>

/* Info for the block device */
struct block2mtd_dev {
	struct list_head list;
	struct block_device *blkdev;
	struct mtd_info mtd;
	struct mutex write_mutex;
};


/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);


static struct page *page_read(struct address_space *mapping, int index)
{
	return read_mapping_page(mapping, index, NULL);
}

/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	struct page *page;
	int index = to >> PAGE_SHIFT;	// page index
	int pages = len >> PAGE_SHIFT;
	u_long *p;
	u_long *max;

	while (pages) {
		page = page_read(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		max = page_address(page) + PAGE_SIZE;
		for (p=page_address(page); p<max; p++)
			if (*p != -1UL) {
				/* page is not already all 0xff: "erase" it */
				lock_page(page);
				memset(page_address(page), 0xff, PAGE_SIZE);
				set_page_dirty(page);
				unlock_page(page);
				balance_dirty_pages_ratelimited(mapping);
				break;
			}

		put_page(page);
		pages--;
		index++;
	}
	return 0;
}

static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct block2mtd_dev *dev = mtd->priv;
	size_t from = instr->addr;
	size_t len = instr->len;
	int err;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_erase(dev, from, len);
	mutex_unlock(&dev->write_mutex);
	if (err)
		pr_err("erase failed err = %d\n", err);

	return err;
}


static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	struct page *page;
	int index = from >> PAGE_SHIFT;
	int offset = from & (PAGE_SIZE-1);
	int cpylen;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		page = page_read(dev->blkdev->bd_inode->i_mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(buf, page_address(page) + offset, cpylen);
		put_page(page);

		if (retlen)
			*retlen += cpylen;
		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}
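/*
 * Worked example (assuming 4 KiB pages): a read of len 0x20 starting at
 * offset 0x1ff0 crosses a page boundary and is split by the loop above
 * into two chunks:
 *   index 1, offset 0xff0, cpylen 16
 *   index 2, offset 0x000, cpylen 16
 * The write path below splits requests the same way.
 */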


/* write data to the underlying device */
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
		loff_t to, size_t len, size_t *retlen)
{
	struct page *page;
	struct address_space *mapping = dev->blkdev->bd_inode->i_mapping;
	int index = to >> PAGE_SHIFT;	// page index
	int offset = to & ~PAGE_MASK;	// page offset
	int cpylen;

	while (len) {
		if ((offset+len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		page = page_read(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (memcmp(page_address(page)+offset, buf, cpylen)) {
			lock_page(page);
			memcpy(page_address(page) + offset, buf, cpylen);
			set_page_dirty(page);
			unlock_page(page);
			balance_dirty_pages_ratelimited(mapping);
		}
		put_page(page);

		if (retlen)
			*retlen += cpylen;

		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}


static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	int err;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_write(dev, buf, to, len, retlen);
	mutex_unlock(&dev->write_mutex);
	if (err > 0)
		err = 0;
	return err;
}


/* sync the device - wait until the write queue is empty */
static void block2mtd_sync(struct mtd_info *mtd)
{
	struct block2mtd_dev *dev = mtd->priv;
	sync_blockdev(dev->blkdev);
	return;
}


static void block2mtd_free_device(struct block2mtd_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->mtd.name);

	if (dev->blkdev) {
		invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
					0, -1);
		blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
	}

	kfree(dev);
}


static struct block2mtd_dev *add_device(char *devname, int erase_size,
		int timeout)
{
#ifndef MODULE
	int i;
#endif
	const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
	struct block_device *bdev;
	struct block2mtd_dev *dev;
	char *name;

	if (!devname)
		return NULL;

	dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/* Get a handle on the device */
	bdev = blkdev_get_by_path(devname, mode, dev);

#ifndef MODULE
	/*
	 * We might not have the root device mounted at this point.
	 * Try to resolve the device name by other means.
	 */
	for (i = 0; IS_ERR(bdev) && i <= timeout; i++) {
		dev_t devt;

		if (i)
			/*
			 * Calling wait_for_device_probe in the first loop
			 * was not enough, sleep for a bit in subsequent
			 * go-arounds.
			 */
			msleep(1000);
		wait_for_device_probe();

		devt = name_to_dev_t(devname);
		if (!devt)
			continue;
		bdev = blkdev_get_by_dev(devt, mode, dev);
	}
#endif

	if (IS_ERR(bdev)) {
		pr_err("error: cannot open device %s\n", devname);
		goto err_free_block2mtd;
	}
	dev->blkdev = bdev;

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		pr_err("attempting to use an MTD device as a block device\n");
		goto err_free_block2mtd;
	}

	if ((long)dev->blkdev->bd_inode->i_size % erase_size) {
		pr_err("erasesize must be a divisor of device size\n");
		goto err_free_block2mtd;
	}

	mutex_init(&dev->write_mutex);

	/* Setup the MTD structure */
	/* make the name contain the block device name */
	name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
	if (!name)
		goto err_destroy_mutex;

	dev->mtd.name = name;

	dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
	dev->mtd.erasesize = erase_size;
	dev->mtd.writesize = 1;
	dev->mtd.writebufsize = PAGE_SIZE;
	dev->mtd.type = MTD_RAM;
	dev->mtd.flags = MTD_CAP_RAM;
	dev->mtd._erase = block2mtd_erase;
	dev->mtd._write = block2mtd_write;
	dev->mtd._sync = block2mtd_sync;
	dev->mtd._read = block2mtd_read;
	dev->mtd.priv = dev;
	dev->mtd.owner = THIS_MODULE;

	if (mtd_device_register(&dev->mtd, NULL, 0)) {
		/* Device didn't get added, so free the entry */
		goto err_destroy_mutex;
	}

	list_add(&dev->list, &blkmtd_device_list);
	pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
		dev->mtd.index,
		dev->mtd.name + strlen("block2mtd: "),
		dev->mtd.erasesize >> 10, dev->mtd.erasesize);
	return dev;

err_destroy_mutex:
	mutex_destroy(&dev->write_mutex);
err_free_block2mtd:
	block2mtd_free_device(dev);
	return NULL;
}


/* This function works like the regular strtoul. In addition, it
 * allows some suffixes for a more human-readable number format:
 * ki, Ki, kiB, KiB	- multiply result by 1024
 * Mi, MiB		- multiply result by 1024^2
 * Gi, GiB		- multiply result by 1024^3
 */
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = simple_strtoul(cp, endp, base);
	switch (**endp) {
	case 'G' :
		result *= 1024;
		/* fall through */
	case 'M':
		result *= 1024;
		/* fall through */
	case 'K':
	case 'k':
		result *= 1024;
	/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
		if ((*endp)[1] == 'i') {
			if ((*endp)[2] == 'B')
				(*endp) += 3;
			else
				(*endp) += 2;
		}
	}
	return result;
}
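/*
 * Illustrative examples of the suffix handling above:
 *   ustrtoul("4096", &endp, 0)   -> 4096, *endp == '\0'
 *   ustrtoul("64Ki", &endp, 0)   -> 65536, *endp == '\0'
 *   ustrtoul("128KiB", &endp, 0) -> 131072, *endp == '\0'
 *   ustrtoul("1Gi", &endp, 0)    -> 1073741824 (1024^3) via case fall-through
 */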


static int parse_num(size_t *num, const char *token)
{
	char *endp;
	size_t n;

	n = (size_t) ustrtoul(token, &endp, 0);
	if (*endp)
		return -EINVAL;

	*num = n;
	return 0;
}


static inline void kill_final_newline(char *str)
{
	char *newline = strrchr(str, '\n');
	if (newline && !newline[1])
		*newline = 0;
}


#ifndef MODULE
static int block2mtd_init_called = 0;
/* 80 for device, 12 for erase size */
static char block2mtd_paramline[80 + 12];
#endif

static int block2mtd_setup2(const char *val)
{
	/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
	char buf[80 + 12 + 80 + 8];
	char *str = buf;
	char *token[2];
	char *name;
	size_t erase_size = PAGE_SIZE;
	unsigned long timeout = MTD_DEFAULT_TIMEOUT;
	int i, ret;

	if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
		pr_err("parameter too long\n");
		return 0;
	}

	strcpy(str, val);
	kill_final_newline(str);

	for (i = 0; i < 2; i++)
		token[i] = strsep(&str, ",");

	if (str) {
		pr_err("too many arguments\n");
		return 0;
	}

	if (!token[0]) {
		pr_err("no argument\n");
		return 0;
	}

	name = token[0];
	if (strlen(name) + 1 > 80) {
		pr_err("device name too long\n");
		return 0;
	}

	if (token[1]) {
		ret = parse_num(&erase_size, token[1]);
		if (ret) {
			pr_err("illegal erase size\n");
			return 0;
		}
	}

	add_device(name, erase_size, timeout);

	return 0;
}
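/*
 * Illustrative parameter strings accepted above (the device paths are
 * examples only):
 *   "/dev/loop0"       -> erase_size defaults to PAGE_SIZE
 *   "/dev/loop0,64Ki"  -> erase_size = 65536 bytes
 */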


static int block2mtd_setup(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
	return block2mtd_setup2(val);
#else
	/* If more parameters are later passed in via
	   /sys/module/block2mtd/parameters/block2mtd
	   and block2mtd_init() has already been called,
	   we can parse the argument now. */

	if (block2mtd_init_called)
		return block2mtd_setup2(val);

	/* During the early boot stage, we only save the parameters
	   here. We must parse them later: if the param is passed
	   from the kernel boot command line, block2mtd_setup() is
	   called so early that it is not possible to resolve
	   the device (even kmalloc() fails). Defer that work to
	   block2mtd_setup2(). */

	strlcpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));

	return 0;
#endif
}


module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
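/*
 * Example usage (the device path is illustrative only):
 *   modprobe block2mtd block2mtd=/dev/mmcblk0p2,64Ki
 * or, when built in, on the kernel command line:
 *   block2mtd.block2mtd=/dev/mmcblk0p2,64Ki
 */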

static int __init block2mtd_init(void)
{
	int ret = 0;

#ifndef MODULE
	if (strlen(block2mtd_paramline))
		ret = block2mtd_setup2(block2mtd_paramline);
	block2mtd_init_called = 1;
#endif

	return ret;
}


static void block2mtd_exit(void)
{
	struct list_head *pos, *next;

	/* Remove the MTD devices */
	list_for_each_safe(pos, next, &blkmtd_device_list) {
		struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);
		block2mtd_sync(&dev->mtd);
		mtd_device_unregister(&dev->mtd);
		mutex_destroy(&dev->write_mutex);
		pr_info("mtd%d: [%s] removed\n",
			dev->mtd.index,
			dev->mtd.name + strlen("block2mtd: "));
		list_del(&dev->list);
		block2mtd_free_device(dev);
	}
}

late_initcall(block2mtd_init);
module_exit(block2mtd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joern Engel <joern@lazybastard.org>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");