// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file includes implementation of UBI character device operations.
 *
 * There are two kinds of character devices in UBI: UBI character devices and
 * UBI volume character devices. UBI character devices allow users to
 * manipulate whole volumes: create, remove, and re-size them. Volume character
 * devices provide volume I/O capabilities.
 *
 * Major and minor numbers are assigned dynamically to both UBI and volume
 * character devices.
 *
 * There is also a third kind of character device: the UBI control character
 * device, which is used to manipulate UBI devices themselves - create and
 * delete them. In other words, it is used for attaching and detaching MTD
 * devices.
 */
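
/*
 * For illustration only (not part of this driver): user space reaches these
 * three kinds of devices through the ioctls declared in <mtd/ubi-user.h>. A
 * minimal sketch, assuming the device node names that udev normally creates
 * (/dev/ubi_ctrl, /dev/ubi0 and /dev/ubi0_0):
 *
 *	int ctrl = open("/dev/ubi_ctrl", O_RDONLY);
 *	struct ubi_attach_req att = { .ubi_num = UBI_DEV_NUM_AUTO, .mtd_num = 0 };
 *	ioctl(ctrl, UBI_IOCATT, &att);           attach mtd0 -> /dev/ubi0
 *
 *	int ubi = open("/dev/ubi0", O_RDONLY);
 *	struct ubi_mkvol_req mk = {
 *		.vol_id = UBI_VOL_NUM_AUTO, .alignment = 1, .bytes = 1024 * 1024,
 *		.vol_type = UBI_DYNAMIC_VOLUME, .name_len = 4, .name = "test",
 *	};
 *	ioctl(ubi, UBI_IOCMKVOL, &mk);           create a volume -> /dev/ubi0_0
 *
 *	int vol = open("/dev/ubi0_0", O_RDWR);
 *	read() and write() on @vol then go through the operations in this file.
 */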

#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/math64.h>
#include <mtd/ubi-user.h>
#include "ubi.h"

/**
 * get_exclusive - get exclusive access to an UBI volume.
 * @desc: volume descriptor
 *
 * This function changes UBI volume open mode to "exclusive". Returns previous
 * mode value (positive integer) in case of success and a negative error code
 * in case of failure.
 */
static int get_exclusive(struct ubi_volume_desc *desc)
{
	int users, err;
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	users = vol->readers + vol->writers + vol->exclusive + vol->metaonly;
	ubi_assert(users > 0);
	if (users > 1) {
		ubi_err(vol->ubi, "%d users for volume %d", users, vol->vol_id);
		err = -EBUSY;
	} else {
		vol->readers = vol->writers = vol->metaonly = 0;
		vol->exclusive = 1;
		err = desc->mode;
		desc->mode = UBI_EXCLUSIVE;
	}
	spin_unlock(&vol->ubi->volumes_lock);

	return err;
}

/**
 * revoke_exclusive - revoke exclusive mode.
 * @desc: volume descriptor
 * @mode: new mode to switch to
 */
static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
{
	struct ubi_volume *vol = desc->vol;

	spin_lock(&vol->ubi->volumes_lock);
	ubi_assert(vol->readers == 0 && vol->writers == 0 && vol->metaonly == 0);
	ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE);
	vol->exclusive = 0;
	if (mode == UBI_READONLY)
		vol->readers = 1;
	else if (mode == UBI_READWRITE)
		vol->writers = 1;
	else if (mode == UBI_METAONLY)
		vol->metaonly = 1;
	else
		vol->exclusive = 1;
	spin_unlock(&vol->ubi->volumes_lock);

	desc->mode = mode;
}

static int vol_cdev_open(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc;
	int vol_id = iminor(inode) - 1, mode, ubi_num;

	ubi_num = ubi_major2num(imajor(inode));
	if (ubi_num < 0)
		return ubi_num;

	if (file->f_mode & FMODE_WRITE)
		mode = UBI_READWRITE;
	else
		mode = UBI_READONLY;

	dbg_gen("open device %d, volume %d, mode %d",
		ubi_num, vol_id, mode);

	desc = ubi_open_volume(ubi_num, vol_id, mode);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	file->private_data = desc;
	return 0;
}

static int vol_cdev_release(struct inode *inode, struct file *file)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	dbg_gen("release device %d, volume %d, mode %d",
		vol->ubi->ubi_num, vol->vol_id, desc->mode);

	if (vol->updating) {
		ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged",
			 vol->vol_id);
		ubi_assert(!vol->changing_leb);
		vol->updating = 0;
		vfree(vol->upd_buf);
	} else if (vol->changing_leb) {
		dbg_gen("only %lld of %lld bytes received for atomic LEB change for volume %d:%d, cancel",
			vol->upd_received, vol->upd_bytes, vol->ubi->ubi_num,
			vol->vol_id);
		vol->changing_leb = 0;
		vfree(vol->upd_buf);
	}

	ubi_close_volume(desc);
	return 0;
}

static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;

	if (vol->updating) {
		/* Update is in progress, seeking is prohibited */
		ubi_err(vol->ubi, "updating");
		return -EBUSY;
	}

	return fixed_size_llseek(file, offset, origin, vol->used_bytes);
}

static int vol_cdev_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_device *ubi = desc->vol->ubi;
	struct inode *inode = file_inode(file);
	int err;
	inode_lock(inode);
	err = ubi_sync(ubi->ubi_num);
	inode_unlock(inode);
	return err;
}


static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
			     loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int err, lnum, off, len, tbuf_size;
	size_t count_save = count;
	void *tbuf;

	dbg_gen("read %zd bytes from offset %lld of volume %d",
		count, *offp, vol->vol_id);

	if (vol->updating) {
		ubi_err(vol->ubi, "updating");
		return -EBUSY;
	}
	if (vol->upd_marker) {
		ubi_err(vol->ubi, "damaged volume, update marker is set");
		return -EBADF;
	}
	if (*offp == vol->used_bytes || count == 0)
		return 0;

	if (vol->corrupted)
		dbg_gen("read from corrupted volume %d", vol->vol_id);

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;
	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);

	do {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;

		err = copy_to_user(buf, tbuf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	} while (count);

	vfree(tbuf);
	return err ? err : count_save - count;
}

/*
 * This function allows direct writes to dynamic UBI volumes, without issuing
 * the volume update operation.
 */
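/*
 * Illustrative user-space sketch (an assumption, not part of this driver):
 * direct writes must first be enabled through the volume property ioctl, and
 * both the write length and the file position must then be aligned to the
 * minimum I/O unit:
 *
 *	struct ubi_set_vol_prop_req prop = {
 *		.property = UBI_VOL_PROP_DIRECT_WRITE,
 *		.value = 1,
 *	};
 *	int fd = open("/dev/ubi0_0", O_RDWR);
 *	ioctl(fd, UBI_IOCSETVOLPROP, &prop);
 *	pwrite(fd, buf, len, 0);        with len a multiple of min_io_size
 */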
static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *offp)
{
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	int lnum, off, len, tbuf_size, err = 0;
	size_t count_save = count;
	char *tbuf;

	if (!vol->direct_writes)
		return -EPERM;

	dbg_gen("requested: write %zd bytes to offset %lld of volume %u",
		count, *offp, vol->vol_id);

	if (vol->vol_type == UBI_STATIC_VOLUME)
		return -EROFS;

	lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
	if (off & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "unaligned position");
		return -EINVAL;
	}

	if (*offp + count > vol->used_bytes)
		count_save = count = vol->used_bytes - *offp;

	/* We can write only in fractions of the minimum I/O unit */
	if (count & (ubi->min_io_size - 1)) {
		ubi_err(ubi, "unaligned write length");
		return -EINVAL;
	}

	tbuf_size = vol->usable_leb_size;
	if (count < tbuf_size)
		tbuf_size = ALIGN(count, ubi->min_io_size);
	tbuf = vmalloc(tbuf_size);
	if (!tbuf)
		return -ENOMEM;

	len = count > tbuf_size ? tbuf_size : count;

	while (count) {
		cond_resched();

		if (off + len >= vol->usable_leb_size)
			len = vol->usable_leb_size - off;

		err = copy_from_user(tbuf, buf, len);
		if (err) {
			err = -EFAULT;
			break;
		}

		err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len);
		if (err)
			break;

		off += len;
		if (off == vol->usable_leb_size) {
			lnum += 1;
			off -= vol->usable_leb_size;
		}

		count -= len;
		*offp += len;
		buf += len;
		len = count > tbuf_size ? tbuf_size : count;
	}

	vfree(tbuf);
	return err ? err : count_save - count;
}

static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *offp)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;

	if (!vol->updating && !vol->changing_leb)
		return vol_cdev_direct_write(file, buf, count, offp);

	if (vol->updating)
		err = ubi_more_update_data(ubi, vol, buf, count);
	else
		err = ubi_more_leb_change_data(ubi, vol, buf, count);

	if (err < 0) {
		ubi_err(ubi, "cannot accept more %zd bytes of data, error %d",
			count, err);
		return err;
	}

	if (err) {
		/*
		 * The operation is finished, @err contains number of actually
		 * written bytes.
		 */
		count = err;

		if (vol->changing_leb) {
			revoke_exclusive(desc, UBI_READWRITE);
			return count;
		}

		/*
		 * We voluntarily do not take into account the skip_check flag
		 * as we want to make sure what we wrote was correctly written.
		 */
		err = ubi_check_volume(ubi, vol->vol_id);
		if (err < 0)
			return err;

		if (err) {
			ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
				 vol->vol_id, ubi->ubi_num);
			vol->corrupted = 1;
		}
		vol->checked = 1;
		ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
		revoke_exclusive(desc, UBI_READWRITE);
	}

	return count;
}
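
/*
 * Illustrative user-space sketch of the update protocol handled above (an
 * assumption, not part of this driver): the update is started with the
 * UBI_IOCVOLUP ioctl, and the new volume contents are then streamed in with
 * write(), possibly split over several calls; passing zero bytes just wipes
 * the volume:
 *
 *	int64_t bytes = image_size;
 *	ioctl(fd, UBI_IOCVOLUP, &bytes);
 *	write(fd, image, image_size);
 */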

static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_volume_desc *desc = file->private_data;
	struct ubi_volume *vol = desc->vol;
	struct ubi_device *ubi = vol->ubi;
	void __user *argp = (void __user *)arg;

	switch (cmd) {
	/* Volume update command */
	case UBI_IOCVOLUP:
	{
		int64_t bytes, rsvd_bytes;

		if (!capable(CAP_SYS_RESOURCE)) {
			err = -EPERM;
			break;
		}

		err = copy_from_user(&bytes, argp, sizeof(int64_t));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY) {
			err = -EROFS;
			break;
		}

		rsvd_bytes = (long long)vol->reserved_pebs *
					vol->usable_leb_size;
		if (bytes < 0 || bytes > rsvd_bytes) {
			err = -EINVAL;
			break;
		}

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_update(ubi, vol, bytes);
		if (bytes == 0) {
			ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
			revoke_exclusive(desc, UBI_READWRITE);
		}
		break;
	}

	/* Atomic logical eraseblock change command */
	case UBI_IOCEBCH:
	{
		struct ubi_leb_change_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_leb_change_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		/* Validate the request */
		err = -EINVAL;
		if (!ubi_leb_valid(vol, req.lnum) ||
		    req.bytes < 0 || req.bytes > vol->usable_leb_size)
			break;

		err = get_exclusive(desc);
		if (err < 0)
			break;

		err = ubi_start_leb_change(ubi, vol, &req);
		if (req.bytes == 0)
			revoke_exclusive(desc, UBI_READWRITE);
		break;
	}

	/* Logical eraseblock erasure command */
	case UBI_IOCEBER:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		if (desc->mode == UBI_READONLY ||
		    vol->vol_type == UBI_STATIC_VOLUME) {
			err = -EROFS;
			break;
		}

		if (!ubi_leb_valid(vol, lnum)) {
			err = -EINVAL;
			break;
		}

		dbg_gen("erase LEB %d:%d", vol->vol_id, lnum);
		err = ubi_eba_unmap_leb(ubi, vol, lnum);
		if (err)
			break;

		err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
		break;
	}

	/* Logical eraseblock map command */
	case UBI_IOCEBMAP:
	{
		struct ubi_map_req req;

		err = copy_from_user(&req, argp, sizeof(struct ubi_map_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_map(desc, req.lnum);
		break;
	}

	/* Logical eraseblock un-map command */
	case UBI_IOCEBUNMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_leb_unmap(desc, lnum);
		break;
	}

	/* Check if logical eraseblock is mapped command */
	case UBI_IOCEBISMAP:
	{
		int32_t lnum;

		err = get_user(lnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}
		err = ubi_is_mapped(desc, lnum);
		break;
	}

	/* Set volume property command */
	case UBI_IOCSETVOLPROP:
	{
		struct ubi_set_vol_prop_req req;

		err = copy_from_user(&req, argp,
				     sizeof(struct ubi_set_vol_prop_req));
		if (err) {
			err = -EFAULT;
			break;
		}
		switch (req.property) {
		case UBI_VOL_PROP_DIRECT_WRITE:
			mutex_lock(&ubi->device_mutex);
			desc->vol->direct_writes = !!req.value;
			mutex_unlock(&ubi->device_mutex);
			break;
		default:
			err = -EINVAL;
			break;
		}
		break;
	}

	/* Create a R/O block device on top of the UBI volume */
	case UBI_IOCVOLCRBLK:
	{
		struct ubi_volume_info vi;

		ubi_get_volume_info(desc, &vi);
		err = ubiblock_create(&vi);
		break;
	}

	/* Remove the R/O block device */
	case UBI_IOCVOLRMBLK:
	{
		struct ubi_volume_info vi;

		ubi_get_volume_info(desc, &vi);
		err = ubiblock_remove(&vi);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}
	return err;
}
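
/*
 * Illustrative user-space sketch of the UBI_IOCEBCH command handled above (an
 * assumption, not part of this driver): like a volume update, an atomic LEB
 * change is a two-step sequence - the ioctl announces the length, and the
 * data follows via write():
 *
 *	struct ubi_leb_change_req req = { .lnum = 3, .bytes = data_len };
 *	ioctl(fd, UBI_IOCEBCH, &req);
 *	write(fd, data, data_len);
 */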

/**
 * verify_mkvol_req - verify volume creation request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */
static int verify_mkvol_req(const struct ubi_device *ubi,
			    const struct ubi_mkvol_req *req)
{
	int n, err = -EINVAL;

	if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 ||
	    req->name_len < 0)
		goto bad;

	if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) &&
	    req->vol_id != UBI_VOL_NUM_AUTO)
		goto bad;

	if (req->alignment == 0)
		goto bad;

	if (req->bytes == 0)
		goto bad;

	if (req->vol_type != UBI_DYNAMIC_VOLUME &&
	    req->vol_type != UBI_STATIC_VOLUME)
		goto bad;

	if (req->flags & ~UBI_VOL_VALID_FLGS)
		goto bad;

	if (req->flags & UBI_VOL_SKIP_CRC_CHECK_FLG &&
	    req->vol_type != UBI_STATIC_VOLUME)
		goto bad;

	if (req->alignment > ubi->leb_size)
		goto bad;

	n = req->alignment & (ubi->min_io_size - 1);
	if (req->alignment != 1 && n)
		goto bad;

	if (!req->name[0] || !req->name_len)
		goto bad;

	if (req->name_len > UBI_VOL_NAME_MAX) {
		err = -ENAMETOOLONG;
		goto bad;
	}

	n = strnlen(req->name, req->name_len + 1);
	if (n != req->name_len)
		goto bad;

	return 0;

bad:
	ubi_err(ubi, "bad volume creation request");
	ubi_dump_mkvol_req(req);
	return err;
}

/**
 * verify_rsvol_req - verify volume re-size request.
 * @ubi: UBI device description object
 * @req: the request to check
 *
 * This function returns zero if the request is correct, and %-EINVAL if not.
 */
static int verify_rsvol_req(const struct ubi_device *ubi,
			    const struct ubi_rsvol_req *req)
{
	if (req->bytes <= 0)
		return -EINVAL;

	if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots)
		return -EINVAL;

	return 0;
}

/**
 * rename_volumes - rename UBI volumes.
 * @ubi: UBI device description object
 * @req: volumes re-name request
 *
 * This is a helper function for the volume re-name IOCTL which validates the
 * request, opens the volumes and calls the corresponding volume management
 * function. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int rename_volumes(struct ubi_device *ubi,
			  struct ubi_rnvol_req *req)
{
	int i, n, err;
	struct list_head rename_list;
	struct ubi_rename_entry *re, *re1;

	if (req->count < 0 || req->count > UBI_MAX_RNVOL)
		return -EINVAL;

	if (req->count == 0)
		return 0;

	/* Validate volume IDs and names in the request */
	for (i = 0; i < req->count; i++) {
		if (req->ents[i].vol_id < 0 ||
		    req->ents[i].vol_id >= ubi->vtbl_slots)
			return -EINVAL;
		if (req->ents[i].name_len < 0)
			return -EINVAL;
		if (req->ents[i].name_len > UBI_VOL_NAME_MAX)
			return -ENAMETOOLONG;
		req->ents[i].name[req->ents[i].name_len] = '\0';
		n = strlen(req->ents[i].name);
		if (n != req->ents[i].name_len)
			return -EINVAL;
	}

	/* Make sure volume IDs and names are unique */
	for (i = 0; i < req->count - 1; i++) {
		for (n = i + 1; n < req->count; n++) {
			if (req->ents[i].vol_id == req->ents[n].vol_id) {
				ubi_err(ubi, "duplicated volume id %d",
					req->ents[i].vol_id);
				return -EINVAL;
			}
			if (!strcmp(req->ents[i].name, req->ents[n].name)) {
				ubi_err(ubi, "duplicated volume name \"%s\"",
					req->ents[i].name);
				return -EINVAL;
			}
		}
	}

	/* Create the re-name list */
	INIT_LIST_HEAD(&rename_list);
	for (i = 0; i < req->count; i++) {
		int vol_id = req->ents[i].vol_id;
		int name_len = req->ents[i].name_len;
		const char *name = req->ents[i].name;

		re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re) {
			err = -ENOMEM;
			goto out_free;
		}

		re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY);
		if (IS_ERR(re->desc)) {
			err = PTR_ERR(re->desc);
			ubi_err(ubi, "cannot open volume %d, error %d",
				vol_id, err);
			kfree(re);
			goto out_free;
		}

		/* Skip this re-naming if the name does not really change */
		if (re->desc->vol->name_len == name_len &&
		    !memcmp(re->desc->vol->name, name, name_len)) {
			ubi_close_volume(re->desc);
			kfree(re);
			continue;
		}

		re->new_name_len = name_len;
		memcpy(re->new_name, name, name_len);
		list_add_tail(&re->list, &rename_list);
		dbg_gen("will rename volume %d from \"%s\" to \"%s\"",
			vol_id, re->desc->vol->name, name);
	}

	if (list_empty(&rename_list))
		return 0;

	/* Find out the volumes which have to be removed */
	list_for_each_entry(re, &rename_list, list) {
		struct ubi_volume_desc *desc;
		int no_remove_needed = 0;

		/*
		 * Volume @re->vol_id is going to be re-named to
		 * @re->new_name, while its current name is @name. If a volume
		 * with name @re->new_name currently exists, it has to be
		 * removed, unless it is also re-named in the request (@req).
		 */
		list_for_each_entry(re1, &rename_list, list) {
			if (re->new_name_len == re1->desc->vol->name_len &&
			    !memcmp(re->new_name, re1->desc->vol->name,
				    re1->desc->vol->name_len)) {
				no_remove_needed = 1;
				break;
			}
		}

		if (no_remove_needed)
			continue;

		/*
		 * It seems we need to remove volume with name @re->new_name,
		 * if it exists.
		 */
		desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
					  UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			if (err == -ENODEV)
				/* Re-naming into a non-existing volume name */
				continue;

			/* The volume exists but busy, or an error occurred */
			ubi_err(ubi, "cannot open volume \"%s\", error %d",
				re->new_name, err);
			goto out_free;
		}

		re1 = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL);
		if (!re1) {
			err = -ENOMEM;
			ubi_close_volume(desc);
			goto out_free;
		}

		re1->remove = 1;
		re1->desc = desc;
		list_add(&re1->list, &rename_list);
		dbg_gen("will remove volume %d, name \"%s\"",
			re1->desc->vol->vol_id, re1->desc->vol->name);
	}

	mutex_lock(&ubi->device_mutex);
	err = ubi_rename_volumes(ubi, &rename_list);
	mutex_unlock(&ubi->device_mutex);

out_free:
	list_for_each_entry_safe(re, re1, &rename_list, list) {
		ubi_close_volume(re->desc);
		list_del(&re->list);
		kfree(re);
	}
	return err;
}
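
/*
 * Illustrative user-space sketch of a re-name request (an assumption, not
 * part of this driver); the same structure is consumed by rename_volumes()
 * above, reached through the UBI_IOCRNVOL ioctl on the UBI character device:
 *
 *	struct ubi_rnvol_req req = { .count = 1 };
 *	req.ents[0].vol_id = 0;
 *	req.ents[0].name_len = strlen("new_name");
 *	strcpy(req.ents[0].name, "new_name");
 *	ioctl(ubi_fd, UBI_IOCRNVOL, &req);
 */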

static long ubi_cdev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = 0;
	struct ubi_device *ubi;
	struct ubi_volume_desc *desc;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	ubi = ubi_get_by_major(imajor(file->f_mapping->host));
	if (!ubi)
		return -ENODEV;

	switch (cmd) {
	/* Create volume command */
	case UBI_IOCMKVOL:
	{
		struct ubi_mkvol_req req;

		dbg_gen("create volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_mkvol_req(ubi, &req);
		if (err)
			break;

		mutex_lock(&ubi->device_mutex);
		err = ubi_create_volume(ubi, &req);
		mutex_unlock(&ubi->device_mutex);
		if (err)
			break;

		err = put_user(req.vol_id, (__user int32_t *)argp);
		if (err)
			err = -EFAULT;

		break;
	}

	/* Remove volume command */
	case UBI_IOCRMVOL:
	{
		int vol_id;

		dbg_gen("remove volume");
		err = get_user(vol_id, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		mutex_lock(&ubi->device_mutex);
		err = ubi_remove_volume(desc, 0);
		mutex_unlock(&ubi->device_mutex);

		/*
		 * The volume is deleted (unless an error occurred), and the
		 * 'struct ubi_volume' object will be freed when
		 * 'ubi_close_volume()' calls 'put_device()'.
		 */
		ubi_close_volume(desc);
		break;
	}

	/* Re-size volume command */
	case UBI_IOCRSVOL:
	{
		int pebs;
		struct ubi_rsvol_req req;

		dbg_gen("re-size volume");
		err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		err = verify_rsvol_req(ubi, &req);
		if (err)
			break;

		desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE);
		if (IS_ERR(desc)) {
			err = PTR_ERR(desc);
			break;
		}

		pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1,
			       desc->vol->usable_leb_size);

		mutex_lock(&ubi->device_mutex);
		err = ubi_resize_volume(desc, pebs);
		mutex_unlock(&ubi->device_mutex);
		ubi_close_volume(desc);
		break;
	}

	/* Re-name volumes command */
	case UBI_IOCRNVOL:
	{
		struct ubi_rnvol_req *req;

		dbg_gen("re-name volumes");
		req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			break;
		}

		err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req));
		if (err) {
			err = -EFAULT;
			kfree(req);
			break;
		}

		err = rename_volumes(ubi, req);
		kfree(req);
		break;
	}

	/* Check a specific PEB for bitflips and scrub it if needed */
	case UBI_IOCRPEB:
	{
		int pnum;

		err = get_user(pnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		err = ubi_bitflip_check(ubi, pnum, 0);
		break;
	}

	/* Force scrubbing for a specific PEB */
	case UBI_IOCSPEB:
	{
		int pnum;

		err = get_user(pnum, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		err = ubi_bitflip_check(ubi, pnum, 1);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	ubi_put_device(ubi);
	return err;
}

static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg)
{
	int err = 0;
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	switch (cmd) {
	/* Attach an MTD device command */
	case UBI_IOCATT:
	{
		struct ubi_attach_req req;
		struct mtd_info *mtd;

		dbg_gen("attach MTD device");
		err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
		if (err) {
			err = -EFAULT;
			break;
		}

		if (req.mtd_num < 0 ||
		    (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
			err = -EINVAL;
			break;
		}

		mtd = get_mtd_device(NULL, req.mtd_num);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			break;
		}

		/*
		 * Note, further request verification is done by
		 * 'ubi_attach_mtd_dev()'.
		 */
		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
					 req.max_beb_per1024);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0)
			put_mtd_device(mtd);
		else
			/* @err contains UBI device number */
			err = put_user(err, (__user int32_t *)argp);

		break;
	}

	/* Detach an MTD device command */
	case UBI_IOCDET:
	{
		int ubi_num;

		dbg_gen("detach MTD device");
		err = get_user(ubi_num, (__user int32_t *)argp);
		if (err) {
			err = -EFAULT;
			break;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_detach_mtd_dev(ubi_num, 0);
		mutex_unlock(&ubi_devices_mutex);
		break;
	}

	default:
		err = -ENOTTY;
		break;
	}

	return err;
}

/* UBI volume character device operations */
const struct file_operations ubi_vol_cdev_operations = {
	.owner          = THIS_MODULE,
	.open           = vol_cdev_open,
	.release        = vol_cdev_release,
	.llseek         = vol_cdev_llseek,
	.read           = vol_cdev_read,
	.write          = vol_cdev_write,
	.fsync		= vol_cdev_fsync,
	.unlocked_ioctl = vol_cdev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
};

/* UBI character device operations */
const struct file_operations ubi_cdev_operations = {
	.owner          = THIS_MODULE,
	.llseek         = no_llseek,
	.unlocked_ioctl = ubi_cdev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
};

/* UBI control character device operations */
const struct file_operations ubi_ctrl_cdev_operations = {
	.owner          = THIS_MODULE,
	.unlocked_ioctl = ctrl_cdev_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.llseek		= no_llseek,
};