// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};
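
/*
 * Example (derived from nvm_create_tgt_dev() below): on a device with
 * 4 LUNs per channel, a target created over LUNs 0..5 gets a dev_map
 * with num_ch = 2. chnls[0] describes virtual channel 0 (4 LUNs,
 * ch_off = 0, lun_offs[] all 0) and chnls[1] the remaining 2 LUNs.
 * nvm_map_to_dev() adds ch_off and lun_offs[] to a target-relative
 * address to recover the device address.
 */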

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

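/*
 * Reserve LUNs [lun_begin, lun_end] in the device-wide lun_map bitmap.
 * If any LUN in the range is already taken, roll back the bits set so
 * far and return -EBUSY.
 */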
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

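/*
 * Build a target device over the contiguous LUN range
 * [lun_begin, lun_end] of the parent: set up the forward map (dev_map)
 * and fill in the parent's reverse map (dev->rmap) so addresses can be
 * translated in both directions.
 */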
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, the lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* if op is not set, fall back to the target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

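/*
 * Create a target on @dev as described by the ioctl payload: validate
 * the configuration, reserve the LUN range, carve out a target device,
 * allocate a gendisk and request queue, and hand control to the target
 * type's init() callback. Errors unwind in reverse order.
 */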
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue(dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = tt->bops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

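	/*
	 * Cap the maximum I/O size at the smaller of the device's
	 * reported mdts and the lightnvm per-command limit of
	 * NVM_MAX_VLBA sectors of csecs bytes each.
	 */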
	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: if the target was not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t) {
		pr_err("failed to remove target %s\n",
				remove->tgtname);
		return 1;
	}

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}

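/*
 * Allocate the device-wide reverse map. Offsets start out at -1 and
 * are filled in by nvm_create_tgt_dev() as targets claim LUNs.
 */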
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

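/*
 * Address translation helpers: nvm_map_to_dev() turns a target-relative
 * channel/LUN into the parent device's coordinates by adding the map
 * offsets; nvm_map_to_tgt() applies the reverse map to go back.
 */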
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

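/*
 * Set up the request's PPA list. On multi-plane (1.2) geometries each
 * address is replicated per plane, laid out plane-major: with
 * pln_mode = 2 and nr_ppas = 2, ppa_list becomes
 * { p0/pl0, p1/pl0, p0/pl1, p1/pl1 }.
 */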
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

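/*
 * Compute 1.2 per-command flags; 2.0 devices take none. Sequential
 * requests encode the plane mode, and reads/writes enable scrambling.
 */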
static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with the right address format */
	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rqd->end_io = nvm_sync_end_io;
	rqd->private = &wait;

	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		return ret;

	wait_for_completion_io(&wait);

	return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
		       void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	ret = nvm_submit_io_wait(dev, rqd, buf);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	rqd->dev = NULL;
	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return nvm_submit_io_wait(dev, rqd, NULL);
}

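/*
 * Probe one page of a chunk with a synchronous read. Returns a
 * negative errno if the submission itself fails, 0 if the page holds
 * valid data, or a positive device status (e.g. NVM_RSP_ERR_EMPTYPAGE)
 * otherwise.
 */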
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);

	/* free the probe page on all paths, including submission failure */
	__free_page(page);

	if (ret)
		return ret;

	return rqd.error;
}

/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * The chunk is open; scan it sequentially to update the write
	 * pointer. We make the assumption that targets write data across
	 * all planes before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If any of the plane states is bad or grown bad, the chunk is marked
 * offline. If not bad, the first plane state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}

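/*
 * Emulate the 2.0 chunk report on a 1.2 device: walk the (channel, lun)
 * pairs starting at @slba, fetch each bad block table, and fold it into
 * chunk metadata via nvm_bb_to_chunk().
 */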
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("device could not be identified\n");
		goto err;
	}

	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
			geo->minor_ver_id, geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("could not initialize core structures.\n");
		goto err;
	}

	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

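	/*
	 * Size the DMA pool for the worst case of a single command:
	 * NVM_MAX_VLBA PPA entries plus per-sector OOB metadata
	 * (sizeof(u64) + geo.sos each), rounded up to a page.
	 */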
	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i >= ARRAY_SIZE(devices->info)) {
			pr_err("max %zd devices can be reported.\n",
			       ARRAY_SIZE(devices->info));
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);