// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};
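
/*
 * A target sees a contiguous (ch, lun) space. nvm_map_to_dev() translates a
 * target address to a device address as (ch + ch_off, lun + lun_offs[lun]);
 * the device-wide reverse map (dev->rmap) stores the offsets needed for the
 * opposite translation in nvm_map_to_tgt().
 */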

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}

out:
	up_write(&nvm_lock);
	return ret;
}

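/*
 * Reserve [lun_begin, lun_end] in the device's lun_map bitmap. On conflict,
 * roll back the bits set so far and return -EBUSY, leaving the map unchanged.
 */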
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

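/*
 * Tear down a target device mapping. When @clear is set, the LUNs owned by
 * the target are also released back to the parent device's lun_map.
 */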
static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}

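/*
 * Carve the LUN range [lun_begin, lun_end] of the parent device out into a
 * target device with its own geometry. If the range does not populate every
 * channel with the same number of LUNs, the target's per-channel LUN count
 * (geo.num_lun) is set to -1 to flag the unbalanced layout.
 */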
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}

static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* If op is not set, fall back to the target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}

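/*
 * Create a target on top of @dev: normalize the ioctl configuration, reserve
 * the requested LUNs, build the target device mapping, then allocate the
 * gendisk and request queue and hand both to the target type's ->init().
 */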
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("target name already exists (%s)\n",
							create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}

static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: if the target could not be found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t) {
		pr_err("failed to remove target %s\n",
				remove->tgtname);
		return 1;
	}

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}

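/*
 * Allocate the device-wide reverse map. All offsets start out as -1,
 * meaning "not owned by any target"; nvm_create_tgt_dev() fills them in
 * as targets claim channels and LUNs.
 */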
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
								GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}

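/*
 * Address translation helpers: the tgt-to-dev direction adds the per-channel
 * and per-LUN offsets recorded in the target's map; the dev-to-tgt direction
 * subtracts the offsets recorded in the parent device's reverse map.
 */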
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
				struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}

int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
							dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}

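/*
 * Build the request's PPA list. On 1.2 multi-plane media every address is
 * replicated across all planes, so a request for N addresses carries
 * N * pln_mode entries laid out plane-major.
 */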
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
			struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}

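/*
 * Asynchronous submission path: addresses are converted to device format
 * before the request is handed to the driver, and converted back by
 * nvm_end_io() on completion (or here, on submission failure), so a target
 * only ever sees its own address space.
 */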
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with the right address format */
	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rqd->end_io = nvm_sync_end_io;
	rqd->private = &wait;

	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		return ret;

	wait_for_completion_io(&wait);

	return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
		       void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	ret = nvm_submit_io_wait(dev, rqd, buf);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	rqd->dev = NULL;
	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return nvm_submit_io_wait(dev, rqd, NULL);
}

static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	if (ret)
		return ret;

	__free_page(page);

	return rqd.error;
}

/*
 * Scans the first and last page of a 1.2 chunk to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If the page is empty, the chunk is free. Recoverable
		 * ECC/CRC errors mean the chunk is open; anything else
		 * is an actual io error.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * The chunk is open; scan it sequentially to update the write
	 * pointer. We make the assumption that targets write data across
	 * all planes before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}

/*
 * Folds a bad block list from its plane representation into its
 * chunk representation.
 *
 * If any of the plane states is bad or grown bad, the chunk is marked
 * offline. Otherwise, the first plane's state acts as the chunk state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}

static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

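/*
 * Report chunk metadata. 2.0 devices support this natively; for 1.2 devices
 * the metadata is emulated from the bad block table, plus a page-by-page
 * scan of open chunks to recover the write pointer.
 */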
int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);

static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
					sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("device could not be identified\n");
		goto err;
	}

	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
			geo->minor_ver_id, geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("could not initialize core structures.\n");
		goto err;
	}

	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);

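/*
 * Driver-side usage sketch (illustrative only; ctrl_queue, my_nvm_dev_ops
 * and disk_name are placeholders, not symbols from this file). A driver
 * allocates the device, fills in its queue and ops, then registers it:
 *
 *	struct nvm_dev *ndev = nvm_alloc_dev(node);
 *
 *	if (!ndev)
 *		return -ENOMEM;
 *	ndev->q = ctrl_queue;
 *	ndev->ops = &my_nvm_dev_ops;
 *	strlcpy(ndev->name, disk_name, DISK_NAME_LEN);
 *	ret = nvm_register(ndev);
 *
 * nvm_register() identifies the device through ops->identity() and creates
 * the DMA pool used for PPA lists.
 */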
int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);

static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}

static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i > 31) {
			pr_err("max 31 devices can be reported.\n");
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

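/*
 * Userspace reaches this through the NVM_DEV_CREATE ioctl on
 * /dev/lightnvm/control. A hedged sketch of a caller (error handling
 * omitted; field values are examples only):
 *
 *	struct nvm_ioctl_create c = { 0 };
 *
 *	strcpy(c.dev, "nvme0n1");
 *	strcpy(c.tgtname, "tgt0");
 *	strcpy(c.tgttype, "pblk");
 *	c.conf.type = NVM_CONFIG_TYPE_SIMPLE;
 *	c.conf.s.lun_begin = -1;	// -1/-1 selects all LUNs
 *	c.conf.s.lun_end = -1;
 *	ioctl(fd, NVM_DEV_CREATE, &c);
 */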
static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}

/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}

static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);