1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
4  * Copyright (C) 2014 Red Hat, Inc.
5  * Copyright (C) 2015 Arrikto, Inc.
6  * Copyright (C) 2017 Chinamobile, Inc.
7  */
8 
9 #include <linux/spinlock.h>
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/timer.h>
13 #include <linux/parser.h>
14 #include <linux/vmalloc.h>
15 #include <linux/uio_driver.h>
16 #include <linux/xarray.h>
17 #include <linux/stringify.h>
18 #include <linux/bitops.h>
19 #include <linux/highmem.h>
20 #include <linux/configfs.h>
21 #include <linux/mutex.h>
22 #include <linux/workqueue.h>
23 #include <net/genetlink.h>
24 #include <scsi/scsi_common.h>
25 #include <scsi/scsi_proto.h>
26 #include <target/target_core_base.h>
27 #include <target/target_core_fabric.h>
28 #include <target/target_core_backend.h>
29 
30 #include <linux/target_core_user.h>
31 
32 /**
33  * DOC: Userspace I/O
34  * Userspace I/O
35  * -------------
36  *
37  * Define a shared-memory interface for LIO to pass SCSI commands and
38  * data to userspace for processing. This allows backends that are
39  * too complex for in-kernel support to be implemented in userspace.
40  *
41  * It uses the UIO framework to do a lot of the device-creation and
42  * introspection work for us.
43  *
44  * See the .h file for how the ring is laid out. Note that while the
45  * command ring is defined, the particulars of the data area are
46  * not. Offset values in the command entry point to other locations
47  * internal to the mmap-ed area. There is separate space outside the
48  * command ring for data buffers. This leaves maximum flexibility for
49  * moving buffer allocations, or even page flipping or other
50  * allocation techniques, without altering the command ring layout.
51  *
52  * SECURITY:
53  * The user process must be assumed to be malicious. There's no way to
54  * prevent it breaking the command ring protocol if it wants, but in
55  * order to prevent other issues we must only ever read *data* from
56  * the shared memory area, not offsets or sizes. This applies to
57  * command ring entries as well as the mailbox. Extra code needed for
58  * this may have a 'UAM' comment.
59  */
60 
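/*
 * For orientation, a rough sketch of the userspace side of this
 * interface (the authoritative description and a complete example live
 * in the tcmu-design document under Documentation/target/). The names
 * map, uio_fd and handle_scsi_cmd() are placeholders, not part of any
 * API. After mmap()ing the UIO device a handler typically does:
 *
 *	struct tcmu_mailbox *mb = map;
 *	struct tcmu_cmd_entry *ent;
 *
 *	while (mb->cmd_tail != mb->cmd_head) {
 *		ent = map + mb->cmdr_off + mb->cmd_tail;
 *		if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD)
 *			handle_scsi_cmd(ent);	// CDB is at map + ent->req.cdb_off
 *		// PAD (and TMR notification) entries need no response
 *		mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(ent->hdr.len_op)) % mb->cmdr_size;
 *	}
 *	write(uio_fd, &(uint32_t){ 0 }, 4);	// let the kernel reap completions
 *
 * The kernel wakes the handler through uio_event_notify(); the handler
 * blocks in read() on the UIO file descriptor until then.
 */
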
61 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
62 
63 /* For mailbox plus cmd ring, the size is fixed at 8MB */
64 #define MB_CMDR_SIZE (8 * 1024 * 1024)
65 /* Offset of cmd ring is size of mailbox */
66 #define CMDR_OFF sizeof(struct tcmu_mailbox)
67 #define CMDR_SIZE (MB_CMDR_SIZE - CMDR_OFF)
68 
69 /*
70  * For the data area, the default block size is PAGE_SIZE and
71  * the default total size is 256K * PAGE_SIZE.
72  */
73 #define DATA_PAGES_PER_BLK_DEF 1
74 #define DATA_AREA_PAGES_DEF (256 * 1024)
75 
76 #define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
77 #define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))
78 
79 /*
80  * Default global limit on data area pages (512K * PAGE_SIZE bytes in
81  * total), above which the unmap thread will be started to reclaim pages.
82  */
83 #define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024)
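
/*
 * Worked example, assuming 4 KiB pages (PAGE_SHIFT == 12):
 * TCMU_MBS_TO_PAGES(1) == 256, DATA_AREA_PAGES_DEF (256K pages)
 * corresponds to a 1 GiB default data area per device, and
 * TCMU_GLOBAL_MAX_PAGES_DEF (512K pages) to a 2 GiB global threshold
 * for starting the unmap thread.
 */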
84 
85 static u8 tcmu_kern_cmd_reply_supported;
86 static u8 tcmu_netlink_blocked;
87 
88 static struct device *tcmu_root_device;
89 
90 struct tcmu_hba {
91 	u32 host_id;
92 };
93 
94 #define TCMU_CONFIG_LEN 256
95 
96 static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
97 static LIST_HEAD(tcmu_nl_cmd_list);
98 
99 struct tcmu_dev;
100 
101 struct tcmu_nl_cmd {
102 	/* wake up thread waiting for reply */
103 	struct completion complete;
104 	struct list_head nl_list;
105 	struct tcmu_dev *udev;
106 	int cmd;
107 	int status;
108 };
109 
110 struct tcmu_dev {
111 	struct list_head node;
112 	struct kref kref;
113 
114 	struct se_device se_dev;
115 	struct se_dev_plug se_plug;
116 
117 	char *name;
118 	struct se_hba *hba;
119 
120 #define TCMU_DEV_BIT_OPEN 0
121 #define TCMU_DEV_BIT_BROKEN 1
122 #define TCMU_DEV_BIT_BLOCKED 2
123 #define TCMU_DEV_BIT_TMR_NOTIFY 3
124 #define TCMU_DEV_BIT_PLUGGED 4
125 	unsigned long flags;
126 
127 	struct uio_info uio_info;
128 
129 	struct inode *inode;
130 
131 	uint64_t dev_size;
132 
133 	struct tcmu_mailbox *mb_addr;
134 	void *cmdr;
135 	u32 cmdr_size;
136 	u32 cmdr_last_cleaned;
137 	/* Offset of data area from start of mb */
138 	/* Must add data_off and mb_addr to get the address */
139 	size_t data_off;
140 	int data_area_mb;
141 	uint32_t max_blocks;
142 	size_t mmap_pages;
143 
144 	struct mutex cmdr_lock;
145 	struct list_head qfull_queue;
146 	struct list_head tmr_queue;
147 
148 	uint32_t dbi_max;
149 	uint32_t dbi_thresh;
150 	unsigned long *data_bitmap;
151 	struct xarray data_pages;
152 	uint32_t data_pages_per_blk;
153 	uint32_t data_blk_size;
154 
155 	struct xarray commands;
156 
157 	struct timer_list cmd_timer;
158 	unsigned int cmd_time_out;
159 	struct list_head inflight_queue;
160 
161 	struct timer_list qfull_timer;
162 	int qfull_time_out;
163 
164 	struct list_head timedout_entry;
165 
166 	struct tcmu_nl_cmd curr_nl_cmd;
167 
168 	char dev_config[TCMU_CONFIG_LEN];
169 
170 	int nl_reply_supported;
171 };
172 
173 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
174 
175 struct tcmu_cmd {
176 	struct se_cmd *se_cmd;
177 	struct tcmu_dev *tcmu_dev;
178 	struct list_head queue_entry;
179 
180 	uint16_t cmd_id;
181 
182 	/* Can't use se_cmd when cleaning up expired cmds, because if
183 	   cmd has been completed then accessing se_cmd is off limits */
184 	uint32_t dbi_cnt;
185 	uint32_t dbi_bidi_cnt;
186 	uint32_t dbi_cur;
187 	uint32_t *dbi;
188 
189 	uint32_t data_len_bidi;
190 
191 	unsigned long deadline;
192 
193 #define TCMU_CMD_BIT_EXPIRED 0
194 #define TCMU_CMD_BIT_KEEP_BUF 1
195 	unsigned long flags;
196 };
197 
198 struct tcmu_tmr {
199 	struct list_head queue_entry;
200 
201 	uint8_t tmr_type;
202 	uint32_t tmr_cmd_cnt;
203 	int16_t tmr_cmd_ids[];
204 };
205 
206 /*
207  * To avoid deadlock, the mutex lock order should always be:
208  *
209  * mutex_lock(&root_udev_mutex);
210  * ...
211  * mutex_lock(&tcmu_dev->cmdr_lock);
212  * mutex_unlock(&tcmu_dev->cmdr_lock);
213  * ...
214  * mutex_unlock(&root_udev_mutex);
215  */
216 static DEFINE_MUTEX(root_udev_mutex);
217 static LIST_HEAD(root_udev);
218 
219 static DEFINE_SPINLOCK(timed_out_udevs_lock);
220 static LIST_HEAD(timed_out_udevs);
221 
222 static struct kmem_cache *tcmu_cmd_cache;
223 
224 static atomic_t global_page_count = ATOMIC_INIT(0);
225 static struct delayed_work tcmu_unmap_work;
226 static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;
227 
228 static int tcmu_set_global_max_data_area(const char *str,
229 					 const struct kernel_param *kp)
230 {
231 	int ret, max_area_mb;
232 
233 	ret = kstrtoint(str, 10, &max_area_mb);
234 	if (ret)
235 		return -EINVAL;
236 
237 	if (max_area_mb <= 0) {
238 		pr_err("global_max_data_area must be larger than 0.\n");
239 		return -EINVAL;
240 	}
241 
242 	tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
243 	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
244 		schedule_delayed_work(&tcmu_unmap_work, 0);
245 	else
246 		cancel_delayed_work_sync(&tcmu_unmap_work);
247 
248 	return 0;
249 }
250 
251 static int tcmu_get_global_max_data_area(char *buffer,
252 					 const struct kernel_param *kp)
253 {
254 	return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
255 }
256 
257 static const struct kernel_param_ops tcmu_global_max_data_area_op = {
258 	.set = tcmu_set_global_max_data_area,
259 	.get = tcmu_get_global_max_data_area,
260 };
261 
262 module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
263 		S_IWUSR | S_IRUGO);
264 MODULE_PARM_DESC(global_max_data_area_mb,
265 		 "Max MBs allowed to be allocated to all the tcmu devices' "
266 		 "data areas.");
267 
268 static int tcmu_get_block_netlink(char *buffer,
269 				  const struct kernel_param *kp)
270 {
271 	return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
272 		       "blocked" : "unblocked");
273 }
274 
275 static int tcmu_set_block_netlink(const char *str,
276 				  const struct kernel_param *kp)
277 {
278 	int ret;
279 	u8 val;
280 
281 	ret = kstrtou8(str, 0, &val);
282 	if (ret < 0)
283 		return ret;
284 
285 	if (val > 1) {
286 		pr_err("Invalid block netlink value %u\n", val);
287 		return -EINVAL;
288 	}
289 
290 	tcmu_netlink_blocked = val;
291 	return 0;
292 }
293 
294 static const struct kernel_param_ops tcmu_block_netlink_op = {
295 	.set = tcmu_set_block_netlink,
296 	.get = tcmu_get_block_netlink,
297 };
298 
299 module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
300 MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
301 
302 static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
303 {
304 	struct tcmu_dev *udev = nl_cmd->udev;
305 
306 	if (!tcmu_netlink_blocked) {
307 		pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
308 		return -EBUSY;
309 	}
310 
311 	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
312 		pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
313 		nl_cmd->status = -EINTR;
314 		list_del(&nl_cmd->nl_list);
315 		complete(&nl_cmd->complete);
316 	}
317 	return 0;
318 }
319 
320 static int tcmu_set_reset_netlink(const char *str,
321 				  const struct kernel_param *kp)
322 {
323 	struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
324 	int ret;
325 	u8 val;
326 
327 	ret = kstrtou8(str, 0, &val);
328 	if (ret < 0)
329 		return ret;
330 
331 	if (val != 1) {
332 		pr_err("Invalid reset netlink value %u\n", val);
333 		return -EINVAL;
334 	}
335 
336 	mutex_lock(&tcmu_nl_cmd_mutex);
337 	list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
338 		ret = tcmu_fail_netlink_cmd(nl_cmd);
339 		if (ret)
340 			break;
341 	}
342 	mutex_unlock(&tcmu_nl_cmd_mutex);
343 
344 	return ret;
345 }
346 
347 static const struct kernel_param_ops tcmu_reset_netlink_op = {
348 	.set = tcmu_set_reset_netlink,
349 };
350 
351 module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
352 MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");
353 
354 /* multicast group */
355 enum tcmu_multicast_groups {
356 	TCMU_MCGRP_CONFIG,
357 };
358 
359 static const struct genl_multicast_group tcmu_mcgrps[] = {
360 	[TCMU_MCGRP_CONFIG] = { .name = "config", },
361 };
362 
363 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
364 	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
365 	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
366 	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
367 	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
368 	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
369 };
370 
371 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
372 {
373 	struct tcmu_dev *udev = NULL;
374 	struct tcmu_nl_cmd *nl_cmd;
375 	int dev_id, rc, ret = 0;
376 
377 	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
378 	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
379 		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
380 		return -EINVAL;
381 	}
382 
383 	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
384 	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);
385 
386 	mutex_lock(&tcmu_nl_cmd_mutex);
387 	list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
388 		if (nl_cmd->udev->se_dev.dev_index == dev_id) {
389 			udev = nl_cmd->udev;
390 			break;
391 		}
392 	}
393 
394 	if (!udev) {
395 		pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
396 		       completed_cmd, rc, dev_id);
397 		ret = -ENODEV;
398 		goto unlock;
399 	}
400 	list_del(&nl_cmd->nl_list);
401 
402 	pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
403 		 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
404 		 nl_cmd->status);
405 
406 	if (nl_cmd->cmd != completed_cmd) {
407 		pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
408 		       udev->name, completed_cmd, nl_cmd->cmd);
409 		ret = -EINVAL;
410 		goto unlock;
411 	}
412 
413 	nl_cmd->status = rc;
414 	complete(&nl_cmd->complete);
415 unlock:
416 	mutex_unlock(&tcmu_nl_cmd_mutex);
417 	return ret;
418 }
419 
420 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
421 {
422 	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
423 }
424 
425 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
426 {
427 	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
428 }
429 
430 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
431 				       struct genl_info *info)
432 {
433 	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
434 }
435 
436 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
437 {
438 	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
439 		tcmu_kern_cmd_reply_supported  =
440 			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
441 		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
442 		       tcmu_kern_cmd_reply_supported);
443 	}
444 
445 	return 0;
446 }
447 
448 static const struct genl_small_ops tcmu_genl_ops[] = {
449 	{
450 		.cmd	= TCMU_CMD_SET_FEATURES,
451 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
452 		.flags	= GENL_ADMIN_PERM,
453 		.doit	= tcmu_genl_set_features,
454 	},
455 	{
456 		.cmd	= TCMU_CMD_ADDED_DEVICE_DONE,
457 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
458 		.flags	= GENL_ADMIN_PERM,
459 		.doit	= tcmu_genl_add_dev_done,
460 	},
461 	{
462 		.cmd	= TCMU_CMD_REMOVED_DEVICE_DONE,
463 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
464 		.flags	= GENL_ADMIN_PERM,
465 		.doit	= tcmu_genl_rm_dev_done,
466 	},
467 	{
468 		.cmd	= TCMU_CMD_RECONFIG_DEVICE_DONE,
469 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
470 		.flags	= GENL_ADMIN_PERM,
471 		.doit	= tcmu_genl_reconfig_dev_done,
472 	},
473 };
474 
475 /* Our generic netlink family */
476 static struct genl_family tcmu_genl_family __ro_after_init = {
477 	.module = THIS_MODULE,
478 	.hdrsize = 0,
479 	.name = "TCM-USER",
480 	.version = 2,
481 	.maxattr = TCMU_ATTR_MAX,
482 	.policy = tcmu_attr_policy,
483 	.mcgrps = tcmu_mcgrps,
484 	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
485 	.netnsok = true,
486 	.small_ops = tcmu_genl_ops,
487 	.n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
488 };
489 
490 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
491 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
492 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
493 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
494 
495 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
496 {
497 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
498 	uint32_t i;
499 
500 	for (i = 0; i < len; i++)
501 		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
502 }
503 
504 static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
505 				       struct tcmu_cmd *tcmu_cmd,
506 				       int prev_dbi, int length, int *iov_cnt)
507 {
508 	XA_STATE(xas, &udev->data_pages, 0);
509 	struct page *page;
510 	int i, cnt, dbi, dpi;
511 	int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);
512 
513 	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
514 	if (dbi == udev->dbi_thresh)
515 		return -1;
516 
517 	dpi = dbi * udev->data_pages_per_blk;
518 	/* Count the number of already allocated pages */
519 	xas_set(&xas, dpi);
520 	rcu_read_lock();
521 	for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
522 		cnt++;
523 	rcu_read_unlock();
524 
525 	for (i = cnt; i < page_cnt; i++) {
526 		/* try to get new page from the mm */
527 		page = alloc_page(GFP_NOIO);
528 		if (!page)
529 			break;
530 
531 		if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
532 			__free_page(page);
533 			break;
534 		}
535 	}
536 	if (atomic_add_return(i - cnt, &global_page_count) >
537 			      tcmu_global_max_pages)
538 		schedule_delayed_work(&tcmu_unmap_work, 0);
539 
540 	if (i && dbi > udev->dbi_max)
541 		udev->dbi_max = dbi;
542 
543 	set_bit(dbi, udev->data_bitmap);
544 	tcmu_cmd_set_dbi(tcmu_cmd, dbi);
545 
546 	if (dbi != prev_dbi + 1)
547 		*iov_cnt += 1;
548 
549 	return i == page_cnt ? dbi : -1;
550 }
551 
552 static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
553 				 struct tcmu_cmd *tcmu_cmd, int length)
554 {
555 	/* start value of dbi + 1 must not be a valid dbi */
556 	int dbi = -2;
557 	int blk_data_len, iov_cnt = 0;
558 	uint32_t blk_size = udev->data_blk_size;
559 
560 	for (; length > 0; length -= blk_size) {
561 		blk_data_len = min_t(uint32_t, length, blk_size);
562 		dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
563 					   &iov_cnt);
564 		if (dbi < 0)
565 			return -1;
566 	}
567 	return iov_cnt;
568 }
569 
570 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
571 {
572 	kfree(tcmu_cmd->dbi);
573 	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
574 }
575 
576 static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
577 {
578 	int i, len;
579 	struct se_cmd *se_cmd = cmd->se_cmd;
580 	uint32_t blk_size = cmd->tcmu_dev->data_blk_size;
581 
582 	cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);
583 
584 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
585 		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
586 		for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
587 			len += se_cmd->t_bidi_data_sg[i].length;
588 		cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
589 		cmd->dbi_cnt += cmd->dbi_bidi_cnt;
590 		cmd->data_len_bidi = len;
591 	}
592 }
593 
594 static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
595 			    struct iovec **iov, int prev_dbi, int len)
596 {
597 	/* Get the next dbi */
598 	int dbi = tcmu_cmd_get_dbi(cmd);
599 
600 	/* Do not add more than udev->data_blk_size to iov */
601 	len = min_t(int,  len, udev->data_blk_size);
602 
603 	/*
604 	 * The following code merges contiguous blocks into the same iovec
605 	 * when their dbis directly follow each other.
606 	 */
607 	if (dbi != prev_dbi + 1) {
608 		/* dbi is not next to previous dbi, so start new iov */
609 		if (prev_dbi >= 0)
610 			(*iov)++;
611 		/* write offset relative to mb_addr */
612 		(*iov)->iov_base = (void __user *)
613 				   (udev->data_off + dbi * udev->data_blk_size);
614 	}
615 	(*iov)->iov_len += len;
616 
617 	return dbi;
618 }
619 
620 static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
621 			    struct iovec **iov, int data_length)
622 {
623 	/* start value of dbi + 1 must not be a valid dbi */
624 	int dbi = -2;
625 
626 	/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
627 	for (; data_length > 0; data_length -= udev->data_blk_size)
628 		dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
629 }
630 
631 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
632 {
633 	struct se_device *se_dev = se_cmd->se_dev;
634 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
635 	struct tcmu_cmd *tcmu_cmd;
636 
637 	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
638 	if (!tcmu_cmd)
639 		return NULL;
640 
641 	INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
642 	tcmu_cmd->se_cmd = se_cmd;
643 	tcmu_cmd->tcmu_dev = udev;
644 
645 	tcmu_cmd_set_block_cnts(tcmu_cmd);
646 	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
647 				GFP_NOIO);
648 	if (!tcmu_cmd->dbi) {
649 		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
650 		return NULL;
651 	}
652 
653 	return tcmu_cmd;
654 }
655 
656 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
657 {
658 	unsigned long offset = offset_in_page(vaddr);
659 	void *start = vaddr - offset;
660 
661 	size = round_up(size+offset, PAGE_SIZE);
662 
663 	while (size) {
664 		flush_dcache_page(vmalloc_to_page(start));
665 		start += PAGE_SIZE;
666 		size -= PAGE_SIZE;
667 	}
668 }
669 
670 /*
671  * Some ring helper functions. We don't assume size is a power of 2 so
672  * we can't use circ_buf.h.
673  */
674 static inline size_t spc_used(size_t head, size_t tail, size_t size)
675 {
676 	int diff = head - tail;
677 
678 	if (diff >= 0)
679 		return diff;
680 	else
681 		return size + diff;
682 }
683 
684 static inline size_t spc_free(size_t head, size_t tail, size_t size)
685 {
686 	/* Keep 1 byte unused or we can't tell full from empty */
687 	return (size - spc_used(head, tail, size) - 1);
688 }
689 
head_to_end(size_t head,size_t size)690 static inline size_t head_to_end(size_t head, size_t size)
691 {
692 	return size - head;
693 }
694 
695 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
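
/*
 * Worked example of the ring accounting: with size == 8, head == 6 and
 * tail == 2, spc_used() == 4 and spc_free() == 3 (one byte stays unused
 * so a full ring can be told apart from an empty one), head_to_end() == 2,
 * and UPDATE_HEAD(head, 3, 8) wraps head to (6 + 3) % 8 == 1.
 */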
696 
697 #define TCMU_SG_TO_DATA_AREA 1
698 #define TCMU_DATA_AREA_TO_SG 2
699 
700 static inline void tcmu_copy_data(struct tcmu_dev *udev,
701 				  struct tcmu_cmd *tcmu_cmd, uint32_t direction,
702 				  struct scatterlist *sg, unsigned int sg_nents,
703 				  struct iovec **iov, size_t data_len)
704 {
705 	/* start value of dbi + 1 must not be a valid dbi */
706 	int dbi = -2;
707 	size_t page_remaining, cp_len;
708 	int page_cnt, page_inx, dpi;
709 	struct sg_mapping_iter sg_iter;
710 	unsigned int sg_flags;
711 	struct page *page;
712 	void *data_page_start, *data_addr;
713 
714 	if (direction == TCMU_SG_TO_DATA_AREA)
715 		sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
716 	else
717 		sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
718 	sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);
719 
720 	while (data_len) {
721 		if (direction == TCMU_SG_TO_DATA_AREA)
722 			dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
723 					       data_len);
724 		else
725 			dbi = tcmu_cmd_get_dbi(tcmu_cmd);
726 
727 		page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
728 		if (page_cnt > udev->data_pages_per_blk)
729 			page_cnt = udev->data_pages_per_blk;
730 
731 		dpi = dbi * udev->data_pages_per_blk;
732 		for (page_inx = 0; page_inx < page_cnt && data_len;
733 		     page_inx++, dpi++) {
734 			page = xa_load(&udev->data_pages, dpi);
735 
736 			if (direction == TCMU_DATA_AREA_TO_SG)
737 				flush_dcache_page(page);
738 			data_page_start = kmap_atomic(page);
739 			page_remaining = PAGE_SIZE;
740 
741 			while (page_remaining && data_len) {
742 				if (!sg_miter_next(&sg_iter)) {
743 					/* set length to 0 to abort outer loop */
744 					data_len = 0;
745 					pr_debug("%s: aborting data copy due to exhausted sg_list\n",
746 						 __func__);
747 					break;
748 				}
749 				cp_len = min3(sg_iter.length, page_remaining,
750 					      data_len);
751 
752 				data_addr = data_page_start +
753 					    PAGE_SIZE - page_remaining;
754 				if (direction == TCMU_SG_TO_DATA_AREA)
755 					memcpy(data_addr, sg_iter.addr, cp_len);
756 				else
757 					memcpy(sg_iter.addr, data_addr, cp_len);
758 
759 				data_len -= cp_len;
760 				page_remaining -= cp_len;
761 				sg_iter.consumed = cp_len;
762 			}
763 			sg_miter_stop(&sg_iter);
764 
765 			kunmap_atomic(data_page_start);
766 			if (direction == TCMU_SG_TO_DATA_AREA)
767 				flush_dcache_page(page);
768 		}
769 	}
770 }
771 
772 static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
773 			      struct iovec **iov)
774 {
775 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
776 
777 	tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
778 		       se_cmd->t_data_nents, iov, se_cmd->data_length);
779 }
780 
781 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
782 			     bool bidi, uint32_t read_len)
783 {
784 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
785 	struct scatterlist *data_sg;
786 	unsigned int data_nents;
787 
788 	if (!bidi) {
789 		data_sg = se_cmd->t_data_sg;
790 		data_nents = se_cmd->t_data_nents;
791 	} else {
792 		/*
793 		 * For the bidi case, the first (dbi_cnt - dbi_bidi_cnt) blocks
794 		 * hold the Data-Out buffer, so those blocks must be skipped
795 		 * before gathering the Data-In buffer.
796 		 */
797 		tcmu_cmd_set_dbi_cur(tcmu_cmd,
798 				     tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);
799 
800 		data_sg = se_cmd->t_bidi_data_sg;
801 		data_nents = se_cmd->t_bidi_data_nents;
802 	}
803 
804 	tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
805 		       data_nents, NULL, read_len);
806 }
807 
808 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
809 {
810 	return thresh - bitmap_weight(bitmap, thresh);
811 }
812 
813 /*
814  * We can't queue a command until we have space available on the cmd ring.
815  *
816  * Called with ring lock held.
817  */
818 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
819 {
820 	struct tcmu_mailbox *mb = udev->mb_addr;
821 	size_t space, cmd_needed;
822 	u32 cmd_head;
823 
824 	tcmu_flush_dcache_range(mb, sizeof(*mb));
825 
826 	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
827 
828 	/*
829 	 * If cmd end-of-ring space is too small then we need space for a PAD
830 	 * entry plus the original cmd - cmds are internally contiguous.
831 	 */
832 	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
833 		cmd_needed = cmd_size;
834 	else
835 		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
836 
837 	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
838 	if (space < cmd_needed) {
839 		pr_debug("no cmd space: %u %u %u\n", cmd_head,
840 		       udev->cmdr_last_cleaned, udev->cmdr_size);
841 		return false;
842 	}
843 	return true;
844 }
845 
846 /*
847  * We have to allocate data buffers before we can queue a command.
848  * Returns -1 on error (not enough space) or number of needed iovs on success
849  *
850  * Called with ring lock held.
851  */
852 static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
853 				  int *iov_bidi_cnt)
854 {
855 	int space, iov_cnt = 0, ret = 0;
856 
857 	if (!cmd->dbi_cnt)
858 		goto wr_iov_cnts;
859 
860 	/* try to check and get the data blocks as needed */
861 	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
862 	if (space < cmd->dbi_cnt) {
863 		unsigned long blocks_left =
864 				(udev->max_blocks - udev->dbi_thresh) + space;
865 
866 		if (blocks_left < cmd->dbi_cnt) {
867 			pr_debug("no data space: only %lu available, but ask for %u\n",
868 					blocks_left * udev->data_blk_size,
869 					cmd->dbi_cnt * udev->data_blk_size);
870 			return -1;
871 		}
872 
873 		udev->dbi_thresh += cmd->dbi_cnt;
874 		if (udev->dbi_thresh > udev->max_blocks)
875 			udev->dbi_thresh = udev->max_blocks;
876 	}
877 
878 	iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
879 	if (iov_cnt < 0)
880 		return -1;
881 
882 	if (cmd->dbi_bidi_cnt) {
883 		ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
884 		if (ret < 0)
885 			return -1;
886 	}
887 wr_iov_cnts:
888 	*iov_bidi_cnt = ret;
889 	return iov_cnt + ret;
890 }
891 
892 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
893 {
894 	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
895 			sizeof(struct tcmu_cmd_entry));
896 }
897 
898 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
899 					   size_t base_command_size)
900 {
901 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
902 	size_t command_size;
903 
904 	command_size = base_command_size +
905 		round_up(scsi_command_size(se_cmd->t_task_cdb),
906 				TCMU_OP_ALIGN_SIZE);
907 
908 	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
909 
910 	return command_size;
911 }
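
/*
 * Example of the sizing above, assuming TCMU_OP_ALIGN_SIZE is
 * sizeof(__u64): a 6-byte TEST UNIT READY CDB rounds up to 8 bytes, so
 * command_size is base_command_size (entry header, fixed req fields and
 * the iov array) plus 8 bytes for the CDB copy.
 */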
912 
913 static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
914 				 struct timer_list *timer)
915 {
916 	if (!tmo)
917 		return;
918 
919 	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
920 	if (!timer_pending(timer))
921 		mod_timer(timer, tcmu_cmd->deadline);
922 
923 	pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
924 		 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
925 }
926 
927 static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
928 {
929 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
930 	unsigned int tmo;
931 
932 	/*
933 	 * For backwards compat, if qfull_time_out is not set, use
934 	 * cmd_time_out, and if that's not set, use the default time out.
935 	 */
936 	if (!udev->qfull_time_out)
937 		return -ETIMEDOUT;
938 	else if (udev->qfull_time_out > 0)
939 		tmo = udev->qfull_time_out;
940 	else if (udev->cmd_time_out)
941 		tmo = udev->cmd_time_out;
942 	else
943 		tmo = TCMU_TIME_OUT;
944 
945 	tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);
946 
947 	list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
948 	pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
949 		 tcmu_cmd, udev->name);
950 	return 0;
951 }
952 
953 static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
954 {
955 	struct tcmu_cmd_entry_hdr *hdr;
956 	struct tcmu_mailbox *mb = udev->mb_addr;
957 	uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
958 
959 	/* Insert a PAD if end-of-ring space is too small */
960 	if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
961 		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
962 
963 		hdr = udev->cmdr + cmd_head;
964 		tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
965 		tcmu_hdr_set_len(&hdr->len_op, pad_size);
966 		hdr->cmd_id = 0; /* not used for PAD */
967 		hdr->kflags = 0;
968 		hdr->uflags = 0;
969 		tcmu_flush_dcache_range(hdr, sizeof(*hdr));
970 
971 		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
972 		tcmu_flush_dcache_range(mb, sizeof(*mb));
973 
974 		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
975 		WARN_ON(cmd_head != 0);
976 	}
977 
978 	return cmd_head;
979 }
980 
981 static void tcmu_unplug_device(struct se_dev_plug *se_plug)
982 {
983 	struct se_device *se_dev = se_plug->se_dev;
984 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
985 
986 	clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags);
987 	uio_event_notify(&udev->uio_info);
988 }
989 
990 static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
991 {
992 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
993 
994 	if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
995 		return &udev->se_plug;
996 
997 	return NULL;
998 }
999 
1000 /**
1001  * queue_cmd_ring - queue cmd to ring or internally
1002  * @tcmu_cmd: cmd to queue
1003  * @scsi_err: TCM error code if failure (-1) returned.
1004  *
1005  * Returns:
1006  * -1 we cannot queue internally or to the ring.
1007  *  0 success
1008  *  1 internally queued to wait for ring memory to free.
1009  */
1010 static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
1011 {
1012 	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
1013 	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
1014 	size_t base_command_size, command_size;
1015 	struct tcmu_mailbox *mb = udev->mb_addr;
1016 	struct tcmu_cmd_entry *entry;
1017 	struct iovec *iov;
1018 	int iov_cnt, iov_bidi_cnt;
1019 	uint32_t cmd_id, cmd_head;
1020 	uint64_t cdb_off;
1021 	uint32_t blk_size = udev->data_blk_size;
1022 	/* size of data buffer needed */
1023 	size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;
1024 
1025 	*scsi_err = TCM_NO_SENSE;
1026 
1027 	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
1028 		*scsi_err = TCM_LUN_BUSY;
1029 		return -1;
1030 	}
1031 
1032 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1033 		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1034 		return -1;
1035 	}
1036 
1037 	if (!list_empty(&udev->qfull_queue))
1038 		goto queue;
1039 
1040 	if (data_length > (size_t)udev->max_blocks * blk_size) {
1041 		pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
1042 			data_length, (size_t)udev->max_blocks * blk_size);
1043 		*scsi_err = TCM_INVALID_CDB_FIELD;
1044 		return -1;
1045 	}
1046 
1047 	iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
1048 	if (iov_cnt < 0)
1049 		goto free_and_queue;
1050 
1051 	/*
1052 	 * Must be a certain minimum size for response sense info, but
1053 	 * also may be larger if the iov array is large.
1054 	 */
1055 	base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
1056 	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
1057 
1058 	if (command_size > (udev->cmdr_size / 2)) {
1059 		pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
1060 			command_size, udev->cmdr_size);
1061 		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
1062 		*scsi_err = TCM_INVALID_CDB_FIELD;
1063 		return -1;
1064 	}
1065 
1066 	if (!is_ring_space_avail(udev, command_size))
1067 		/*
1068 		 * Don't leave commands partially setup because the unmap
1069 		 * thread might need the blocks to make forward progress.
1070 		 */
1071 		goto free_and_queue;
1072 
1073 	if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
1074 		     GFP_NOWAIT) < 0) {
1075 		pr_err("tcmu: Could not allocate cmd id.\n");
1076 
1077 		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
1078 		*scsi_err = TCM_OUT_OF_RESOURCES;
1079 		return -1;
1080 	}
1081 	tcmu_cmd->cmd_id = cmd_id;
1082 
1083 	pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
1084 		 tcmu_cmd, udev->name);
1085 
1086 	cmd_head = ring_insert_padding(udev, command_size);
1087 
1088 	entry = udev->cmdr + cmd_head;
1089 	memset(entry, 0, command_size);
1090 	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
1091 
1092 	/* prepare iov list and copy data to data area if necessary */
1093 	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1094 	iov = &entry->req.iov[0];
1095 
1096 	if (se_cmd->data_direction == DMA_TO_DEVICE ||
1097 	    se_cmd->se_cmd_flags & SCF_BIDI)
1098 		scatter_data_area(udev, tcmu_cmd, &iov);
1099 	else
1100 		tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);
1101 
1102 	entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;
1103 
1104 	/* Handle BIDI commands */
1105 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
1106 		iov++;
1107 		tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
1108 		entry->req.iov_bidi_cnt = iov_bidi_cnt;
1109 	}
1110 
1111 	tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
1112 
1113 	entry->hdr.cmd_id = tcmu_cmd->cmd_id;
1114 
1115 	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
1116 
1117 	/* All offsets relative to mb_addr, not start of entry! */
1118 	cdb_off = CMDR_OFF + cmd_head + base_command_size;
1119 	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
1120 	entry->req.cdb_off = cdb_off;
1121 	tcmu_flush_dcache_range(entry, command_size);
1122 
1123 	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
1124 	tcmu_flush_dcache_range(mb, sizeof(*mb));
1125 
1126 	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
1127 
1128 	if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags))
1129 		uio_event_notify(&udev->uio_info);
1130 
1131 	return 0;
1132 
1133 free_and_queue:
1134 	tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
1135 	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
1136 
1137 queue:
1138 	if (add_to_qfull_queue(tcmu_cmd)) {
1139 		*scsi_err = TCM_OUT_OF_RESOURCES;
1140 		return -1;
1141 	}
1142 
1143 	return 1;
1144 }
1145 
1146 /**
1147  * queue_tmr_ring - queue tmr info to ring or internally
1148  * @udev: related tcmu_dev
1149  * @tmr: tcmu_tmr containing tmr info to queue
1150  *
1151  * Returns:
1152  *  0 success
1153  *  1 internally queued to wait for ring memory to free.
1154  */
1155 static int
1156 queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
1157 {
1158 	struct tcmu_tmr_entry *entry;
1159 	int cmd_size;
1160 	int id_list_sz;
1161 	struct tcmu_mailbox *mb = udev->mb_addr;
1162 	uint32_t cmd_head;
1163 
1164 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
1165 		goto out_free;
1166 
1167 	id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
1168 	cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);
1169 
1170 	if (!list_empty(&udev->tmr_queue) ||
1171 	    !is_ring_space_avail(udev, cmd_size)) {
1172 		list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
1173 		pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
1174 			 tmr, udev->name);
1175 		return 1;
1176 	}
1177 
1178 	cmd_head = ring_insert_padding(udev, cmd_size);
1179 
1180 	entry = udev->cmdr + cmd_head;
1181 	memset(entry, 0, cmd_size);
1182 	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
1183 	tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
1184 	entry->tmr_type = tmr->tmr_type;
1185 	entry->cmd_cnt = tmr->tmr_cmd_cnt;
1186 	memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
1187 	tcmu_flush_dcache_range(entry, cmd_size);
1188 
1189 	UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
1190 	tcmu_flush_dcache_range(mb, sizeof(*mb));
1191 
1192 	uio_event_notify(&udev->uio_info);
1193 
1194 out_free:
1195 	kfree(tmr);
1196 
1197 	return 0;
1198 }
1199 
1200 static sense_reason_t
1201 tcmu_queue_cmd(struct se_cmd *se_cmd)
1202 {
1203 	struct se_device *se_dev = se_cmd->se_dev;
1204 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
1205 	struct tcmu_cmd *tcmu_cmd;
1206 	sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
1207 	int ret = -1;
1208 
1209 	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
1210 	if (!tcmu_cmd)
1211 		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1212 
1213 	mutex_lock(&udev->cmdr_lock);
1214 	if (!(se_cmd->transport_state & CMD_T_ABORTED))
1215 		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1216 	if (ret < 0)
1217 		tcmu_free_cmd(tcmu_cmd);
1218 	else
1219 		se_cmd->priv = tcmu_cmd;
1220 	mutex_unlock(&udev->cmdr_lock);
1221 	return scsi_ret;
1222 }
1223 
1224 static void tcmu_set_next_deadline(struct list_head *queue,
1225 				   struct timer_list *timer)
1226 {
1227 	struct tcmu_cmd *cmd;
1228 
1229 	if (!list_empty(queue)) {
1230 		cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
1231 		mod_timer(timer, cmd->deadline);
1232 	} else
1233 		del_timer(timer);
1234 }
1235 
1236 static int
1237 tcmu_tmr_type(enum tcm_tmreq_table tmf)
1238 {
1239 	switch (tmf) {
1240 	case TMR_ABORT_TASK:		return TCMU_TMR_ABORT_TASK;
1241 	case TMR_ABORT_TASK_SET:	return TCMU_TMR_ABORT_TASK_SET;
1242 	case TMR_CLEAR_ACA:		return TCMU_TMR_CLEAR_ACA;
1243 	case TMR_CLEAR_TASK_SET:	return TCMU_TMR_CLEAR_TASK_SET;
1244 	case TMR_LUN_RESET:		return TCMU_TMR_LUN_RESET;
1245 	case TMR_TARGET_WARM_RESET:	return TCMU_TMR_TARGET_WARM_RESET;
1246 	case TMR_TARGET_COLD_RESET:	return TCMU_TMR_TARGET_COLD_RESET;
1247 	case TMR_LUN_RESET_PRO:		return TCMU_TMR_LUN_RESET_PRO;
1248 	default:			return TCMU_TMR_UNKNOWN;
1249 	}
1250 }
1251 
1252 static void
1253 tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
1254 		struct list_head *cmd_list)
1255 {
1256 	int i = 0, cmd_cnt = 0;
1257 	bool unqueued = false;
1258 	uint16_t *cmd_ids = NULL;
1259 	struct tcmu_cmd *cmd;
1260 	struct se_cmd *se_cmd;
1261 	struct tcmu_tmr *tmr;
1262 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
1263 
1264 	mutex_lock(&udev->cmdr_lock);
1265 
1266 	/* First we check for aborted commands in qfull_queue */
1267 	list_for_each_entry(se_cmd, cmd_list, state_list) {
1268 		i++;
1269 		if (!se_cmd->priv)
1270 			continue;
1271 		cmd = se_cmd->priv;
1272 		/* Commands on qfull queue have no id yet */
1273 		if (cmd->cmd_id) {
1274 			cmd_cnt++;
1275 			continue;
1276 		}
1277 		pr_debug("Removing aborted command %p from queue on dev %s.\n",
1278 			 cmd, udev->name);
1279 
1280 		list_del_init(&cmd->queue_entry);
1281 		tcmu_free_cmd(cmd);
1282 		se_cmd->priv = NULL;
1283 		target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
1284 		unqueued = true;
1285 	}
1286 	if (unqueued)
1287 		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1288 
1289 	if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
1290 		goto unlock;
1291 
1292 	pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
1293 		 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);
1294 
1295 	tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO);
1296 	if (!tmr)
1297 		goto unlock;
1298 
1299 	tmr->tmr_type = tcmu_tmr_type(tmf);
1300 	tmr->tmr_cmd_cnt = cmd_cnt;
1301 
1302 	if (cmd_cnt != 0) {
1303 		cmd_cnt = 0;
1304 		list_for_each_entry(se_cmd, cmd_list, state_list) {
1305 			if (!se_cmd->priv)
1306 				continue;
1307 			cmd = se_cmd->priv;
1308 			if (cmd->cmd_id)
1309 				tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
1310 		}
1311 	}
1312 
1313 	queue_tmr_ring(udev, tmr);
1314 
1315 unlock:
1316 	mutex_unlock(&udev->cmdr_lock);
1317 }
1318 
1319 static bool tcmu_handle_completion(struct tcmu_cmd *cmd,
1320 				   struct tcmu_cmd_entry *entry, bool keep_buf)
1321 {
1322 	struct se_cmd *se_cmd = cmd->se_cmd;
1323 	struct tcmu_dev *udev = cmd->tcmu_dev;
1324 	bool read_len_valid = false;
1325 	bool ret = true;
1326 	uint32_t read_len;
1327 
1328 	/*
1329 	 * cmd has been completed already from timeout, just reclaim
1330 	 * data area space and free cmd
1331 	 */
1332 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
1333 		WARN_ON_ONCE(se_cmd);
1334 		goto out;
1335 	}
1336 	if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
1337 		pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n",
1338 		       entry->hdr.cmd_id);
1339 		set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1340 		ret = false;
1341 		goto out;
1342 	}
1343 
1344 	list_del_init(&cmd->queue_entry);
1345 
1346 	tcmu_cmd_reset_dbi_cur(cmd);
1347 
1348 	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
1349 		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
1350 			cmd->se_cmd);
1351 		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
1352 		goto done;
1353 	}
1354 
1355 	read_len = se_cmd->data_length;
1356 	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
1357 	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
1358 		read_len_valid = true;
1359 		if (entry->rsp.read_len < read_len)
1360 			read_len = entry->rsp.read_len;
1361 	}
1362 
1363 	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
1364 		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
1365 		if (!read_len_valid)
1366 			goto done;
1367 		else
1368 			se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
1369 	}
1370 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
1371 		/* Get Data-In buffer before clean up */
1372 		gather_data_area(udev, cmd, true, read_len);
1373 	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
1374 		gather_data_area(udev, cmd, false, read_len);
1375 	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
1376 		/* TODO: */
1377 	} else if (se_cmd->data_direction != DMA_NONE) {
1378 		pr_warn("TCMU: data direction was %d!\n",
1379 			se_cmd->data_direction);
1380 	}
1381 
1382 done:
1383 	se_cmd->priv = NULL;
1384 	if (read_len_valid) {
1385 		pr_debug("read_len = %d\n", read_len);
1386 		target_complete_cmd_with_length(cmd->se_cmd,
1387 					entry->rsp.scsi_status, read_len);
1388 	} else
1389 		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
1390 
1391 out:
1392 	if (!keep_buf) {
1393 		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1394 		tcmu_free_cmd(cmd);
1395 	} else {
1396 		/*
1397 		 * Keep this command after completion, since userspace still
1398 		 * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF
1399 		 * and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept
1400 		 * a second completion later.
1401 		 * Userspace can free the buffer later by writing the cmd_id
1402 		 * to new action attribute free_kept_buf.
1403 		 */
1404 		clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1405 		set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags);
1406 	}
1407 	return ret;
1408 }
1409 
1410 static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
1411 {
1412 	struct tcmu_tmr *tmr, *tmp;
1413 	LIST_HEAD(tmrs);
1414 
1415 	if (list_empty(&udev->tmr_queue))
1416 		return 1;
1417 
1418 	pr_debug("running %s's tmr queue\n", udev->name);
1419 
1420 	list_splice_init(&udev->tmr_queue, &tmrs);
1421 
1422 	list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
1423 		list_del_init(&tmr->queue_entry);
1424 
1425 		pr_debug("removing tmr %p on dev %s from queue\n",
1426 			 tmr, udev->name);
1427 
1428 		if (queue_tmr_ring(udev, tmr)) {
1429 			pr_debug("ran out of space during tmr queue run\n");
1430 			/*
1431 			 * tmr was requeued, so just put all tmrs back in
1432 			 * the queue
1433 			 */
1434 			list_splice_tail(&tmrs, &udev->tmr_queue);
1435 			return 0;
1436 		}
1437 	}
1438 
1439 	return 1;
1440 }
1441 
1442 static bool tcmu_handle_completions(struct tcmu_dev *udev)
1443 {
1444 	struct tcmu_mailbox *mb;
1445 	struct tcmu_cmd *cmd;
1446 	bool free_space = false;
1447 
1448 	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
1449 		pr_err("ring broken, not handling completions\n");
1450 		return false;
1451 	}
1452 
1453 	mb = udev->mb_addr;
1454 	tcmu_flush_dcache_range(mb, sizeof(*mb));
1455 
1456 	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
1457 
1458 		struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;
1459 		bool keep_buf;
1460 
1461 		/*
1462 		 * Flush at most up to the end of the cmd ring, since the current
1463 		 * entry might be a PAD that is shorter than sizeof(*entry).
1464 		 */
1465 		size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
1466 					       udev->cmdr_size);
1467 		tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
1468 					ring_left : sizeof(*entry));
1469 
1470 		free_space = true;
1471 
1472 		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
1473 		    tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
1474 			UPDATE_HEAD(udev->cmdr_last_cleaned,
1475 				    tcmu_hdr_get_len(entry->hdr.len_op),
1476 				    udev->cmdr_size);
1477 			continue;
1478 		}
1479 		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);
1480 
1481 		keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF);
1482 		if (keep_buf)
1483 			cmd = xa_load(&udev->commands, entry->hdr.cmd_id);
1484 		else
1485 			cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
1486 		if (!cmd) {
1487 			pr_err("cmd_id %u not found, ring is broken\n",
1488 			       entry->hdr.cmd_id);
1489 			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
1490 			return false;
1491 		}
1492 
1493 		if (!tcmu_handle_completion(cmd, entry, keep_buf))
1494 			break;
1495 
1496 		UPDATE_HEAD(udev->cmdr_last_cleaned,
1497 			    tcmu_hdr_get_len(entry->hdr.len_op),
1498 			    udev->cmdr_size);
1499 	}
1500 	if (free_space)
1501 		free_space = tcmu_run_tmr_queue(udev);
1502 
1503 	if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
1504 	    xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
1505 		/*
1506 		 * The number of allocated pages exceeds the global limit and there
1507 		 * are no pending or waiting commands, so try to reclaim pages now.
1508 		 */
1509 		schedule_delayed_work(&tcmu_unmap_work, 0);
1510 	}
1511 	if (udev->cmd_time_out)
1512 		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
1513 
1514 	return free_space;
1515 }
1516 
1517 static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
1518 {
1519 	struct se_cmd *se_cmd;
1520 
1521 	if (!time_after_eq(jiffies, cmd->deadline))
1522 		return;
1523 
1524 	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
1525 	list_del_init(&cmd->queue_entry);
1526 	se_cmd = cmd->se_cmd;
1527 	se_cmd->priv = NULL;
1528 	cmd->se_cmd = NULL;
1529 
1530 	pr_debug("Timing out inflight cmd %u on dev %s.\n",
1531 		 cmd->cmd_id, cmd->tcmu_dev->name);
1532 
1533 	target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
1534 }
1535 
1536 static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
1537 {
1538 	struct se_cmd *se_cmd;
1539 
1540 	if (!time_after_eq(jiffies, cmd->deadline))
1541 		return;
1542 
1543 	pr_debug("Timing out queued cmd %p on dev %s.\n",
1544 		  cmd, cmd->tcmu_dev->name);
1545 
1546 	list_del_init(&cmd->queue_entry);
1547 	se_cmd = cmd->se_cmd;
1548 	tcmu_free_cmd(cmd);
1549 
1550 	se_cmd->priv = NULL;
1551 	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
1552 }
1553 
1554 static void tcmu_device_timedout(struct tcmu_dev *udev)
1555 {
1556 	spin_lock(&timed_out_udevs_lock);
1557 	if (list_empty(&udev->timedout_entry))
1558 		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
1559 	spin_unlock(&timed_out_udevs_lock);
1560 
1561 	schedule_delayed_work(&tcmu_unmap_work, 0);
1562 }
1563 
1564 static void tcmu_cmd_timedout(struct timer_list *t)
1565 {
1566 	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);
1567 
1568 	pr_debug("%s cmd timeout has expired\n", udev->name);
1569 	tcmu_device_timedout(udev);
1570 }
1571 
1572 static void tcmu_qfull_timedout(struct timer_list *t)
1573 {
1574 	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);
1575 
1576 	pr_debug("%s qfull timeout has expired\n", udev->name);
1577 	tcmu_device_timedout(udev);
1578 }
1579 
1580 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
1581 {
1582 	struct tcmu_hba *tcmu_hba;
1583 
1584 	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
1585 	if (!tcmu_hba)
1586 		return -ENOMEM;
1587 
1588 	tcmu_hba->host_id = host_id;
1589 	hba->hba_ptr = tcmu_hba;
1590 
1591 	return 0;
1592 }
1593 
1594 static void tcmu_detach_hba(struct se_hba *hba)
1595 {
1596 	kfree(hba->hba_ptr);
1597 	hba->hba_ptr = NULL;
1598 }
1599 
1600 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
1601 {
1602 	struct tcmu_dev *udev;
1603 
1604 	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
1605 	if (!udev)
1606 		return NULL;
1607 	kref_init(&udev->kref);
1608 
1609 	udev->name = kstrdup(name, GFP_KERNEL);
1610 	if (!udev->name) {
1611 		kfree(udev);
1612 		return NULL;
1613 	}
1614 
1615 	udev->hba = hba;
1616 	udev->cmd_time_out = TCMU_TIME_OUT;
1617 	udev->qfull_time_out = -1;
1618 
1619 	udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
1620 	udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
1621 	udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
1622 
1623 	mutex_init(&udev->cmdr_lock);
1624 
1625 	INIT_LIST_HEAD(&udev->node);
1626 	INIT_LIST_HEAD(&udev->timedout_entry);
1627 	INIT_LIST_HEAD(&udev->qfull_queue);
1628 	INIT_LIST_HEAD(&udev->tmr_queue);
1629 	INIT_LIST_HEAD(&udev->inflight_queue);
1630 	xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);
1631 
1632 	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
1633 	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);
1634 
1635 	xa_init(&udev->data_pages);
1636 
1637 	return &udev->se_dev;
1638 }
1639 
1640 static void tcmu_dev_call_rcu(struct rcu_head *p)
1641 {
1642 	struct se_device *dev = container_of(p, struct se_device, rcu_head);
1643 	struct tcmu_dev *udev = TCMU_DEV(dev);
1644 
1645 	kfree(udev->uio_info.name);
1646 	kfree(udev->name);
1647 	kfree(udev);
1648 }
1649 
1650 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
1651 {
1652 	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ||
1653 	    test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
1654 		kmem_cache_free(tcmu_cmd_cache, cmd);
1655 		return 0;
1656 	}
1657 	return -EINVAL;
1658 }
1659 
1660 static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
1661 				unsigned long last)
1662 {
1663 	XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk);
1664 	struct page *page;
1665 	u32 pages_freed = 0;
1666 
1667 	xas_lock(&xas);
1668 	xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) {
1669 		xas_store(&xas, NULL);
1670 		__free_page(page);
1671 		pages_freed++;
1672 	}
1673 	xas_unlock(&xas);
1674 
1675 	atomic_sub(pages_freed, &global_page_count);
1676 
1677 	return pages_freed;
1678 }
1679 
1680 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
1681 {
1682 	struct tcmu_tmr *tmr, *tmp;
1683 
1684 	list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
1685 		list_del_init(&tmr->queue_entry);
1686 		kfree(tmr);
1687 	}
1688 }
1689 
1690 static void tcmu_dev_kref_release(struct kref *kref)
1691 {
1692 	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
1693 	struct se_device *dev = &udev->se_dev;
1694 	struct tcmu_cmd *cmd;
1695 	bool all_expired = true;
1696 	unsigned long i;
1697 
1698 	vfree(udev->mb_addr);
1699 	udev->mb_addr = NULL;
1700 
1701 	spin_lock_bh(&timed_out_udevs_lock);
1702 	if (!list_empty(&udev->timedout_entry))
1703 		list_del(&udev->timedout_entry);
1704 	spin_unlock_bh(&timed_out_udevs_lock);
1705 
1706 	/* Upper layer should drain all requests before calling this */
1707 	mutex_lock(&udev->cmdr_lock);
1708 	xa_for_each(&udev->commands, i, cmd) {
1709 		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
1710 			all_expired = false;
1711 	}
1712 	/* There can be left over TMR cmds. Remove them. */
1713 	tcmu_remove_all_queued_tmr(udev);
1714 	if (!list_empty(&udev->qfull_queue))
1715 		all_expired = false;
1716 	xa_destroy(&udev->commands);
1717 	WARN_ON(!all_expired);
1718 
1719 	tcmu_blocks_release(udev, 0, udev->dbi_max);
1720 	bitmap_free(udev->data_bitmap);
1721 	mutex_unlock(&udev->cmdr_lock);
1722 
1723 	pr_debug("dev_kref_release\n");
1724 
1725 	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
1726 }
1727 
1728 static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
1729 {
1730 	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
1731 	LIST_HEAD(cmds);
1732 	sense_reason_t scsi_ret;
1733 	int ret;
1734 
1735 	if (list_empty(&udev->qfull_queue))
1736 		return;
1737 
1738 	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
1739 
1740 	list_splice_init(&udev->qfull_queue, &cmds);
1741 
1742 	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
1743 		list_del_init(&tcmu_cmd->queue_entry);
1744 
1745 		pr_debug("removing cmd %p on dev %s from queue\n",
1746 			 tcmu_cmd, udev->name);
1747 
1748 		if (fail) {
1749 			/*
1750 			 * We were not able to even start the command, so
1751 			 * fail with busy to allow a retry in case runner
1752 			 * was only temporarily down. If the device is being
1753 			 * removed then LIO core will do the right thing and
1754 			 * fail the retry.
1755 			 */
1756 			tcmu_cmd->se_cmd->priv = NULL;
1757 			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
1758 			tcmu_free_cmd(tcmu_cmd);
1759 			continue;
1760 		}
1761 
1762 		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
1763 		if (ret < 0) {
1764 			pr_debug("cmd %p on dev %s failed with %u\n",
1765 				 tcmu_cmd, udev->name, scsi_ret);
1766 			/*
1767 			 * Ignore scsi_ret for now. target_complete_cmd
1768 			 * drops it.
1769 			 */
1770 			tcmu_cmd->se_cmd->priv = NULL;
1771 			target_complete_cmd(tcmu_cmd->se_cmd,
1772 					    SAM_STAT_CHECK_CONDITION);
1773 			tcmu_free_cmd(tcmu_cmd);
1774 		} else if (ret > 0) {
1775 			pr_debug("ran out of space during cmdr queue run\n");
1776 			/*
1777 			 * cmd was requeued, so just put all cmds back in
1778 			 * the queue
1779 			 */
1780 			list_splice_tail(&cmds, &udev->qfull_queue);
1781 			break;
1782 		}
1783 	}
1784 
1785 	tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
1786 }
1787 
1788 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
1789 {
1790 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1791 
1792 	mutex_lock(&udev->cmdr_lock);
1793 	if (tcmu_handle_completions(udev))
1794 		run_qfull_queue(udev, false);
1795 	mutex_unlock(&udev->cmdr_lock);
1796 
1797 	return 0;
1798 }
1799 
1800 /*
1801  * mmap code from uio.c. Copied here because we want to hook mmap()
1802  * and this stuff must come along.
1803  */
1804 static int tcmu_find_mem_index(struct vm_area_struct *vma)
1805 {
1806 	struct tcmu_dev *udev = vma->vm_private_data;
1807 	struct uio_info *info = &udev->uio_info;
1808 
1809 	if (vma->vm_pgoff < MAX_UIO_MAPS) {
1810 		if (info->mem[vma->vm_pgoff].size == 0)
1811 			return -1;
1812 		return (int)vma->vm_pgoff;
1813 	}
1814 	return -1;
1815 }
1816 
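/*
 * Look up the data page for data page index @dpi. Only pages that were
 * actually allocated for a command may be faulted in; anything else means
 * userspace touched an address outside the iovecs it was given.
 */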
1817 static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
1818 {
1819 	struct page *page;
1820 
1821 	mutex_lock(&udev->cmdr_lock);
1822 	page = xa_load(&udev->data_pages, dpi);
1823 	if (likely(page)) {
1824 		mutex_unlock(&udev->cmdr_lock);
1825 		return page;
1826 	}
1827 
1828 	/*
1829 	 * Userspace messed up and passed in an address not in the
1830 	 * data iov passed to it.
1831 	 */
1832 	pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
1833 	       dpi, udev->name);
1834 	mutex_unlock(&udev->cmdr_lock);
1835 
1836 	return NULL;
1837 }
1838 
1839 static void tcmu_vma_open(struct vm_area_struct *vma)
1840 {
1841 	struct tcmu_dev *udev = vma->vm_private_data;
1842 
1843 	pr_debug("vma_open\n");
1844 
1845 	kref_get(&udev->kref);
1846 }
1847 
1848 static void tcmu_vma_close(struct vm_area_struct *vma)
1849 {
1850 	struct tcmu_dev *udev = vma->vm_private_data;
1851 
1852 	pr_debug("vma_close\n");
1853 
1854 	/* release ref from tcmu_vma_open */
1855 	kref_put(&udev->kref, tcmu_dev_kref_release);
1856 }
1857 
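/*
 * Page fault handler for the mmap-ed region: offsets below data_off are
 * served from the vmalloc-ed mailbox/command ring, everything above it
 * from the dynamically allocated data area pages.
 */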
1858 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
1859 {
1860 	struct tcmu_dev *udev = vmf->vma->vm_private_data;
1861 	struct uio_info *info = &udev->uio_info;
1862 	struct page *page;
1863 	unsigned long offset;
1864 	void *addr;
1865 
1866 	int mi = tcmu_find_mem_index(vmf->vma);
1867 	if (mi < 0)
1868 		return VM_FAULT_SIGBUS;
1869 
1870 	/*
1871 	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
1872 	 * to use mem[N].
1873 	 */
1874 	offset = (vmf->pgoff - mi) << PAGE_SHIFT;
1875 
1876 	if (offset < udev->data_off) {
1877 		/* For the vmalloc()ed cmd area pages */
1878 		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
1879 		page = vmalloc_to_page(addr);
1880 	} else {
1881 		uint32_t dpi;
1882 
1883 		/* For the dynamically growing data area pages */
1884 		dpi = (offset - udev->data_off) / PAGE_SIZE;
1885 		page = tcmu_try_get_data_page(udev, dpi);
1886 		if (!page)
1887 			return VM_FAULT_SIGBUS;
1888 	}
1889 
1890 	get_page(page);
1891 	vmf->page = page;
1892 	return 0;
1893 }
1894 
1895 static const struct vm_operations_struct tcmu_vm_ops = {
1896 	.open = tcmu_vma_open,
1897 	.close = tcmu_vma_close,
1898 	.fault = tcmu_vma_fault,
1899 };
1900 
1901 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
1902 {
1903 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1904 
1905 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
1906 	vma->vm_ops = &tcmu_vm_ops;
1907 
1908 	vma->vm_private_data = udev;
1909 
1910 	/* Ensure the mmap is exactly the right size */
1911 	if (vma_pages(vma) != udev->mmap_pages)
1912 		return -EINVAL;
1913 
1914 	tcmu_vma_open(vma);
1915 
1916 	return 0;
1917 }
1918 
1919 static int tcmu_open(struct uio_info *info, struct inode *inode)
1920 {
1921 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1922 
1923 	/* O_EXCL not supported for char devs, so fake it? */
1924 	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
1925 		return -EBUSY;
1926 
1927 	udev->inode = inode;
1928 
1929 	pr_debug("open\n");
1930 
1931 	return 0;
1932 }
1933 
1934 static int tcmu_release(struct uio_info *info, struct inode *inode)
1935 {
1936 	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
1937 	struct tcmu_cmd *cmd;
1938 	unsigned long i;
1939 	bool freed = false;
1940 
1941 	mutex_lock(&udev->cmdr_lock);
1942 
1943 	xa_for_each(&udev->commands, i, cmd) {
1944 		/* Cmds with KEEP_BUF set are no longer on the ring, but
1945 		 * userspace still holds the data buffer. If userspace closes
1946 		 * we implicitly free these cmds and buffers, since after new
1947 		 * open the (new ?) userspace cannot find the cmd in the ring
1948 		 * and thus never will release the buffer by writing cmd_id to
1949 		 * free_kept_buf action attribute.
1950 		 */
1951 		if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags))
1952 			continue;
1953 		pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n",
1954 			 cmd->cmd_id, udev->name);
1955 		freed = true;
1956 
1957 		xa_erase(&udev->commands, i);
1958 		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
1959 		tcmu_free_cmd(cmd);
1960 	}
1961 	/*
1962 	 * We only freed data space, not ring space. Therefore we don't call
1963 	 * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
1964 	 */
1965 	if (freed && list_empty(&udev->tmr_queue))
1966 		run_qfull_queue(udev, false);
1967 
1968 	mutex_unlock(&udev->cmdr_lock);
1969 
1970 	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
1971 
1972 	pr_debug("close\n");
1973 
1974 	return 0;
1975 }
1976 
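/*
 * If netlink reply support is enabled, register the outgoing netlink
 * command so a later reply from userspace can be matched to this device.
 * Fails if the interface is blocked or another command is still
 * outstanding.
 */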
1977 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
1978 {
1979 	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
1980 
1981 	if (!tcmu_kern_cmd_reply_supported)
1982 		return 0;
1983 
1984 	if (udev->nl_reply_supported <= 0)
1985 		return 0;
1986 
1987 	mutex_lock(&tcmu_nl_cmd_mutex);
1988 
1989 	if (tcmu_netlink_blocked) {
1990 		mutex_unlock(&tcmu_nl_cmd_mutex);
1991 		pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
1992 			udev->name);
1993 		return -EAGAIN;
1994 	}
1995 
1996 	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
1997 		mutex_unlock(&tcmu_nl_cmd_mutex);
1998 		pr_warn("netlink cmd %d already executing on %s\n",
1999 			 nl_cmd->cmd, udev->name);
2000 		return -EBUSY;
2001 	}
2002 
2003 	memset(nl_cmd, 0, sizeof(*nl_cmd));
2004 	nl_cmd->cmd = cmd;
2005 	nl_cmd->udev = udev;
2006 	init_completion(&nl_cmd->complete);
2007 	INIT_LIST_HEAD(&nl_cmd->nl_list);
2008 
2009 	list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);
2010 
2011 	mutex_unlock(&tcmu_nl_cmd_mutex);
2012 	return 0;
2013 }
2014 
2015 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
2016 {
2017 	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
2018 
2019 	if (!tcmu_kern_cmd_reply_supported)
2020 		return;
2021 
2022 	if (udev->nl_reply_supported <= 0)
2023 		return;
2024 
2025 	mutex_lock(&tcmu_nl_cmd_mutex);
2026 
2027 	list_del(&nl_cmd->nl_list);
2028 	memset(nl_cmd, 0, sizeof(*nl_cmd));
2029 
2030 	mutex_unlock(&tcmu_nl_cmd_mutex);
2031 }
2032 
2033 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
2034 {
2035 	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
2036 	int ret;
2037 
2038 	if (!tcmu_kern_cmd_reply_supported)
2039 		return 0;
2040 
2041 	if (udev->nl_reply_supported <= 0)
2042 		return 0;
2043 
2044 	pr_debug("sleeping for nl reply\n");
2045 	wait_for_completion(&nl_cmd->complete);
2046 
2047 	mutex_lock(&tcmu_nl_cmd_mutex);
2048 	nl_cmd->cmd = TCMU_CMD_UNSPEC;
2049 	ret = nl_cmd->status;
2050 	mutex_unlock(&tcmu_nl_cmd_mutex);
2051 
2052 	return ret;
2053 }
2054 
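/*
 * Allocate a netlink message for @cmd and fill in the attributes common
 * to all events (device name, uio minor, device id). The skb and message
 * header are returned to the caller for further attributes and sending.
 */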
2055 static int tcmu_netlink_event_init(struct tcmu_dev *udev,
2056 				   enum tcmu_genl_cmd cmd,
2057 				   struct sk_buff **buf, void **hdr)
2058 {
2059 	struct sk_buff *skb;
2060 	void *msg_header;
2061 	int ret = -ENOMEM;
2062 
2063 	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2064 	if (!skb)
2065 		return ret;
2066 
2067 	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
2068 	if (!msg_header)
2069 		goto free_skb;
2070 
2071 	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
2072 	if (ret < 0)
2073 		goto free_skb;
2074 
2075 	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
2076 	if (ret < 0)
2077 		goto free_skb;
2078 
2079 	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
2080 	if (ret < 0)
2081 		goto free_skb;
2082 
2083 	*buf = skb;
2084 	*hdr = msg_header;
2085 	return ret;
2086 
2087 free_skb:
2088 	nlmsg_free(skb);
2089 	return ret;
2090 }
2091 
2092 static int tcmu_netlink_event_send(struct tcmu_dev *udev,
2093 				   enum tcmu_genl_cmd cmd,
2094 				   struct sk_buff *skb, void *msg_header)
2095 {
2096 	int ret;
2097 
2098 	genlmsg_end(skb, msg_header);
2099 
2100 	ret = tcmu_init_genl_cmd_reply(udev, cmd);
2101 	if (ret) {
2102 		nlmsg_free(skb);
2103 		return ret;
2104 	}
2105 
2106 	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
2107 				      TCMU_MCGRP_CONFIG, GFP_KERNEL);
2108 
2109 	/* Wait during an add as the listener may not be up yet */
2110 	if (ret == 0 ||
2111 	   (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
2112 		return tcmu_wait_genl_cmd_reply(udev);
2113 	else
2114 		tcmu_destroy_genl_cmd_reply(udev);
2115 
2116 	return ret;
2117 }
2118 
2119 static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
2120 {
2121 	struct sk_buff *skb = NULL;
2122 	void *msg_header = NULL;
2123 	int ret = 0;
2124 
2125 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
2126 				      &msg_header);
2127 	if (ret < 0)
2128 		return ret;
2129 	return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
2130 				       msg_header);
2131 }
2132 
2133 static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
2134 {
2135 	struct sk_buff *skb = NULL;
2136 	void *msg_header = NULL;
2137 	int ret = 0;
2138 
2139 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
2140 				      &skb, &msg_header);
2141 	if (ret < 0)
2142 		return ret;
2143 	return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
2144 				       skb, msg_header);
2145 }
2146 
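/*
 * (Re)build the uio device name. The resulting string has the form
 * "tcm-user/<host_id>/<name>[/<dev_config>]", e.g. something like
 * "tcm-user/1/foo/ram:16M" when a dev_config string has been set
 * (the config value shown here is only an illustration).
 */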
2147 static int tcmu_update_uio_info(struct tcmu_dev *udev)
2148 {
2149 	struct tcmu_hba *hba = udev->hba->hba_ptr;
2150 	struct uio_info *info;
2151 	char *str;
2152 
2153 	info = &udev->uio_info;
2154 
2155 	if (udev->dev_config[0])
2156 		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
2157 				udev->name, udev->dev_config);
2158 	else
2159 		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
2160 				udev->name);
2161 	if (!str)
2162 		return -ENOMEM;
2163 
2164 	/* If the old string exists, free it */
2165 	kfree(info->name);
2166 	info->name = str;
2167 
2168 	return 0;
2169 }
2170 
2171 static int tcmu_configure_device(struct se_device *dev)
2172 {
2173 	struct tcmu_dev *udev = TCMU_DEV(dev);
2174 	struct uio_info *info;
2175 	struct tcmu_mailbox *mb;
2176 	size_t data_size;
2177 	int ret = 0;
2178 
2179 	ret = tcmu_update_uio_info(udev);
2180 	if (ret)
2181 		return ret;
2182 
2183 	info = &udev->uio_info;
2184 
2185 	mutex_lock(&udev->cmdr_lock);
2186 	udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
2187 	mutex_unlock(&udev->cmdr_lock);
2188 	if (!udev->data_bitmap) {
2189 		ret = -ENOMEM;
2190 		goto err_bitmap_alloc;
2191 	}
2192 
2193 	mb = vzalloc(MB_CMDR_SIZE);
2194 	if (!mb) {
2195 		ret = -ENOMEM;
2196 		goto err_vzalloc;
2197 	}
2198 
2199 	/* mailbox fits in first part of CMDR space */
2200 	udev->mb_addr = mb;
2201 	udev->cmdr = (void *)mb + CMDR_OFF;
2202 	udev->cmdr_size = CMDR_SIZE;
2203 	udev->data_off = MB_CMDR_SIZE;
2204 	data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
2205 	udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
2206 	udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
2207 	udev->dbi_thresh = 0; /* Default in Idle state */
2208 
2209 	/* Initialise the mailbox of the ring buffer */
2210 	mb->version = TCMU_MAILBOX_VERSION;
2211 	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
2212 		    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
2213 		    TCMU_MAILBOX_FLAG_CAP_TMR |
2214 		    TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
2215 	mb->cmdr_off = CMDR_OFF;
2216 	mb->cmdr_size = udev->cmdr_size;
2217 
2218 	WARN_ON(!PAGE_ALIGNED(udev->data_off));
2219 	WARN_ON(data_size % PAGE_SIZE);
2220 
2221 	info->version = __stringify(TCMU_MAILBOX_VERSION);
2222 
2223 	info->mem[0].name = "tcm-user command & data buffer";
2224 	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
2225 	info->mem[0].size = data_size + MB_CMDR_SIZE;
2226 	info->mem[0].memtype = UIO_MEM_NONE;
2227 
2228 	info->irqcontrol = tcmu_irqcontrol;
2229 	info->irq = UIO_IRQ_CUSTOM;
2230 
2231 	info->mmap = tcmu_mmap;
2232 	info->open = tcmu_open;
2233 	info->release = tcmu_release;
2234 
2235 	ret = uio_register_device(tcmu_root_device, info);
2236 	if (ret)
2237 		goto err_register;
2238 
2239 	/* User can set hw_block_size before enabling the device */
2240 	if (dev->dev_attrib.hw_block_size == 0)
2241 		dev->dev_attrib.hw_block_size = 512;
2242 	/* Other attributes can be configured in userspace */
2243 	if (!dev->dev_attrib.hw_max_sectors)
2244 		dev->dev_attrib.hw_max_sectors = 128;
2245 	if (!dev->dev_attrib.emulate_write_cache)
2246 		dev->dev_attrib.emulate_write_cache = 0;
2247 	dev->dev_attrib.hw_queue_depth = 128;
2248 
2249 	/* If user didn't explicitly disable netlink reply support, use
2250 	 * module scope setting.
2251 	 */
2252 	if (udev->nl_reply_supported >= 0)
2253 		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
2254 
2255 	/*
2256 	 * Get a ref in case userspace does a close on the uio device before
2257 	 * LIO has initiated tcmu_free_device.
2258 	 */
2259 	kref_get(&udev->kref);
2260 
2261 	ret = tcmu_send_dev_add_event(udev);
2262 	if (ret)
2263 		goto err_netlink;
2264 
2265 	mutex_lock(&root_udev_mutex);
2266 	list_add(&udev->node, &root_udev);
2267 	mutex_unlock(&root_udev_mutex);
2268 
2269 	return 0;
2270 
2271 err_netlink:
2272 	kref_put(&udev->kref, tcmu_dev_kref_release);
2273 	uio_unregister_device(&udev->uio_info);
2274 err_register:
2275 	vfree(udev->mb_addr);
2276 	udev->mb_addr = NULL;
2277 err_vzalloc:
2278 	bitmap_free(udev->data_bitmap);
2279 	udev->data_bitmap = NULL;
2280 err_bitmap_alloc:
2281 	kfree(info->name);
2282 	info->name = NULL;
2283 
2284 	return ret;
2285 }
2286 
2287 static void tcmu_free_device(struct se_device *dev)
2288 {
2289 	struct tcmu_dev *udev = TCMU_DEV(dev);
2290 
2291 	/* release ref from init */
2292 	kref_put(&udev->kref, tcmu_dev_kref_release);
2293 }
2294 
2295 static void tcmu_destroy_device(struct se_device *dev)
2296 {
2297 	struct tcmu_dev *udev = TCMU_DEV(dev);
2298 
2299 	del_timer_sync(&udev->cmd_timer);
2300 	del_timer_sync(&udev->qfull_timer);
2301 
2302 	mutex_lock(&root_udev_mutex);
2303 	list_del(&udev->node);
2304 	mutex_unlock(&root_udev_mutex);
2305 
2306 	tcmu_send_dev_remove_event(udev);
2307 
2308 	uio_unregister_device(&udev->uio_info);
2309 
2310 	/* release ref from configure */
2311 	kref_put(&udev->kref, tcmu_dev_kref_release);
2312 }
2313 
2314 static void tcmu_unblock_dev(struct tcmu_dev *udev)
2315 {
2316 	mutex_lock(&udev->cmdr_lock);
2317 	clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
2318 	mutex_unlock(&udev->cmdr_lock);
2319 }
2320 
2321 static void tcmu_block_dev(struct tcmu_dev *udev)
2322 {
2323 	mutex_lock(&udev->cmdr_lock);
2324 
2325 	if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2326 		goto unlock;
2327 
2328 	/* complete IO that has executed successfully */
2329 	tcmu_handle_completions(udev);
2330 	/* fail IO waiting to be queued */
2331 	run_qfull_queue(udev, true);
2332 
2333 unlock:
2334 	mutex_unlock(&udev->cmdr_lock);
2335 }
2336 
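/*
 * Forcibly clean the ring after userspace misbehaved or restarted: every
 * command still tracked is completed back to the core (BUSY for
 * err_level 1, CHECK CONDITION otherwise), the mailbox head/tail are
 * reset and queued TMRs are dropped.
 */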
2337 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
2338 {
2339 	struct tcmu_mailbox *mb;
2340 	struct tcmu_cmd *cmd;
2341 	unsigned long i;
2342 
2343 	mutex_lock(&udev->cmdr_lock);
2344 
2345 	xa_for_each(&udev->commands, i, cmd) {
2346 		pr_debug("removing cmd %u on dev %s from ring %s\n",
2347 			 cmd->cmd_id, udev->name,
2348 			 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
2349 			 "(is expired)" :
2350 			 (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
2351 			 "(is keep buffer)" : ""));
2352 
2353 		xa_erase(&udev->commands, i);
2354 		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
2355 		    !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
2356 			WARN_ON(!cmd->se_cmd);
2357 			list_del_init(&cmd->queue_entry);
2358 			cmd->se_cmd->priv = NULL;
2359 			if (err_level == 1) {
2360 				/*
2361 				 * Userspace was not able to start the
2362 				 * command or it is retryable.
2363 				 */
2364 				target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2365 			} else {
2366 				/* hard failure */
2367 				target_complete_cmd(cmd->se_cmd,
2368 						    SAM_STAT_CHECK_CONDITION);
2369 			}
2370 		}
2371 		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2372 		tcmu_free_cmd(cmd);
2373 	}
2374 
2375 	mb = udev->mb_addr;
2376 	tcmu_flush_dcache_range(mb, sizeof(*mb));
2377 	pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2378 		 mb->cmd_tail, mb->cmd_head);
2379 
2380 	udev->cmdr_last_cleaned = 0;
2381 	mb->cmd_tail = 0;
2382 	mb->cmd_head = 0;
2383 	tcmu_flush_dcache_range(mb, sizeof(*mb));
2384 	clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
2385 
2386 	del_timer(&udev->cmd_timer);
2387 
2388 	/*
2389 	 * The ring is empty and the qfull queue never contains aborted
2390 	 * commands, so TMRs in the tmr queue do not contain relevant cmd_ids.
2391 	 * After a ring reset userspace should do a fresh start, so
2392 	 * even a LUN RESET message is no longer relevant.
2393 	 * Therefore remove all TMRs from the tmr queue.
2394 	 */
2395 	tcmu_remove_all_queued_tmr(udev);
2396 
2397 	run_qfull_queue(udev, false);
2398 
2399 	mutex_unlock(&udev->cmdr_lock);
2400 }
2401 
2402 enum {
2403 	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
2404 	Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
2405 	Opt_err,
2406 };
2407 
2408 static match_table_t tokens = {
2409 	{Opt_dev_config, "dev_config=%s"},
2410 	{Opt_dev_size, "dev_size=%s"},
2411 	{Opt_hw_block_size, "hw_block_size=%d"},
2412 	{Opt_hw_max_sectors, "hw_max_sectors=%d"},
2413 	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
2414 	{Opt_max_data_area_mb, "max_data_area_mb=%d"},
2415 	{Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
2416 	{Opt_err, NULL}
2417 };
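
/*
 * These options are parsed from the configfs "control" attribute of a
 * user-backed device. As an illustration only (the path and values below
 * are an example and depend on how the configfs tree was set up), a
 * device could be configured with something like:
 *
 *   echo "dev_config=foo/bar,max_data_area_mb=256,data_pages_per_blk=4" \
 *       > /sys/kernel/config/target/core/user_0/mydev/control
 *
 * before writing 1 to its "enable" attribute.
 */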
2418 
2419 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2420 {
2421 	int val, ret;
2422 
2423 	ret = match_int(arg, &val);
2424 	if (ret < 0) {
2425 		pr_err("match_int() failed for dev attrib. Error %d.\n",
2426 		       ret);
2427 		return ret;
2428 	}
2429 
2430 	if (val <= 0) {
2431 		pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2432 		       val);
2433 		return -EINVAL;
2434 	}
2435 	*dev_attrib = val;
2436 	return 0;
2437 }
2438 
2439 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2440 {
2441 	int val, ret;
2442 	uint32_t pages_per_blk = udev->data_pages_per_blk;
2443 
2444 	ret = match_int(arg, &val);
2445 	if (ret < 0) {
2446 		pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2447 		       ret);
2448 		return ret;
2449 	}
2450 	if (val <= 0) {
2451 		pr_err("Invalid max_data_area %d.\n", val);
2452 		return -EINVAL;
2453 	}
2454 	if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
2455 		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
2456 		       val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
2457 		val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
2458 	}
2459 	if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
2460 		pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
2461 		       val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
2462 		return -EINVAL;
2463 	}
2464 
2465 	mutex_lock(&udev->cmdr_lock);
2466 	if (udev->data_bitmap) {
2467 		pr_err("Cannot set max_data_area_mb after the device has been enabled.\n");
2468 		ret = -EINVAL;
2469 		goto unlock;
2470 	}
2471 
2472 	udev->data_area_mb = val;
2473 	udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;
2474 
2475 unlock:
2476 	mutex_unlock(&udev->cmdr_lock);
2477 	return ret;
2478 }
2479 
2480 static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
2481 {
2482 	int val, ret;
2483 
2484 	ret = match_int(arg, &val);
2485 	if (ret < 0) {
2486 		pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
2487 		       ret);
2488 		return ret;
2489 	}
2490 
2491 	if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
2492 		pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d (%zd pages).\n",
2493 		       val, udev->data_area_mb,
2494 		       TCMU_MBS_TO_PAGES(udev->data_area_mb));
2495 		return -EINVAL;
2496 	}
2497 
2498 	mutex_lock(&udev->cmdr_lock);
2499 	if (udev->data_bitmap) {
2500 		pr_err("Cannot set data_pages_per_blk after the device has been enabled.\n");
2501 		ret = -EINVAL;
2502 		goto unlock;
2503 	}
2504 
2505 	udev->data_pages_per_blk = val;
2506 	udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;
2507 
2508 unlock:
2509 	mutex_unlock(&udev->cmdr_lock);
2510 	return ret;
2511 }
2512 
2513 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
2514 		const char *page, ssize_t count)
2515 {
2516 	struct tcmu_dev *udev = TCMU_DEV(dev);
2517 	char *orig, *ptr, *opts;
2518 	substring_t args[MAX_OPT_ARGS];
2519 	int ret = 0, token;
2520 
2521 	opts = kstrdup(page, GFP_KERNEL);
2522 	if (!opts)
2523 		return -ENOMEM;
2524 
2525 	orig = opts;
2526 
2527 	while ((ptr = strsep(&opts, ",\n")) != NULL) {
2528 		if (!*ptr)
2529 			continue;
2530 
2531 		token = match_token(ptr, tokens, args);
2532 		switch (token) {
2533 		case Opt_dev_config:
2534 			if (match_strlcpy(udev->dev_config, &args[0],
2535 					  TCMU_CONFIG_LEN) == 0) {
2536 				ret = -EINVAL;
2537 				break;
2538 			}
2539 			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
2540 			break;
2541 		case Opt_dev_size:
2542 			ret = match_u64(&args[0], &udev->dev_size);
2543 			if (ret < 0)
2544 				pr_err("match_u64() failed for dev_size=. Error %d.\n",
2545 				       ret);
2546 			break;
2547 		case Opt_hw_block_size:
2548 			ret = tcmu_set_dev_attrib(&args[0],
2549 					&(dev->dev_attrib.hw_block_size));
2550 			break;
2551 		case Opt_hw_max_sectors:
2552 			ret = tcmu_set_dev_attrib(&args[0],
2553 					&(dev->dev_attrib.hw_max_sectors));
2554 			break;
2555 		case Opt_nl_reply_supported:
2556 			ret = match_int(&args[0], &udev->nl_reply_supported);
2557 			if (ret < 0)
2558 				pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
2559 				       ret);
2560 			break;
2561 		case Opt_max_data_area_mb:
2562 			ret = tcmu_set_max_blocks_param(udev, &args[0]);
2563 			break;
2564 		case Opt_data_pages_per_blk:
2565 			ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
2566 			break;
2567 		default:
2568 			break;
2569 		}
2570 
2571 		if (ret)
2572 			break;
2573 	}
2574 
2575 	kfree(orig);
2576 	return (!ret) ? count : ret;
2577 }
2578 
2579 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
2580 {
2581 	struct tcmu_dev *udev = TCMU_DEV(dev);
2582 	ssize_t bl = 0;
2583 
2584 	bl = sprintf(b + bl, "Config: %s ",
2585 		     udev->dev_config[0] ? udev->dev_config : "NULL");
2586 	bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
2587 	bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
2588 	bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk);
2589 
2590 	return bl;
2591 }
2592 
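/*
 * Report the last addressable block, i.e. dev_size / block_size - 1,
 * which is what the core uses e.g. for READ CAPACITY.
 */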
2593 static sector_t tcmu_get_blocks(struct se_device *dev)
2594 {
2595 	struct tcmu_dev *udev = TCMU_DEV(dev);
2596 
2597 	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
2598 		       dev->dev_attrib.block_size);
2599 }
2600 
2601 static sense_reason_t
2602 tcmu_parse_cdb(struct se_cmd *cmd)
2603 {
2604 	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
2605 }
2606 
2607 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
2608 {
2609 	struct se_dev_attrib *da = container_of(to_config_group(item),
2610 					struct se_dev_attrib, da_group);
2611 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2612 
2613 	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
2614 }
2615 
2616 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
2617 				       size_t count)
2618 {
2619 	struct se_dev_attrib *da = container_of(to_config_group(item),
2620 					struct se_dev_attrib, da_group);
2621 	struct tcmu_dev *udev = container_of(da->da_dev,
2622 					struct tcmu_dev, se_dev);
2623 	u32 val;
2624 	int ret;
2625 
2626 	if (da->da_dev->export_count) {
2627 		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
2628 		return -EINVAL;
2629 	}
2630 
2631 	ret = kstrtou32(page, 0, &val);
2632 	if (ret < 0)
2633 		return ret;
2634 
2635 	udev->cmd_time_out = val * MSEC_PER_SEC;
2636 	return count;
2637 }
2638 CONFIGFS_ATTR(tcmu_, cmd_time_out);
2639 
2640 static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
2641 {
2642 	struct se_dev_attrib *da = container_of(to_config_group(item),
2643 						struct se_dev_attrib, da_group);
2644 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2645 
2646 	return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
2647 			udev->qfull_time_out :
2648 			udev->qfull_time_out / MSEC_PER_SEC);
2649 }
2650 
2651 static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
2652 					 const char *page, size_t count)
2653 {
2654 	struct se_dev_attrib *da = container_of(to_config_group(item),
2655 					struct se_dev_attrib, da_group);
2656 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2657 	s32 val;
2658 	int ret;
2659 
2660 	ret = kstrtos32(page, 0, &val);
2661 	if (ret < 0)
2662 		return ret;
2663 
2664 	if (val >= 0) {
2665 		udev->qfull_time_out = val * MSEC_PER_SEC;
2666 	} else if (val == -1) {
2667 		udev->qfull_time_out = val;
2668 	} else {
2669 		printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
2670 		return -EINVAL;
2671 	}
2672 	return count;
2673 }
2674 CONFIGFS_ATTR(tcmu_, qfull_time_out);
2675 
2676 static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
2677 {
2678 	struct se_dev_attrib *da = container_of(to_config_group(item),
2679 						struct se_dev_attrib, da_group);
2680 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2681 
2682 	return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
2683 }
2684 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);
2685 
2686 static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
2687 					    char *page)
2688 {
2689 	struct se_dev_attrib *da = container_of(to_config_group(item),
2690 						struct se_dev_attrib, da_group);
2691 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2692 
2693 	return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
2694 }
2695 CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
2696 
2697 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
2698 {
2699 	struct se_dev_attrib *da = container_of(to_config_group(item),
2700 						struct se_dev_attrib, da_group);
2701 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2702 
2703 	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
2704 }
2705 
2706 static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
2707 				      const char *reconfig_data)
2708 {
2709 	struct sk_buff *skb = NULL;
2710 	void *msg_header = NULL;
2711 	int ret = 0;
2712 
2713 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2714 				      &skb, &msg_header);
2715 	if (ret < 0)
2716 		return ret;
2717 	ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
2718 	if (ret < 0) {
2719 		nlmsg_free(skb);
2720 		return ret;
2721 	}
2722 	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2723 				       skb, msg_header);
2724 }
2725 
2726 
2727 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
2728 				     size_t count)
2729 {
2730 	struct se_dev_attrib *da = container_of(to_config_group(item),
2731 						struct se_dev_attrib, da_group);
2732 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2733 	int ret, len;
2734 
2735 	len = strlen(page);
2736 	if (!len || len > TCMU_CONFIG_LEN - 1)
2737 		return -EINVAL;
2738 
2739 	/* Check if device has been configured before */
2740 	if (target_dev_configured(&udev->se_dev)) {
2741 		ret = tcmu_send_dev_config_event(udev, page);
2742 		if (ret) {
2743 			pr_err("Unable to reconfigure device\n");
2744 			return ret;
2745 		}
2746 		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2747 
2748 		ret = tcmu_update_uio_info(udev);
2749 		if (ret)
2750 			return ret;
2751 		return count;
2752 	}
2753 	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);
2754 
2755 	return count;
2756 }
2757 CONFIGFS_ATTR(tcmu_, dev_config);
2758 
2759 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
2760 {
2761 	struct se_dev_attrib *da = container_of(to_config_group(item),
2762 						struct se_dev_attrib, da_group);
2763 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2764 
2765 	return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
2766 }
2767 
2768 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
2769 {
2770 	struct sk_buff *skb = NULL;
2771 	void *msg_header = NULL;
2772 	int ret = 0;
2773 
2774 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2775 				      &skb, &msg_header);
2776 	if (ret < 0)
2777 		return ret;
2778 	ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
2779 				size, TCMU_ATTR_PAD);
2780 	if (ret < 0) {
2781 		nlmsg_free(skb);
2782 		return ret;
2783 	}
2784 	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2785 				       skb, msg_header);
2786 }
2787 
2788 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
2789 				   size_t count)
2790 {
2791 	struct se_dev_attrib *da = container_of(to_config_group(item),
2792 						struct se_dev_attrib, da_group);
2793 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2794 	u64 val;
2795 	int ret;
2796 
2797 	ret = kstrtou64(page, 0, &val);
2798 	if (ret < 0)
2799 		return ret;
2800 
2801 	/* Check if device has been configured before */
2802 	if (target_dev_configured(&udev->se_dev)) {
2803 		ret = tcmu_send_dev_size_event(udev, val);
2804 		if (ret) {
2805 			pr_err("Unable to reconfigure device\n");
2806 			return ret;
2807 		}
2808 	}
2809 	udev->dev_size = val;
2810 	return count;
2811 }
2812 CONFIGFS_ATTR(tcmu_, dev_size);
2813 
2814 static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
2815 		char *page)
2816 {
2817 	struct se_dev_attrib *da = container_of(to_config_group(item),
2818 						struct se_dev_attrib, da_group);
2819 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2820 
2821 	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
2822 }
2823 
2824 static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
2825 		const char *page, size_t count)
2826 {
2827 	struct se_dev_attrib *da = container_of(to_config_group(item),
2828 						struct se_dev_attrib, da_group);
2829 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2830 	s8 val;
2831 	int ret;
2832 
2833 	ret = kstrtos8(page, 0, &val);
2834 	if (ret < 0)
2835 		return ret;
2836 
2837 	udev->nl_reply_supported = val;
2838 	return count;
2839 }
2840 CONFIGFS_ATTR(tcmu_, nl_reply_supported);
2841 
2842 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
2843 					     char *page)
2844 {
2845 	struct se_dev_attrib *da = container_of(to_config_group(item),
2846 					struct se_dev_attrib, da_group);
2847 
2848 	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
2849 }
2850 
2851 static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
2852 {
2853 	struct sk_buff *skb = NULL;
2854 	void *msg_header = NULL;
2855 	int ret = 0;
2856 
2857 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
2858 				      &skb, &msg_header);
2859 	if (ret < 0)
2860 		return ret;
2861 	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
2862 	if (ret < 0) {
2863 		nlmsg_free(skb);
2864 		return ret;
2865 	}
2866 	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
2867 				       skb, msg_header);
2868 }
2869 
2870 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
2871 					      const char *page, size_t count)
2872 {
2873 	struct se_dev_attrib *da = container_of(to_config_group(item),
2874 					struct se_dev_attrib, da_group);
2875 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2876 	u8 val;
2877 	int ret;
2878 
2879 	ret = kstrtou8(page, 0, &val);
2880 	if (ret < 0)
2881 		return ret;
2882 
2883 	/* Check if device has been configured before */
2884 	if (target_dev_configured(&udev->se_dev)) {
2885 		ret = tcmu_send_emulate_write_cache(udev, val);
2886 		if (ret) {
2887 			pr_err("Unable to reconfigure device\n");
2888 			return ret;
2889 		}
2890 	}
2891 
2892 	da->emulate_write_cache = val;
2893 	return count;
2894 }
2895 CONFIGFS_ATTR(tcmu_, emulate_write_cache);
2896 
2897 static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
2898 {
2899 	struct se_dev_attrib *da = container_of(to_config_group(item),
2900 					struct se_dev_attrib, da_group);
2901 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2902 
2903 	return snprintf(page, PAGE_SIZE, "%i\n",
2904 			test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
2905 }
2906 
2907 static ssize_t tcmu_tmr_notification_store(struct config_item *item,
2908 					   const char *page, size_t count)
2909 {
2910 	struct se_dev_attrib *da = container_of(to_config_group(item),
2911 					struct se_dev_attrib, da_group);
2912 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
2913 	u8 val;
2914 	int ret;
2915 
2916 	ret = kstrtou8(page, 0, &val);
2917 	if (ret < 0)
2918 		return ret;
2919 	if (val > 1)
2920 		return -EINVAL;
2921 
2922 	if (val)
2923 		set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
2924 	else
2925 		clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
2926 	return count;
2927 }
2928 CONFIGFS_ATTR(tcmu_, tmr_notification);
2929 
2930 static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
2931 {
2932 	struct se_device *se_dev = container_of(to_config_group(item),
2933 						struct se_device,
2934 						dev_action_group);
2935 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
2936 
2937 	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
2938 		return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
2939 	else
2940 		return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
2941 }
2942 
2943 static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
2944 				    size_t count)
2945 {
2946 	struct se_device *se_dev = container_of(to_config_group(item),
2947 						struct se_device,
2948 						dev_action_group);
2949 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
2950 	u8 val;
2951 	int ret;
2952 
2953 	if (!target_dev_configured(&udev->se_dev)) {
2954 		pr_err("Device is not configured.\n");
2955 		return -EINVAL;
2956 	}
2957 
2958 	ret = kstrtou8(page, 0, &val);
2959 	if (ret < 0)
2960 		return ret;
2961 
2962 	if (val > 1) {
2963 		pr_err("Invalid block value %d\n", val);
2964 		return -EINVAL;
2965 	}
2966 
2967 	if (!val)
2968 		tcmu_unblock_dev(udev);
2969 	else
2970 		tcmu_block_dev(udev);
2971 	return count;
2972 }
2973 CONFIGFS_ATTR(tcmu_, block_dev);
2974 
2975 static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
2976 				     size_t count)
2977 {
2978 	struct se_device *se_dev = container_of(to_config_group(item),
2979 						struct se_device,
2980 						dev_action_group);
2981 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
2982 	u8 val;
2983 	int ret;
2984 
2985 	if (!target_dev_configured(&udev->se_dev)) {
2986 		pr_err("Device is not configured.\n");
2987 		return -EINVAL;
2988 	}
2989 
2990 	ret = kstrtou8(page, 0, &val);
2991 	if (ret < 0)
2992 		return ret;
2993 
2994 	if (val != 1 && val != 2) {
2995 		pr_err("Invalid reset ring value %d\n", val);
2996 		return -EINVAL;
2997 	}
2998 
2999 	tcmu_reset_ring(udev, val);
3000 	return count;
3001 }
3002 CONFIGFS_ATTR_WO(tcmu_, reset_ring);
3003 
3004 static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
3005 					size_t count)
3006 {
3007 	struct se_device *se_dev = container_of(to_config_group(item),
3008 						struct se_device,
3009 						dev_action_group);
3010 	struct tcmu_dev *udev = TCMU_DEV(se_dev);
3011 	struct tcmu_cmd *cmd;
3012 	u16 cmd_id;
3013 	int ret;
3014 
3015 	if (!target_dev_configured(&udev->se_dev)) {
3016 		pr_err("Device is not configured.\n");
3017 		return -EINVAL;
3018 	}
3019 
3020 	ret = kstrtou16(page, 0, &cmd_id);
3021 	if (ret < 0)
3022 		return ret;
3023 
3024 	mutex_lock(&udev->cmdr_lock);
3025 
3026 	{
3027 		XA_STATE(xas, &udev->commands, cmd_id);
3028 
3029 		xas_lock(&xas);
3030 		cmd = xas_load(&xas);
3031 		if (!cmd) {
3032 			pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
3033 			count = -EINVAL;
3034 			xas_unlock(&xas);
3035 			goto out_unlock;
3036 		}
3037 		if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
3038 			pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
3039 			       cmd_id);
3040 			count = -EINVAL;
3041 			xas_unlock(&xas);
3042 			goto out_unlock;
3043 		}
3044 		xas_store(&xas, NULL);
3045 		xas_unlock(&xas);
3046 	}
3047 
3048 	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
3049 	tcmu_free_cmd(cmd);
3050 	/*
3051 	 * We only freed data space, not ring space. Therefore we don't call
3052 	 * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
3053 	 */
3054 	if (list_empty(&udev->tmr_queue))
3055 		run_qfull_queue(udev, false);
3056 
3057 out_unlock:
3058 	mutex_unlock(&udev->cmdr_lock);
3059 	return count;
3060 }
3061 CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);
3062 
3063 static struct configfs_attribute *tcmu_attrib_attrs[] = {
3064 	&tcmu_attr_cmd_time_out,
3065 	&tcmu_attr_qfull_time_out,
3066 	&tcmu_attr_max_data_area_mb,
3067 	&tcmu_attr_data_pages_per_blk,
3068 	&tcmu_attr_dev_config,
3069 	&tcmu_attr_dev_size,
3070 	&tcmu_attr_emulate_write_cache,
3071 	&tcmu_attr_tmr_notification,
3072 	&tcmu_attr_nl_reply_supported,
3073 	NULL,
3074 };
3075 
3076 static struct configfs_attribute **tcmu_attrs;
3077 
3078 static struct configfs_attribute *tcmu_action_attrs[] = {
3079 	&tcmu_attr_block_dev,
3080 	&tcmu_attr_reset_ring,
3081 	&tcmu_attr_free_kept_buf,
3082 	NULL,
3083 };
3084 
3085 static struct target_backend_ops tcmu_ops = {
3086 	.name			= "user",
3087 	.owner			= THIS_MODULE,
3088 	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
3089 	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
3090 				      TRANSPORT_FLAG_PASSTHROUGH_ALUA,
3091 	.attach_hba		= tcmu_attach_hba,
3092 	.detach_hba		= tcmu_detach_hba,
3093 	.alloc_device		= tcmu_alloc_device,
3094 	.configure_device	= tcmu_configure_device,
3095 	.destroy_device		= tcmu_destroy_device,
3096 	.free_device		= tcmu_free_device,
3097 	.unplug_device		= tcmu_unplug_device,
3098 	.plug_device		= tcmu_plug_device,
3099 	.parse_cdb		= tcmu_parse_cdb,
3100 	.tmr_notify		= tcmu_tmr_notify,
3101 	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
3102 	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
3103 	.get_device_type	= sbc_get_device_type,
3104 	.get_blocks		= tcmu_get_blocks,
3105 	.tb_dev_action_attrs	= tcmu_action_attrs,
3106 };
3107 
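/*
 * Global memory reclaim: once the number of allocated data pages exceeds
 * tcmu_global_max_pages, walk all devices, drop the unused tail of each
 * data area (blocks above the last set bit in the bitmap), unmap it from
 * userspace and free the pages. Re-schedules itself while still over the
 * limit.
 */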
3108 static void find_free_blocks(void)
3109 {
3110 	struct tcmu_dev *udev;
3111 	loff_t off;
3112 	u32 pages_freed, total_pages_freed = 0;
3113 	u32 start, end, block, total_blocks_freed = 0;
3114 
3115 	if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
3116 		return;
3117 
3118 	mutex_lock(&root_udev_mutex);
3119 	list_for_each_entry(udev, &root_udev, node) {
3120 		mutex_lock(&udev->cmdr_lock);
3121 
3122 		if (!target_dev_configured(&udev->se_dev)) {
3123 			mutex_unlock(&udev->cmdr_lock);
3124 			continue;
3125 		}
3126 
3127 		/* Try to complete the finished commands first */
3128 		if (tcmu_handle_completions(udev))
3129 			run_qfull_queue(udev, false);
3130 
3131 		/* Skip the udevs in idle */
3132 		if (!udev->dbi_thresh) {
3133 			mutex_unlock(&udev->cmdr_lock);
3134 			continue;
3135 		}
3136 
3137 		end = udev->dbi_max + 1;
3138 		block = find_last_bit(udev->data_bitmap, end);
3139 		if (block == udev->dbi_max) {
3140 			/*
3141 			 * The last bit is dbi_max, so it is not possible to
3142 			 * reclaim any blocks.
3143 			 */
3144 			mutex_unlock(&udev->cmdr_lock);
3145 			continue;
3146 		} else if (block == end) {
3147 			/* The current udev will goto idle state */
3148 			udev->dbi_thresh = start = 0;
3149 			udev->dbi_max = 0;
3150 		} else {
3151 			udev->dbi_thresh = start = block + 1;
3152 			udev->dbi_max = block;
3153 		}
3154 
3155 		/* Here will truncate the data area from off */
3156 		off = udev->data_off + (loff_t)start * udev->data_blk_size;
3157 		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
3158 
3159 		/* Release the block pages */
3160 		pages_freed = tcmu_blocks_release(udev, start, end - 1);
3161 		mutex_unlock(&udev->cmdr_lock);
3162 
3163 		total_pages_freed += pages_freed;
3164 		total_blocks_freed += end - start;
3165 		pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
3166 			 pages_freed, total_pages_freed, end - start,
3167 			 total_blocks_freed, udev->name);
3168 	}
3169 	mutex_unlock(&root_udev_mutex);
3170 
3171 	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
3172 		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
3173 }
3174 
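/*
 * Walk the devices whose timers fired, expire overdue commands on both
 * the inflight and qfull queues and re-arm the corresponding timers.
 */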
3175 static void check_timedout_devices(void)
3176 {
3177 	struct tcmu_dev *udev, *tmp_dev;
3178 	struct tcmu_cmd *cmd, *tmp_cmd;
3179 	LIST_HEAD(devs);
3180 
3181 	spin_lock_bh(&timed_out_udevs_lock);
3182 	list_splice_init(&timed_out_udevs, &devs);
3183 
3184 	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
3185 		list_del_init(&udev->timedout_entry);
3186 		spin_unlock_bh(&timed_out_udevs_lock);
3187 
3188 		mutex_lock(&udev->cmdr_lock);
3189 
3190 		/*
3191 		 * If cmd_time_out is disabled but qfull is set deadline
3192 		 * will only reflect the qfull timeout. Ignore it.
3193 		 */
3194 		if (udev->cmd_time_out) {
3195 			list_for_each_entry_safe(cmd, tmp_cmd,
3196 						 &udev->inflight_queue,
3197 						 queue_entry) {
3198 				tcmu_check_expired_ring_cmd(cmd);
3199 			}
3200 			tcmu_set_next_deadline(&udev->inflight_queue,
3201 					       &udev->cmd_timer);
3202 		}
3203 		list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
3204 					 queue_entry) {
3205 			tcmu_check_expired_queue_cmd(cmd);
3206 		}
3207 		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
3208 
3209 		mutex_unlock(&udev->cmdr_lock);
3210 
3211 		spin_lock_bh(&timed_out_udevs_lock);
3212 	}
3213 
3214 	spin_unlock_bh(&timed_out_udevs_lock);
3215 }
3216 
3217 static void tcmu_unmap_work_fn(struct work_struct *work)
3218 {
3219 	check_timedout_devices();
3220 	find_free_blocks();
3221 }
3222 
3223 static int __init tcmu_module_init(void)
3224 {
3225 	int ret, i, k, len = 0;
3226 
3227 	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
3228 
3229 	INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);
3230 
3231 	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
3232 				sizeof(struct tcmu_cmd),
3233 				__alignof__(struct tcmu_cmd),
3234 				0, NULL);
3235 	if (!tcmu_cmd_cache)
3236 		return -ENOMEM;
3237 
3238 	tcmu_root_device = root_device_register("tcm_user");
3239 	if (IS_ERR(tcmu_root_device)) {
3240 		ret = PTR_ERR(tcmu_root_device);
3241 		goto out_free_cache;
3242 	}
3243 
3244 	ret = genl_register_family(&tcmu_genl_family);
3245 	if (ret < 0)
3246 		goto out_unreg_device;
3248 
3249 	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
3250 		len += sizeof(struct configfs_attribute *);
3251 	for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
3252 		len += sizeof(struct configfs_attribute *);
3253 	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
3254 		len += sizeof(struct configfs_attribute *);
3255 	len += sizeof(struct configfs_attribute *);
3256 
3257 	tcmu_attrs = kzalloc(len, GFP_KERNEL);
3258 	if (!tcmu_attrs) {
3259 		ret = -ENOMEM;
3260 		goto out_unreg_genl;
3261 	}
3262 
3263 	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
3264 		tcmu_attrs[i] = passthrough_attrib_attrs[i];
3265 	for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
3266 		tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
3267 	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
3268 		tcmu_attrs[i++] = tcmu_attrib_attrs[k];
3269 	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
3270 
3271 	ret = transport_backend_register(&tcmu_ops);
3272 	if (ret)
3273 		goto out_attrs;
3274 
3275 	return 0;
3276 
3277 out_attrs:
3278 	kfree(tcmu_attrs);
3279 out_unreg_genl:
3280 	genl_unregister_family(&tcmu_genl_family);
3281 out_unreg_device:
3282 	root_device_unregister(tcmu_root_device);
3283 out_free_cache:
3284 	kmem_cache_destroy(tcmu_cmd_cache);
3285 
3286 	return ret;
3287 }
3288 
3289 static void __exit tcmu_module_exit(void)
3290 {
3291 	cancel_delayed_work_sync(&tcmu_unmap_work);
3292 	target_backend_unregister(&tcmu_ops);
3293 	kfree(tcmu_attrs);
3294 	genl_unregister_family(&tcmu_genl_family);
3295 	root_device_unregister(tcmu_root_device);
3296 	kmem_cache_destroy(tcmu_cmd_cache);
3297 }
3298 
3299 MODULE_DESCRIPTION("TCM USER subsystem plugin");
3300 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
3301 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
3302 MODULE_LICENSE("GPL");
3303 
3304 module_init(tcmu_module_init);
3305 module_exit(tcmu_module_exit);
3306