1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (c) 2012 - 2015 UNISYS CORPORATION
4  * All rights reserved.
5  */
6 
7 #include <linux/debugfs.h>
8 #include <linux/kthread.h>
9 #include <linux/module.h>
10 #include <linux/seq_file.h>
11 #include <linux/visorbus.h>
12 #include <linux/xarray.h>
13 #include <scsi/scsi.h>
14 #include <scsi/scsi_host.h>
15 #include <scsi/scsi_cmnd.h>
16 #include <scsi/scsi_device.h>
17 
18 #include "iochannel.h"
19 
20 /* The Send and Receive Buffers of the IO Queue may both be full */
21 
22 #define IOS_ERROR_THRESHOLD  1000
23 #define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS * 2)
24 #define VISORHBA_ERROR_COUNT 30
25 
26 static struct dentry *visorhba_debugfs_dir;
27 
28 /* GUIDS for HBA channel type supported by this driver */
static struct visor_channeltype_descriptor visorhba_channel_types[] = {
	/* Note that the only channel type we expect to be reported by the
	 * bus driver is the VISOR_VHBA channel.
	 */
	{ VISOR_VHBA_CHANNEL_GUID, "sparvhba", sizeof(struct channel_header),
	  VISOR_VHBA_CHANNEL_VERSIONID },
	{}	/* zero terminator for the descriptor list */
};

/* Export the channel GUIDs so hotplug/modprobe can match this driver */
MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" VISOR_VHBA_CHANNEL_GUID_STR);
40 
/* Per-disk state hung off scsi_device->hostdata (see visorhba_slave_alloc) */
struct visordisk_info {
	struct scsi_device *sdev;
	/* NOTE(review): not referenced in this file -- confirm use elsewhere */
	u32 valid;
	/* countdown of good I/Os before error_count is cleared again */
	atomic_t ios_threshold;
	/* error tally, saturates at VISORHBA_ERROR_COUNT */
	atomic_t error_count;
	/* NOTE(review): not referenced in this file -- confirm use elsewhere */
	struct visordisk_info *next;
};
48 
/* One slot in the fixed-size table of requests in flight to the IOVM */
struct scsipending {
	/* Request/response packet exchanged with the Service Partition */
	struct uiscmdrsp cmdrsp;
	/* The Data being tracked; non-NULL marks the slot as in use */
	void *sent;
	/* Type of pointer that is being stored (CMD_*_TYPE) */
	char cmdtype;
};
56 
/* Each scsi_host has a host_data area that contains this struct. */
struct visorhba_devdata {
	struct Scsi_Host *scsihost;
	struct visor_device *dev;
	struct list_head dev_info_list;
	/* Tracks the requests that have been forwarded to
	 * the IOVM and haven't returned yet
	 */
	struct scsipending pending[MAX_PENDING_REQUESTS];
	/* Start search for next pending free slot here */
	unsigned int nextinsert;
	/* lock to protect data in devdata */
	spinlock_t privlock;
	/* true once the IO Service Partition has gone away */
	bool serverdown;
	/* true while a pause/resume transition is in progress */
	bool serverchangingstate;
	/* statistics exposed through the debugfs "info" file */
	unsigned long long acquire_failed_cnt;
	unsigned long long interrupts_rcvd;
	unsigned long long interrupts_notme;
	unsigned long long interrupts_disabled;
	u64 __iomem *flags_addr;
	struct visordisk_info head;
	/* largest scsi_bufflen() seen so far (debugfs statistic) */
	unsigned int max_buff_len;
	int devnum;
	struct uiscmdrsp *cmdrsp;
	/*
	 * allows us to pass int handles back-and-forth between us and
	 * iovm, instead of raw pointers
	 */
	struct xarray xa;
	struct dentry *debugfs_dir;
	struct dentry *debugfs_info;
};

/* Thin wrapper; NOTE(review): not referenced in this file */
struct visorhba_devices_open {
	struct visorhba_devdata *devdata;
};
93 
94 /*
95  * add_scsipending_entry - Save off io command that is pending in
96  *			   Service Partition
97  * @devdata: Pointer to devdata
98  * @cmdtype: Specifies the type of command pending
99  * @new:     The command to be saved
100  *
101  * Saves off the io command that is being handled by the Service
102  * Partition so that it can be handled when it completes. If new is
103  * NULL it is assumed the entry refers only to the cmdrsp.
104  *
105  * Return: Insert_location where entry was added on success,
106  *	   -EBUSY if it can't
107  */
add_scsipending_entry(struct visorhba_devdata * devdata,char cmdtype,void * new)108 static int add_scsipending_entry(struct visorhba_devdata *devdata,
109 				 char cmdtype, void *new)
110 {
111 	unsigned long flags;
112 	struct scsipending *entry;
113 	int insert_location;
114 
115 	spin_lock_irqsave(&devdata->privlock, flags);
116 	insert_location = devdata->nextinsert;
117 	while (devdata->pending[insert_location].sent) {
118 		insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
119 		if (insert_location == (int)devdata->nextinsert) {
120 			spin_unlock_irqrestore(&devdata->privlock, flags);
121 			return -EBUSY;
122 		}
123 	}
124 
125 	entry = &devdata->pending[insert_location];
126 	memset(&entry->cmdrsp, 0, sizeof(entry->cmdrsp));
127 	entry->cmdtype = cmdtype;
128 	if (new)
129 		entry->sent = new;
130 	/* wants to send cmdrsp */
131 	else
132 		entry->sent = &entry->cmdrsp;
133 	devdata->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
134 	spin_unlock_irqrestore(&devdata->privlock, flags);
135 
136 	return insert_location;
137 }
138 
139 /*
140  * del_scsipending_ent - Removes an entry from the pending array
141  * @devdata: Device holding the pending array
142  * @del:     Entry to remove
143  *
144  * Removes the entry pointed at by del and returns it.
145  *
146  * Return: The scsipending entry pointed to on success, NULL on failure
147  */
del_scsipending_ent(struct visorhba_devdata * devdata,int del)148 static void *del_scsipending_ent(struct visorhba_devdata *devdata, int del)
149 {
150 	unsigned long flags;
151 	void *sent;
152 
153 	if (del >= MAX_PENDING_REQUESTS)
154 		return NULL;
155 
156 	spin_lock_irqsave(&devdata->privlock, flags);
157 	sent = devdata->pending[del].sent;
158 	devdata->pending[del].cmdtype = 0;
159 	devdata->pending[del].sent = NULL;
160 	spin_unlock_irqrestore(&devdata->privlock, flags);
161 
162 	return sent;
163 }
164 
165 /*
166  * get_scsipending_cmdrsp - Return the cmdrsp stored in a pending entry
167  * @ddata: Device holding the pending array
168  * @ent:   Entry that stores the cmdrsp
169  *
170  * Each scsipending entry has a cmdrsp in it. The cmdrsp is only valid
171  * if the "sent" field is not NULL.
172  *
173  * Return: A pointer to the cmdrsp, NULL on failure
174  */
get_scsipending_cmdrsp(struct visorhba_devdata * ddata,int ent)175 static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
176 						int ent)
177 {
178 	if (ddata->pending[ent].sent)
179 		return &ddata->pending[ent].cmdrsp;
180 
181 	return NULL;
182 }
183 
184 /*
185  * setup_scsitaskmgmt_handles - Stash the necessary handles so that the
186  *				completion processing logic for a taskmgmt
187  *				cmd will be able to find who to wake up
188  *				and where to stash the result
189  * @xa:       The data object maintaining the pointer<-->int mappings
190  * @cmdrsp:   Response from the IOVM
191  * @event:    The event handle to associate with an id
192  * @result:   The location to place the result of the event handle into
193  */
static int setup_scsitaskmgmt_handles(struct xarray *xa, struct uiscmdrsp *cmdrsp,
				       wait_queue_head_t *event, int *result)
{
	u32 handle;
	int err;

	/* handle the IOVM echoes back so completion can find the waiter */
	err = xa_alloc_irq(xa, &handle, event, xa_limit_32b, GFP_KERNEL);
	if (err)
		return err;
	cmdrsp->scsitaskmgmt.notify_handle = handle;

	/* second handle locates the int that receives the result */
	err = xa_alloc_irq(xa, &handle, result, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_erase_notify;
	cmdrsp->scsitaskmgmt.notifyresult_handle = handle;
	return 0;

err_erase_notify:
	xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
	return err;
}
214 
215 /*
216  * cleanup_scsitaskmgmt_handles - Forget handles created by
217  *				  setup_scsitaskmgmt_handles()
218  * @xa: The data object maintaining the pointer<-->int mappings
219  * @cmdrsp:   Response from the IOVM
220  */
cleanup_scsitaskmgmt_handles(struct xarray * xa,struct uiscmdrsp * cmdrsp)221 static void cleanup_scsitaskmgmt_handles(struct xarray *xa,
222 					 struct uiscmdrsp *cmdrsp)
223 {
224 	xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notify_handle);
225 	xa_erase_irq(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
226 }
227 
228 /*
 * forward_taskmgmt_command - Send taskmgmt command to the Service
230  *			      Partition
231  * @tasktype: Type of taskmgmt command
232  * @scsidev:  Scsidev that issued command
233  *
234  * Create a cmdrsp packet and send it to the Service Partition
235  * that will service this request.
236  *
237  * Return: Int representing whether command was queued successfully or not
238  */
forward_taskmgmt_command(enum task_mgmt_types tasktype,struct scsi_device * scsidev)239 static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
240 				    struct scsi_device *scsidev)
241 {
242 	struct uiscmdrsp *cmdrsp;
243 	struct visorhba_devdata *devdata =
244 		(struct visorhba_devdata *)scsidev->host->hostdata;
245 	int notifyresult = 0xffff;
246 	wait_queue_head_t notifyevent;
247 	int scsicmd_id;
248 	int ret;
249 
250 	if (devdata->serverdown || devdata->serverchangingstate)
251 		return FAILED;
252 
253 	scsicmd_id = add_scsipending_entry(devdata, CMD_SCSITASKMGMT_TYPE,
254 					   NULL);
255 	if (scsicmd_id < 0)
256 		return FAILED;
257 
258 	cmdrsp = get_scsipending_cmdrsp(devdata, scsicmd_id);
259 
260 	init_waitqueue_head(&notifyevent);
261 
262 	/* issue TASK_MGMT_ABORT_TASK */
263 	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
264 
265 	ret = setup_scsitaskmgmt_handles(&devdata->xa, cmdrsp,
266 					 &notifyevent, &notifyresult);
267 	if (ret) {
268 		dev_dbg(&scsidev->sdev_gendev,
269 		        "visorhba: setup_scsitaskmgmt_handles returned %d\n", ret);
270 		return FAILED;
271 	}
272 
273 	/* save destination */
274 	cmdrsp->scsitaskmgmt.tasktype = tasktype;
275 	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
276 	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
277 	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
278 	cmdrsp->scsitaskmgmt.handle = scsicmd_id;
279 
280 	dev_dbg(&scsidev->sdev_gendev,
281 		"visorhba: initiating type=%d taskmgmt command\n", tasktype);
282 	if (visorchannel_signalinsert(devdata->dev->visorchannel,
283 				      IOCHAN_TO_IOPART,
284 				      cmdrsp))
285 		goto err_del_scsipending_ent;
286 
287 	/* It can take the Service Partition up to 35 seconds to complete
288 	 * an IO in some cases, so wait 45 seconds and error out
289 	 */
290 	if (!wait_event_timeout(notifyevent, notifyresult != 0xffff,
291 				msecs_to_jiffies(45000)))
292 		goto err_del_scsipending_ent;
293 
294 	dev_dbg(&scsidev->sdev_gendev,
295 		"visorhba: taskmgmt type=%d success; result=0x%x\n",
296 		 tasktype, notifyresult);
297 	cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
298 	return SUCCESS;
299 
300 err_del_scsipending_ent:
301 	dev_dbg(&scsidev->sdev_gendev,
302 		"visorhba: taskmgmt type=%d not executed\n", tasktype);
303 	del_scsipending_ent(devdata, scsicmd_id);
304 	cleanup_scsitaskmgmt_handles(&devdata->xa, cmdrsp);
305 	return FAILED;
306 }
307 
308 /*
309  * visorhba_abort_handler - Send TASK_MGMT_ABORT_TASK
310  * @scsicmd: The scsicmd that needs aborted
311  *
312  * Return: SUCCESS if inserted, FAILED otherwise
313  */
visorhba_abort_handler(struct scsi_cmnd * scsicmd)314 static int visorhba_abort_handler(struct scsi_cmnd *scsicmd)
315 {
316 	/* issue TASK_MGMT_ABORT_TASK */
317 	struct scsi_device *scsidev;
318 	struct visordisk_info *vdisk;
319 	int rtn;
320 
321 	scsidev = scsicmd->device;
322 	vdisk = scsidev->hostdata;
323 	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
324 		atomic_inc(&vdisk->error_count);
325 	else
326 		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
327 	rtn = forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsidev);
328 	if (rtn == SUCCESS) {
329 		scsicmd->result = DID_ABORT << 16;
330 		scsicmd->scsi_done(scsicmd);
331 	}
332 	return rtn;
333 }
334 
335 /*
336  * visorhba_device_reset_handler - Send TASK_MGMT_LUN_RESET
337  * @scsicmd: The scsicmd that needs aborted
338  *
339  * Return: SUCCESS if inserted, FAILED otherwise
340  */
visorhba_device_reset_handler(struct scsi_cmnd * scsicmd)341 static int visorhba_device_reset_handler(struct scsi_cmnd *scsicmd)
342 {
343 	/* issue TASK_MGMT_LUN_RESET */
344 	struct scsi_device *scsidev;
345 	struct visordisk_info *vdisk;
346 	int rtn;
347 
348 	scsidev = scsicmd->device;
349 	vdisk = scsidev->hostdata;
350 	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
351 		atomic_inc(&vdisk->error_count);
352 	else
353 		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
354 	rtn = forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsidev);
355 	if (rtn == SUCCESS) {
356 		scsicmd->result = DID_RESET << 16;
357 		scsicmd->scsi_done(scsicmd);
358 	}
359 	return rtn;
360 }
361 
362 /*
363  * visorhba_bus_reset_handler - Send TASK_MGMT_TARGET_RESET for each
364  *				target on the bus
365  * @scsicmd: The scsicmd that needs aborted
366  *
367  * Return: SUCCESS if inserted, FAILED otherwise
368  */
visorhba_bus_reset_handler(struct scsi_cmnd * scsicmd)369 static int visorhba_bus_reset_handler(struct scsi_cmnd *scsicmd)
370 {
371 	struct scsi_device *scsidev;
372 	struct visordisk_info *vdisk;
373 	int rtn;
374 
375 	scsidev = scsicmd->device;
376 	shost_for_each_device(scsidev, scsidev->host) {
377 		vdisk = scsidev->hostdata;
378 		if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT)
379 			atomic_inc(&vdisk->error_count);
380 		else
381 			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
382 	}
383 	rtn = forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsidev);
384 	if (rtn == SUCCESS) {
385 		scsicmd->result = DID_RESET << 16;
386 		scsicmd->scsi_done(scsicmd);
387 	}
388 	return rtn;
389 }
390 
391 /*
392  * visorhba_host_reset_handler - Not supported
393  * @scsicmd: The scsicmd that needs to be aborted
394  *
395  * Return: Not supported, return SUCCESS
396  */
static int visorhba_host_reset_handler(struct scsi_cmnd *scsicmd)
{
	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
	/* Host reset is not implemented; report SUCCESS so the midlayer
	 * considers recovery complete rather than escalating further.
	 */
	return SUCCESS;
}
402 
403 /*
404  * visorhba_get_info - Get information about SCSI device
405  * @shp: Scsi host that is requesting information
406  *
407  * Return: String with visorhba information
408  */
static const char *visorhba_get_info(struct Scsi_Host *shp)
{
	/* Return version string; static identifier, host state is unused */
	return "visorhba";
}
414 
415 /*
416  * dma_data_dir_linux_to_spar - convert dma_data_direction value to
417  *				Unisys-specific equivalent
418  * @d: dma direction value to convert
419  *
420  * Returns the Unisys-specific dma direction value corresponding to @d
421  */
dma_data_dir_linux_to_spar(enum dma_data_direction d)422 static u32 dma_data_dir_linux_to_spar(enum dma_data_direction d)
423 {
424 	switch (d) {
425 	case DMA_BIDIRECTIONAL:
426 		return UIS_DMA_BIDIRECTIONAL;
427 	case DMA_TO_DEVICE:
428 		return UIS_DMA_TO_DEVICE;
429 	case DMA_FROM_DEVICE:
430 		return UIS_DMA_FROM_DEVICE;
431 	case DMA_NONE:
432 		return UIS_DMA_NONE;
433 	default:
434 		return UIS_DMA_NONE;
435 	}
436 }
437 
438 /*
439  * visorhba_queue_command_lck - Queues command to the Service Partition
440  * @scsicmd:		Command to be queued
 * @visorhba_cmnd_done: Done command to call when scsicmd is returned
442  *
443  * Queues to scsicmd to the ServicePartition after converting it to a
444  * uiscmdrsp structure.
445  *
446  * Return: 0 if successfully queued to the Service Partition, otherwise
447  *	   error code
448  */
static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
				      void (*visorhba_cmnd_done)
					   (struct scsi_cmnd *))
{
	struct uiscmdrsp *cmdrsp;
	struct scsi_device *scsidev = scsicmd->device;
	int insert_location;
	unsigned char *cdb = scsicmd->cmnd;
	struct Scsi_Host *scsihost = scsidev->host;
	unsigned int i;
	struct visorhba_devdata *devdata =
		(struct visorhba_devdata *)scsihost->hostdata;
	struct scatterlist *sg = NULL;
	struct scatterlist *sglist = NULL;

	/* Reject new work while the Service Partition is down or in a
	 * pause/resume transition; the midlayer will requeue.
	 */
	if (devdata->serverdown || devdata->serverchangingstate)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	/* Reserve a tracking slot; its index doubles as the handle that
	 * the IOVM echoes back in the response.
	 */
	insert_location = add_scsipending_entry(devdata, CMD_SCSI_TYPE,
						(void *)scsicmd);
	if (insert_location < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	cmdrsp = get_scsipending_cmdrsp(devdata, insert_location);
	cmdrsp->cmdtype = CMD_SCSI_TYPE;
	/* save the pending insertion location. Deletion from pending
	 * will return the scsicmd pointer for completion
	 */
	cmdrsp->scsi.handle = insert_location;

	/* save done function that we have call when cmd is complete */
	scsicmd->scsi_done = visorhba_cmnd_done;
	/* save destination */
	cmdrsp->scsi.vdest.channel = scsidev->channel;
	cmdrsp->scsi.vdest.id = scsidev->id;
	cmdrsp->scsi.vdest.lun = scsidev->lun;
	/* save datadir */
	cmdrsp->scsi.data_dir =
		dma_data_dir_linux_to_spar(scsicmd->sc_data_direction);
	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);

	/* keep track of the max buffer length so far. */
	if (cmdrsp->scsi.bufflen > devdata->max_buff_len)
		devdata->max_buff_len = cmdrsp->scsi.bufflen;

	/* the channel packet can only describe MAX_PHYS_INFO segments */
	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO)
		goto err_del_scsipending_ent;

	/* convert buffer to phys information  */
	/* buffer is scatterlist - copy it out */
	sglist = scsi_sglist(scsicmd);

	for_each_sg(sglist, sg, scsi_sg_count(scsicmd), i) {
		cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
		cmdrsp->scsi.gpi_list[i].length = sg->length;
	}
	cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);

	if (visorchannel_signalinsert(devdata->dev->visorchannel,
				      IOCHAN_TO_IOPART,
				      cmdrsp))
		/* queue must be full and we aren't going to wait */
		goto err_del_scsipending_ent;

	return 0;

err_del_scsipending_ent:
	/* undo the slot reservation and let the midlayer retry */
	del_scsipending_ent(devdata, insert_location);
	return SCSI_MLQUEUE_DEVICE_BUSY;
}
520 
521 #ifdef DEF_SCSI_QCMD
/* Generate visorhba_queue_command(): the SCSI midlayer's DEF_SCSI_QCMD
 * macro wraps the _lck variant; older kernels without it get a plain alias.
 */
#ifdef DEF_SCSI_QCMD
static DEF_SCSI_QCMD(visorhba_queue_command)
#else
#define visorhba_queue_command visorhba_queue_command_lck
#endif
526 
527 /*
528  * visorhba_slave_alloc - Called when new disk is discovered
529  * @scsidev: New disk
530  *
531  * Create a new visordisk_info structure and add it to our
532  * list of vdisks.
533  *
534  * Return: 0 on success, -ENOMEM on failure.
535  */
536 static int visorhba_slave_alloc(struct scsi_device *scsidev)
537 {
538 	/* this is called by the midlayer before scan for new devices --
539 	 * LLD can alloc any struct & do init if needed.
540 	 */
541 	struct visordisk_info *vdisk;
542 	struct visorhba_devdata *devdata;
543 	struct Scsi_Host *scsihost = (struct Scsi_Host *)scsidev->host;
544 
545 	/* already allocated return success */
546 	if (scsidev->hostdata)
547 		return 0;
548 
549 	/* even though we errored, treat as success */
550 	devdata = (struct visorhba_devdata *)scsihost->hostdata;
551 	if (!devdata)
552 		return 0;
553 
554 	vdisk = kzalloc(sizeof(*vdisk), GFP_ATOMIC);
555 	if (!vdisk)
556 		return -ENOMEM;
557 
558 	vdisk->sdev = scsidev;
559 	scsidev->hostdata = vdisk;
560 	return 0;
561 }
562 
563 /*
564  * visorhba_slave_destroy - Disk is going away, clean up resources.
565  * @scsidev: Scsi device to destroy
566  */
visorhba_slave_destroy(struct scsi_device * scsidev)567 static void visorhba_slave_destroy(struct scsi_device *scsidev)
568 {
569 	/* midlevel calls this after device has been quiesced and
570 	 * before it is to be deleted.
571 	 */
572 	struct visordisk_info *vdisk;
573 
574 	vdisk = scsidev->hostdata;
575 	scsidev->hostdata = NULL;
576 	kfree(vdisk);
577 }
578 
/* Host template wiring visorhba's entry points into the SCSI midlayer */
static struct scsi_host_template visorhba_driver_template = {
	.name = "Unisys Visor HBA",
	.info = visorhba_get_info,
	.queuecommand = visorhba_queue_command,
	.eh_abort_handler = visorhba_abort_handler,
	.eh_device_reset_handler = visorhba_device_reset_handler,
	.eh_bus_reset_handler = visorhba_bus_reset_handler,
	.eh_host_reset_handler = visorhba_host_reset_handler,
	.shost_attrs = NULL,
	/* max simultaneously outstanding commands accepted per host */
#define visorhba_MAX_CMNDS 128
	.can_queue = visorhba_MAX_CMNDS,
	.sg_tablesize = 64,
	.this_id = -1,
	.slave_alloc = visorhba_slave_alloc,
	.slave_destroy = visorhba_slave_destroy,
};
595 
596 /*
597  * info_debugfs_show - Debugfs interface to dump visorhba states
598  * @seq: The sequence file to write information to
599  * @v:   Unused, but needed for use with seq file single_open invocation
600  *
601  * Presents a file in the debugfs tree named: /visorhba/vbus<x>:dev<y>/info.
602  *
603  * Return: SUCCESS
604  */
static int info_debugfs_show(struct seq_file *seq, void *v)
{
	/* seq->private was set to the devdata when the file was created */
	struct visorhba_devdata *devdata = seq->private;

	seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
	seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
	seq_printf(seq, "interrupts_disabled = %llu\n",
		   devdata->interrupts_disabled);
	seq_printf(seq, "interrupts_notme = %llu\n",
		   devdata->interrupts_notme);
	seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
	if (devdata->flags_addr) {
		/* also report the physical address and current contents
		 * of the channel's feature-flags word
		 */
		u64 phys_flags_addr =
			virt_to_phys((__force  void *)devdata->flags_addr);
		seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
			   phys_flags_addr);
		seq_printf(seq, "FeatureFlags = %llu\n",
			   (u64)readq(devdata->flags_addr));
	}
	seq_printf(seq, "acquire_failed_cnt = %llu\n",
		   devdata->acquire_failed_cnt);

	return 0;
}
/* generates info_debugfs_open/fops for single_open() on the show fn */
DEFINE_SHOW_ATTRIBUTE(info_debugfs);
630 
631 /*
632  * complete_taskmgmt_command - Complete task management
 * @xa:       The data object maintaining the pointer<-->int mappings
634  * @cmdrsp:   Response from the IOVM
635  * @result:   The result of the task management command
636  *
637  * Service Partition returned the result of the task management
638  * command. Wake up anyone waiting for it.
639  */
complete_taskmgmt_command(struct xarray * xa,struct uiscmdrsp * cmdrsp,int result)640 static void complete_taskmgmt_command(struct xarray *xa,
641 				      struct uiscmdrsp *cmdrsp, int result)
642 {
643 	wait_queue_head_t *wq =
644 		xa_load(xa, cmdrsp->scsitaskmgmt.notify_handle);
645 	int *scsi_result_ptr =
646 		xa_load(xa, cmdrsp->scsitaskmgmt.notifyresult_handle);
647 	if (unlikely(!(wq && scsi_result_ptr))) {
648 		pr_err("visorhba: no completion context; cmd will time out\n");
649 		return;
650 	}
651 
652 	/* copy the result of the taskmgmt and
653 	 * wake up the error handler that is waiting for this
654 	 */
655 	pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
656 	*scsi_result_ptr = result;
657 	wake_up_all(wq);
658 }
659 
660 /*
661  * visorhba_serverdown_complete - Called when we are done cleaning up
662  *				  from serverdown
663  * @devdata: Visorhba instance on which to complete serverdown
664  *
 * Called when we are done cleaning up from serverdown, stop processing
666  * queue, fail pending IOs.
667  */
visorhba_serverdown_complete(struct visorhba_devdata * devdata)668 static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
669 {
670 	int i;
671 	struct scsipending *pendingdel = NULL;
672 	struct scsi_cmnd *scsicmd = NULL;
673 	struct uiscmdrsp *cmdrsp;
674 	unsigned long flags;
675 
676 	/* Stop using the IOVM response queue (queue should be drained
677 	 * by the end)
678 	 */
679 	visorbus_disable_channel_interrupts(devdata->dev);
680 
681 	/* Fail commands that weren't completed */
682 	spin_lock_irqsave(&devdata->privlock, flags);
683 	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
684 		pendingdel = &devdata->pending[i];
685 		switch (pendingdel->cmdtype) {
686 		case CMD_SCSI_TYPE:
687 			scsicmd = pendingdel->sent;
688 			scsicmd->result = DID_RESET << 16;
689 			if (scsicmd->scsi_done)
690 				scsicmd->scsi_done(scsicmd);
691 			break;
692 		case CMD_SCSITASKMGMT_TYPE:
693 			cmdrsp = pendingdel->sent;
694 			complete_taskmgmt_command(&devdata->xa, cmdrsp,
695 						  TASK_MGMT_FAILED);
696 			break;
697 		default:
698 			break;
699 		}
700 		pendingdel->cmdtype = 0;
701 		pendingdel->sent = NULL;
702 	}
703 	spin_unlock_irqrestore(&devdata->privlock, flags);
704 
705 	devdata->serverdown = true;
706 	devdata->serverchangingstate = false;
707 }
708 
709 /*
710  * visorhba_serverdown - Got notified that the IOVM is down
711  * @devdata: Visorhba that is being serviced by downed IOVM
712  *
713  * Something happened to the IOVM, return immediately and
714  * schedule cleanup work.
715  *
716  * Return: 0 on success, -EINVAL on failure
717  */
visorhba_serverdown(struct visorhba_devdata * devdata)718 static int visorhba_serverdown(struct visorhba_devdata *devdata)
719 {
720 	if (!devdata->serverdown && !devdata->serverchangingstate) {
721 		devdata->serverchangingstate = true;
722 		visorhba_serverdown_complete(devdata);
723 	} else if (devdata->serverchangingstate) {
724 		return -EINVAL;
725 	}
726 	return 0;
727 }
728 
729 /*
730  * do_scsi_linuxstat - Scsi command returned linuxstat
731  * @cmdrsp:  Response from IOVM
732  * @scsicmd: Command issued
733  *
734  * Don't log errors for disk-not-present inquiries.
735  */
do_scsi_linuxstat(struct uiscmdrsp * cmdrsp,struct scsi_cmnd * scsicmd)736 static void do_scsi_linuxstat(struct uiscmdrsp *cmdrsp,
737 			      struct scsi_cmnd *scsicmd)
738 {
739 	struct visordisk_info *vdisk;
740 	struct scsi_device *scsidev;
741 
742 	scsidev = scsicmd->device;
743 	memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
744 
745 	/* Do not log errors for disk-not-present inquiries */
746 	if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
747 	    (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
748 	    cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT)
749 		return;
750 	/* Okay see what our error_count is here.... */
751 	vdisk = scsidev->hostdata;
752 	if (atomic_read(&vdisk->error_count) < VISORHBA_ERROR_COUNT) {
753 		atomic_inc(&vdisk->error_count);
754 		atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
755 	}
756 }
757 
/* Fill @buf with a canned INQUIRY response describing a pseudo device
 * (a "processor" at LUN 0, "not capable" elsewhere) so no disk appears.
 * Returns 0 on success, -EINVAL if @len cannot hold the full response.
 */
static int set_no_disk_inquiry_result(unsigned char *buf, size_t len,
				      bool is_lun0)
{
	if (len < NO_DISK_INQUIRY_RESULT_LEN)
		return -EINVAL;

	memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
	buf[0] = is_lun0 ? DEV_DISK_CAPABLE_NOT_PRESENT : DEV_NOT_CAPABLE;
	buf[2] = SCSI_SPC2_VER;
	if (is_lun0)
		buf[3] = DEV_HISUPPORT;
	buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
	strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
	return 0;
}
775 
776 /*
777  * do_scsi_nolinuxstat - Scsi command didn't have linuxstat
778  * @cmdrsp:  Response from IOVM
779  * @scsicmd: Command issued
780  *
781  * Handle response when no linuxstat was returned.
782  */
do_scsi_nolinuxstat(struct uiscmdrsp * cmdrsp,struct scsi_cmnd * scsicmd)783 static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp,
784 				struct scsi_cmnd *scsicmd)
785 {
786 	struct scsi_device *scsidev;
787 	unsigned char *buf;
788 	struct scatterlist *sg;
789 	unsigned int i;
790 	char *this_page;
791 	char *this_page_orig;
792 	int bufind = 0;
793 	struct visordisk_info *vdisk;
794 
795 	scsidev = scsicmd->device;
796 	if (cmdrsp->scsi.cmnd[0] == INQUIRY &&
797 	    cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN) {
798 		if (cmdrsp->scsi.no_disk_result == 0)
799 			return;
800 
801 		buf = kzalloc(36, GFP_KERNEL);
802 		if (!buf)
803 			return;
804 
805 		/* Linux scsi code wants a device at Lun 0
806 		 * to issue report luns, but we don't want
807 		 * a disk there so we'll present a processor
808 		 * there.
809 		 */
810 		set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
811 					   scsidev->lun == 0);
812 
813 		if (scsi_sg_count(scsicmd) == 0) {
814 			memcpy(scsi_sglist(scsicmd), buf,
815 			       cmdrsp->scsi.bufflen);
816 			kfree(buf);
817 			return;
818 		}
819 
820 		scsi_for_each_sg(scsicmd, sg, scsi_sg_count(scsicmd), i) {
821 			this_page_orig = kmap_atomic(sg_page(sg));
822 			this_page = (void *)((unsigned long)this_page_orig |
823 					     sg->offset);
824 			memcpy(this_page, buf + bufind, sg->length);
825 			kunmap_atomic(this_page_orig);
826 		}
827 		kfree(buf);
828 	} else {
829 		vdisk = scsidev->hostdata;
830 		if (atomic_read(&vdisk->ios_threshold) > 0) {
831 			atomic_dec(&vdisk->ios_threshold);
832 			if (atomic_read(&vdisk->ios_threshold) == 0)
833 				atomic_set(&vdisk->error_count, 0);
834 		}
835 	}
836 }
837 
838 /*
839  * complete_scsi_command - Complete a scsi command
 * @cmdrsp:    Response from Service Partition
841  * @scsicmd:   The scsi command
842  *
843  * Response was returned by the Service Partition. Finish it and send
844  * completion to the scsi midlayer.
845  */
static void complete_scsi_command(struct uiscmdrsp *cmdrsp,
				  struct scsi_cmnd *scsicmd)
{
	/* take what we need out of cmdrsp and complete the scsicmd */
	scsicmd->result = cmdrsp->scsi.linuxstat;
	/* post-process depending on whether the SP supplied a linux status */
	if (cmdrsp->scsi.linuxstat)
		do_scsi_linuxstat(cmdrsp, scsicmd);
	else
		do_scsi_nolinuxstat(cmdrsp, scsicmd);

	/* hand the finished command back to the midlayer */
	scsicmd->scsi_done(scsicmd);
}
858 
859 /*
860  * drain_queue - Pull responses out of iochannel
861  * @cmdrsp:  Response from the IOSP
862  * @devdata: Device that owns this iochannel
863  *
864  * Pulls responses out of the iochannel and process the responses.
865  */
drain_queue(struct uiscmdrsp * cmdrsp,struct visorhba_devdata * devdata)866 static void drain_queue(struct uiscmdrsp *cmdrsp,
867 			struct visorhba_devdata *devdata)
868 {
869 	struct scsi_cmnd *scsicmd;
870 
871 	while (1) {
872 		/* queue empty */
873 		if (visorchannel_signalremove(devdata->dev->visorchannel,
874 					      IOCHAN_FROM_IOPART,
875 					      cmdrsp))
876 			break;
877 		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
878 			/* scsicmd location is returned by the
879 			 * deletion
880 			 */
881 			scsicmd = del_scsipending_ent(devdata,
882 						      cmdrsp->scsi.handle);
883 			if (!scsicmd)
884 				break;
885 			/* complete the orig cmd */
886 			complete_scsi_command(cmdrsp, scsicmd);
887 		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
888 			if (!del_scsipending_ent(devdata,
889 						 cmdrsp->scsitaskmgmt.handle))
890 				break;
891 			complete_taskmgmt_command(&devdata->xa, cmdrsp,
892 						  cmdrsp->scsitaskmgmt.result);
893 		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
894 			dev_err_once(&devdata->dev->device,
895 				     "ignoring unsupported NOTIFYGUEST\n");
896 		/* cmdrsp is now available for re-use */
897 	}
898 }
899 
900 /*
901  * This is used only when this driver is active as an hba driver in the
902  * client guest partition.  It is called periodically so we can obtain
903  * and process the command respond from the IO Service Partition periodically.
904  */
visorhba_channel_interrupt(struct visor_device * dev)905 static void visorhba_channel_interrupt(struct visor_device *dev)
906 {
907 	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
908 
909 	if (!devdata)
910 		return;
911 
912 	drain_queue(devdata->cmdrsp, devdata);
913 }
914 
915 /*
916  * visorhba_pause - Function to handle visorbus pause messages
917  * @dev:	   Device that is pausing
918  * @complete_func: Function to call when finished
919  *
920  * Something has happened to the IO Service Partition that is
921  * handling this device. Quiet this device and reset commands
922  * so that the Service Partition can be corrected.
923  *
924  * Return: SUCCESS
925  */
static int visorhba_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func)
{
	/* quiesce the HBA, then acknowledge the pause to the bus driver */
	visorhba_serverdown(dev_get_drvdata(&dev->device));
	complete_func(dev, 0);
	return 0;
}
935 
936 /*
937  * visorhba_resume - Function called when the IO Service Partition is back
938  * @dev:	   Device that is pausing
939  * @complete_func: Function to call when finished
940  *
941  * Yay! The IO Service Partition is back, the channel has been wiped
942  * so lets re-establish connection and start processing responses.
943  *
944  * Return: 0 on success, -EINVAL on failure
945  */
visorhba_resume(struct visor_device * dev,visorbus_state_complete_func complete_func)946 static int visorhba_resume(struct visor_device *dev,
947 			   visorbus_state_complete_func complete_func)
948 {
949 	struct visorhba_devdata *devdata;
950 
951 	devdata = dev_get_drvdata(&dev->device);
952 	if (!devdata)
953 		return -EINVAL;
954 
955 	if (devdata->serverdown && !devdata->serverchangingstate)
956 		devdata->serverchangingstate = true;
957 
958 	visorbus_enable_channel_interrupts(dev);
959 	devdata->serverdown = false;
960 	devdata->serverchangingstate = false;
961 
962 	return 0;
963 }
964 
965 /*
966  * visorhba_probe - Device has been discovered; do acquire
967  * @dev: visor_device that was discovered
968  *
969  * A new HBA was discovered; do the initial connections of it.
970  *
971  * Return: 0 on success, otherwise error code
972  */
visorhba_probe(struct visor_device * dev)973 static int visorhba_probe(struct visor_device *dev)
974 {
975 	struct Scsi_Host *scsihost;
976 	struct vhba_config_max max;
977 	struct visorhba_devdata *devdata = NULL;
978 	int err, channel_offset;
979 	u64 features;
980 
981 	scsihost = scsi_host_alloc(&visorhba_driver_template,
982 				   sizeof(*devdata));
983 	if (!scsihost)
984 		return -ENODEV;
985 
986 	channel_offset = offsetof(struct visor_io_channel, vhba.max);
987 	err = visorbus_read_channel(dev, channel_offset, &max,
988 				    sizeof(struct vhba_config_max));
989 	if (err < 0)
990 		goto err_scsi_host_put;
991 
992 	scsihost->max_id = (unsigned int)max.max_id;
993 	scsihost->max_lun = (unsigned int)max.max_lun;
994 	scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
995 	scsihost->max_sectors =
996 	    (unsigned short)(max.max_io_size >> 9);
997 	scsihost->sg_tablesize =
998 	    (unsigned short)(max.max_io_size / PAGE_SIZE);
999 	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
1000 		scsihost->sg_tablesize = MAX_PHYS_INFO;
1001 	err = scsi_add_host(scsihost, &dev->device);
1002 	if (err < 0)
1003 		goto err_scsi_host_put;
1004 
1005 	devdata = (struct visorhba_devdata *)scsihost->hostdata;
1006 	devdata->dev = dev;
1007 	dev_set_drvdata(&dev->device, devdata);
1008 
1009 	devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
1010 						  visorhba_debugfs_dir);
1011 	if (!devdata->debugfs_dir) {
1012 		err = -ENOMEM;
1013 		goto err_scsi_remove_host;
1014 	}
1015 	devdata->debugfs_info =
1016 		debugfs_create_file("info", 0440,
1017 				    devdata->debugfs_dir, devdata,
1018 				    &info_debugfs_fops);
1019 	if (!devdata->debugfs_info) {
1020 		err = -ENOMEM;
1021 		goto err_debugfs_dir;
1022 	}
1023 
1024 	spin_lock_init(&devdata->privlock);
1025 	devdata->serverdown = false;
1026 	devdata->serverchangingstate = false;
1027 	devdata->scsihost = scsihost;
1028 
1029 	channel_offset = offsetof(struct visor_io_channel,
1030 				  channel_header.features);
1031 	err = visorbus_read_channel(dev, channel_offset, &features, 8);
1032 	if (err)
1033 		goto err_debugfs_info;
1034 	features |= VISOR_CHANNEL_IS_POLLING;
1035 	err = visorbus_write_channel(dev, channel_offset, &features, 8);
1036 	if (err)
1037 		goto err_debugfs_info;
1038 
1039 	xa_init(&devdata->xa);
1040 
1041 	devdata->cmdrsp = kmalloc(sizeof(*devdata->cmdrsp), GFP_ATOMIC);
1042 	visorbus_enable_channel_interrupts(dev);
1043 
1044 	scsi_scan_host(scsihost);
1045 
1046 	return 0;
1047 
1048 err_debugfs_info:
1049 	debugfs_remove(devdata->debugfs_info);
1050 
1051 err_debugfs_dir:
1052 	debugfs_remove_recursive(devdata->debugfs_dir);
1053 
1054 err_scsi_remove_host:
1055 	scsi_remove_host(scsihost);
1056 
1057 err_scsi_host_put:
1058 	scsi_host_put(scsihost);
1059 	return err;
1060 }
1061 
1062 /*
1063  * visorhba_remove - Remove a visorhba device
1064  * @dev: Device to remove
1065  *
1066  * Removes the visorhba device.
1067  */
visorhba_remove(struct visor_device * dev)1068 static void visorhba_remove(struct visor_device *dev)
1069 {
1070 	struct visorhba_devdata *devdata = dev_get_drvdata(&dev->device);
1071 	struct Scsi_Host *scsihost = NULL;
1072 
1073 	if (!devdata)
1074 		return;
1075 
1076 	scsihost = devdata->scsihost;
1077 	kfree(devdata->cmdrsp);
1078 	visorbus_disable_channel_interrupts(dev);
1079 	scsi_remove_host(scsihost);
1080 	scsi_host_put(scsihost);
1081 
1082 	dev_set_drvdata(&dev->device, NULL);
1083 	debugfs_remove(devdata->debugfs_info);
1084 	debugfs_remove_recursive(devdata->debugfs_dir);
1085 }
1086 
/* This is used to tell the visorbus driver which types of visor devices
 * we support, and what functions to call when a visor device that we support
 * is attached or removed.
 */
static struct visor_driver visorhba_driver = {
	.name = "visorhba",
	.owner = THIS_MODULE,
	.channel_types = visorhba_channel_types,
	.probe = visorhba_probe,
	.remove = visorhba_remove,
	/* pause/resume handle the IO Service Partition going away and
	 * coming back; see visorhba_pause()/visorhba_resume().
	 */
	.pause = visorhba_pause,
	.resume = visorhba_resume,
	/* Periodic channel servicing; drains completed responses. */
	.channel_interrupt = visorhba_channel_interrupt,
};
1101 
1102 /*
1103  * visorhba_init - Driver init routine
1104  *
1105  * Initialize the visorhba driver and register it with visorbus
1106  * to handle s-Par virtual host bus adapter.
1107  *
1108  * Return: 0 on success, error code otherwise
1109  */
visorhba_init(void)1110 static int visorhba_init(void)
1111 {
1112 	int rc;
1113 
1114 	visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
1115 	if (!visorhba_debugfs_dir)
1116 		return -ENOMEM;
1117 
1118 	rc = visorbus_register_visor_driver(&visorhba_driver);
1119 	if (rc)
1120 		goto cleanup_debugfs;
1121 
1122 	return 0;
1123 
1124 cleanup_debugfs:
1125 	debugfs_remove_recursive(visorhba_debugfs_dir);
1126 
1127 	return rc;
1128 }
1129 
/*
 * visorhba_exit - Driver exit routine
 *
 * Unregister driver from the bus and free up memory.
 */
static void visorhba_exit(void)
{
	/* Detach from visorbus first so no device can probe or interrupt
	 * while we tear down.
	 */
	visorbus_unregister_visor_driver(&visorhba_driver);
	/* Remove the module-wide debugfs tree created in visorhba_init(). */
	debugfs_remove_recursive(visorhba_debugfs_dir);
}
1140 
/* Module entry/exit points and metadata. */
module_init(visorhba_init);
module_exit(visorhba_exit);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("s-Par HBA driver for virtual SCSI host busses");
1147