1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * zfcp device driver
4  *
5  * Implementation of FSF commands.
6  *
7  * Copyright IBM Corp. 2002, 2020
8  */
9 
10 #define KMSG_COMPONENT "zfcp"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 
13 #include <linux/blktrace_api.h>
14 #include <linux/jiffies.h>
15 #include <linux/types.h>
16 #include <linux/slab.h>
17 #include <scsi/fc/fc_els.h>
18 #include "zfcp_ext.h"
19 #include "zfcp_fc.h"
20 #include "zfcp_dbf.h"
21 #include "zfcp_qdio.h"
22 #include "zfcp_reqlist.h"
23 #include "zfcp_diag.h"
24 
25 /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
26 #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
27 /* timeout for: exchange config/port data outside ERP, or open/close WKA port */
28 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
29 
30 struct kmem_cache *zfcp_fsf_qtcb_cache;
31 
32 static bool ber_stop = true;
33 module_param(ber_stop, bool, 0600);
34 MODULE_PARM_DESC(ber_stop,
		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of the threshold (default on)");
36 
static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
38 {
39 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
40 	struct zfcp_adapter *adapter = fsf_req->adapter;
41 
42 	zfcp_qdio_siosl(adapter);
43 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
44 				"fsrth_1");
45 }
46 
static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
48 				 unsigned long timeout)
49 {
50 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
51 	fsf_req->timer.expires = jiffies + timeout;
52 	add_timer(&fsf_req->timer);
53 }
54 
static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
56 {
57 	BUG_ON(!fsf_req->erp_action);
58 	fsf_req->timer.function = zfcp_erp_timeout_handler;
59 	fsf_req->timer.expires = jiffies + 30 * HZ;
60 	add_timer(&fsf_req->timer);
61 }
62 
63 /* association between FSF command and FSF QTCB type */
64 static u32 fsf_qtcb_type[] = {
65 	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
66 	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
67 	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
68 	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
69 	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
70 	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
71 	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
72 	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
73 	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
74 	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
75 	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
76 	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
77 	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
78 };
79 
static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
81 {
82 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
83 		"operational because of an unsupported FC class\n");
84 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
85 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
86 }
87 
88 /**
89  * zfcp_fsf_req_free - free memory used by fsf request
90  * @req: pointer to struct zfcp_fsf_req
91  */
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
93 {
94 	if (likely(req->pool)) {
95 		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
96 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
97 		mempool_free(req, req->pool);
98 		return;
99 	}
100 
101 	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
102 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
103 	kfree(req);
104 }
105 
static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
107 {
108 	unsigned long flags;
109 	struct fsf_status_read_buffer *sr_buf = req->data;
110 	struct zfcp_adapter *adapter = req->adapter;
111 	struct zfcp_port *port;
112 	int d_id = ntoh24(sr_buf->d_id);
113 
114 	read_lock_irqsave(&adapter->port_list_lock, flags);
115 	list_for_each_entry(port, &adapter->port_list, list)
116 		if (port->d_id == d_id) {
117 			zfcp_erp_port_reopen(port, 0, "fssrpc1");
118 			break;
119 		}
120 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
121 }
122 
void zfcp_fsf_fc_host_link_down(struct zfcp_adapter *adapter)
124 {
125 	struct Scsi_Host *shost = adapter->scsi_host;
126 
127 	adapter->hydra_version = 0;
128 	adapter->peer_wwpn = 0;
129 	adapter->peer_wwnn = 0;
130 	adapter->peer_d_id = 0;
131 
132 	/* if there is no shost yet, we have nothing to zero-out */
133 	if (shost == NULL)
134 		return;
135 
136 	fc_host_port_id(shost) = 0;
137 	fc_host_fabric_name(shost) = 0;
138 	fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
139 	fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
140 	snprintf(fc_host_model(shost), FC_SYMBOLIC_NAME_SIZE, "0x%04x", 0);
141 	memset(fc_host_active_fc4s(shost), 0, FC_FC4_LIST_SIZE);
142 }
143 
static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
145 					 struct fsf_link_down_info *link_down)
146 {
147 	struct zfcp_adapter *adapter = req->adapter;
148 
149 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
150 		return;
151 
152 	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
153 
154 	zfcp_scsi_schedule_rports_block(adapter);
155 
156 	zfcp_fsf_fc_host_link_down(adapter);
157 
158 	if (!link_down)
159 		goto out;
160 
161 	switch (link_down->error_code) {
162 	case FSF_PSQ_LINK_NO_LIGHT:
163 		dev_warn(&req->adapter->ccw_device->dev,
164 			 "There is no light signal from the local "
165 			 "fibre channel cable\n");
166 		break;
167 	case FSF_PSQ_LINK_WRAP_PLUG:
168 		dev_warn(&req->adapter->ccw_device->dev,
169 			 "There is a wrap plug instead of a fibre "
170 			 "channel cable\n");
171 		break;
172 	case FSF_PSQ_LINK_NO_FCP:
173 		dev_warn(&req->adapter->ccw_device->dev,
174 			 "The adjacent fibre channel node does not "
175 			 "support FCP\n");
176 		break;
177 	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
178 		dev_warn(&req->adapter->ccw_device->dev,
179 			 "The FCP device is suspended because of a "
180 			 "firmware update\n");
181 		break;
182 	case FSF_PSQ_LINK_INVALID_WWPN:
183 		dev_warn(&req->adapter->ccw_device->dev,
184 			 "The FCP device detected a WWPN that is "
185 			 "duplicate or not valid\n");
186 		break;
187 	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
188 		dev_warn(&req->adapter->ccw_device->dev,
189 			 "The fibre channel fabric does not support NPIV\n");
190 		break;
191 	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
192 		dev_warn(&req->adapter->ccw_device->dev,
193 			 "The FCP adapter cannot support more NPIV ports\n");
194 		break;
195 	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
196 		dev_warn(&req->adapter->ccw_device->dev,
197 			 "The adjacent switch cannot support "
198 			 "more NPIV ports\n");
199 		break;
200 	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
201 		dev_warn(&req->adapter->ccw_device->dev,
202 			 "The FCP adapter could not log in to the "
203 			 "fibre channel fabric\n");
204 		break;
205 	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
206 		dev_warn(&req->adapter->ccw_device->dev,
207 			 "The WWPN assignment file on the FCP adapter "
208 			 "has been damaged\n");
209 		break;
210 	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
211 		dev_warn(&req->adapter->ccw_device->dev,
212 			 "The mode table on the FCP adapter "
213 			 "has been damaged\n");
214 		break;
215 	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
216 		dev_warn(&req->adapter->ccw_device->dev,
217 			 "All NPIV ports on the FCP adapter have "
218 			 "been assigned\n");
219 		break;
220 	default:
221 		dev_warn(&req->adapter->ccw_device->dev,
222 			 "The link between the FCP adapter and "
223 			 "the FC fabric is down\n");
224 	}
225 out:
226 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
227 }
228 
static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
230 {
231 	struct fsf_status_read_buffer *sr_buf = req->data;
232 	struct fsf_link_down_info *ldi =
233 		(struct fsf_link_down_info *) &sr_buf->payload;
234 
235 	switch (sr_buf->status_subtype) {
236 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
237 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
238 		zfcp_fsf_link_down_info_eval(req, ldi);
239 		break;
240 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
241 		zfcp_fsf_link_down_info_eval(req, NULL);
242 	}
243 }
244 
static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
246 {
247 	struct zfcp_adapter *adapter = req->adapter;
248 	struct fsf_status_read_buffer *sr_buf = req->data;
249 
250 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
251 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
252 		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
253 		zfcp_fsf_req_free(req);
254 		return;
255 	}
256 
257 	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
258 
259 	switch (sr_buf->status_type) {
260 	case FSF_STATUS_READ_PORT_CLOSED:
261 		zfcp_fsf_status_read_port_closed(req);
262 		break;
263 	case FSF_STATUS_READ_INCOMING_ELS:
264 		zfcp_fc_incoming_els(req);
265 		break;
266 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
267 		break;
268 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
269 		zfcp_dbf_hba_bit_err("fssrh_3", req);
270 		if (ber_stop) {
271 			dev_warn(&adapter->ccw_device->dev,
272 				 "All paths over this FCP device are disused because of excessive bit errors\n");
273 			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
274 		} else {
275 			dev_warn(&adapter->ccw_device->dev,
276 				 "The error threshold for checksum statistics has been exceeded\n");
277 		}
278 		break;
279 	case FSF_STATUS_READ_LINK_DOWN:
280 		zfcp_fsf_status_read_link_down(req);
281 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
282 		break;
283 	case FSF_STATUS_READ_LINK_UP:
284 		dev_info(&adapter->ccw_device->dev,
285 			 "The local link has been restored\n");
286 		/* All ports should be marked as ready to run again */
287 		zfcp_erp_set_adapter_status(adapter,
288 					    ZFCP_STATUS_COMMON_RUNNING);
289 		zfcp_erp_adapter_reopen(adapter,
290 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
291 					ZFCP_STATUS_COMMON_ERP_FAILED,
292 					"fssrh_2");
293 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
294 
295 		break;
296 	case FSF_STATUS_READ_NOTIFICATION_LOST:
297 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
298 			zfcp_fc_conditional_port_scan(adapter);
299 		break;
300 	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
301 		adapter->adapter_features = sr_buf->payload.word[0];
302 		break;
303 	}
304 
305 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
306 	zfcp_fsf_req_free(req);
307 
308 	atomic_inc(&adapter->stat_miss);
309 	queue_work(adapter->work_queue, &adapter->stat_work);
310 }
311 
static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
313 {
314 	switch (req->qtcb->header.fsf_status_qual.word[0]) {
315 	case FSF_SQ_FCP_RSP_AVAILABLE:
316 	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
317 	case FSF_SQ_NO_RETRY_POSSIBLE:
318 	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
319 		return;
320 	case FSF_SQ_COMMAND_ABORTED:
321 		break;
322 	case FSF_SQ_NO_RECOM:
323 		dev_err(&req->adapter->ccw_device->dev,
324 			"The FCP adapter reported a problem "
325 			"that cannot be recovered\n");
326 		zfcp_qdio_siosl(req->adapter);
327 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
328 		break;
329 	}
	/* all non-returning qualifiers above set FSFREQ_ERROR */
331 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
332 }
333 
static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
335 {
336 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
337 		return;
338 
339 	switch (req->qtcb->header.fsf_status) {
340 	case FSF_UNKNOWN_COMMAND:
341 		dev_err(&req->adapter->ccw_device->dev,
342 			"The FCP adapter does not recognize the command 0x%x\n",
343 			req->qtcb->header.fsf_command);
344 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
345 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
346 		break;
347 	case FSF_ADAPTER_STATUS_AVAILABLE:
348 		zfcp_fsf_fsfstatus_qual_eval(req);
349 		break;
350 	}
351 }
352 
static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
354 {
355 	struct zfcp_adapter *adapter = req->adapter;
356 	struct fsf_qtcb *qtcb = req->qtcb;
357 	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
358 
359 	zfcp_dbf_hba_fsf_response(req);
360 
361 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
362 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
363 		return;
364 	}
365 
366 	switch (qtcb->prefix.prot_status) {
367 	case FSF_PROT_GOOD:
368 	case FSF_PROT_FSF_STATUS_PRESENTED:
369 		return;
370 	case FSF_PROT_QTCB_VERSION_ERROR:
371 		dev_err(&adapter->ccw_device->dev,
372 			"QTCB version 0x%x not supported by FCP adapter "
373 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
374 			psq->word[0], psq->word[1]);
375 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
376 		break;
377 	case FSF_PROT_ERROR_STATE:
378 	case FSF_PROT_SEQ_NUMB_ERROR:
379 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
380 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
381 		break;
382 	case FSF_PROT_UNSUPP_QTCB_TYPE:
383 		dev_err(&adapter->ccw_device->dev,
384 			"The QTCB type is not supported by the FCP adapter\n");
385 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
386 		break;
387 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
388 		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
389 				&adapter->status);
390 		break;
391 	case FSF_PROT_DUPLICATE_REQUEST_ID:
392 		dev_err(&adapter->ccw_device->dev,
393 			"0x%Lx is an ambiguous request identifier\n",
394 			(unsigned long long)qtcb->bottom.support.req_handle);
395 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
396 		break;
397 	case FSF_PROT_LINK_DOWN:
398 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
399 		/* go through reopen to flush pending requests */
400 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
401 		break;
402 	case FSF_PROT_REEST_QUEUE:
403 		/* All ports should be marked as ready to run again */
404 		zfcp_erp_set_adapter_status(adapter,
405 					    ZFCP_STATUS_COMMON_RUNNING);
406 		zfcp_erp_adapter_reopen(adapter,
407 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
408 					ZFCP_STATUS_COMMON_ERP_FAILED,
409 					"fspse_8");
410 		break;
411 	default:
412 		dev_err(&adapter->ccw_device->dev,
413 			"0x%x is not a valid transfer protocol status\n",
414 			qtcb->prefix.prot_status);
415 		zfcp_qdio_siosl(adapter);
416 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
417 	}
418 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
419 }
420 
421 /**
422  * zfcp_fsf_req_complete - process completion of a FSF request
423  * @req: The FSF request that has been completed.
424  *
425  * When a request has been completed either from the FCP adapter,
426  * or it has been dismissed due to a queue shutdown, this function
427  * is called to process the completion status and trigger further
428  * events related to the FSF request.
429  * Caller must ensure that the request has been removed from
430  * adapter->req_list, to protect against concurrent modification
431  * by zfcp_erp_strategy_check_fsfreq().
432  */
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
434 {
435 	struct zfcp_erp_action *erp_action;
436 
437 	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
438 		zfcp_fsf_status_read_handler(req);
439 		return;
440 	}
441 
442 	del_timer_sync(&req->timer);
443 	zfcp_fsf_protstatus_eval(req);
444 	zfcp_fsf_fsfstatus_eval(req);
445 	req->handler(req);
446 
447 	erp_action = req->erp_action;
448 	if (erp_action)
449 		zfcp_erp_notify(erp_action, 0);
450 
451 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
452 		zfcp_fsf_req_free(req);
453 	else
454 		complete(&req->completion);
455 }
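
/*
 * Illustrative calling sketch (not part of the driver): a completion path
 * first removes the request from adapter->req_list and only then hands it
 * to zfcp_fsf_req_complete(), as the QDIO response path and
 * zfcp_fsf_req_dismiss_all() below do.
 *
 *	req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
 *	if (req)
 *		zfcp_fsf_req_complete(req);
 */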
456 
457 /**
458  * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
459  * @adapter: pointer to struct zfcp_adapter
460  *
461  * Never ever call this without shutting down the adapter first.
462  * Otherwise the adapter would continue using and corrupting s390 storage.
463  * Included BUG_ON() call to ensure this is done.
464  * ERP is supposed to be the only user of this function.
465  */
void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
467 {
468 	struct zfcp_fsf_req *req, *tmp;
469 	LIST_HEAD(remove_queue);
470 
471 	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
472 	zfcp_reqlist_move(adapter->req_list, &remove_queue);
473 
474 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
475 		list_del(&req->list);
476 		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
477 		zfcp_fsf_req_complete(req);
478 	}
479 }
480 
481 #define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
482 #define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
483 #define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
484 #define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
485 #define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
486 #define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
487 #define ZFCP_FSF_PORTSPEED_32GBIT	(1 <<  6)
488 #define ZFCP_FSF_PORTSPEED_64GBIT	(1 <<  7)
489 #define ZFCP_FSF_PORTSPEED_128GBIT	(1 <<  8)
490 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
491 
u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
493 {
494 	u32 fdmi_speed = 0;
495 	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
496 		fdmi_speed |= FC_PORTSPEED_1GBIT;
497 	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
498 		fdmi_speed |= FC_PORTSPEED_2GBIT;
499 	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
500 		fdmi_speed |= FC_PORTSPEED_4GBIT;
501 	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
502 		fdmi_speed |= FC_PORTSPEED_10GBIT;
503 	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
504 		fdmi_speed |= FC_PORTSPEED_8GBIT;
505 	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
506 		fdmi_speed |= FC_PORTSPEED_16GBIT;
507 	if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
508 		fdmi_speed |= FC_PORTSPEED_32GBIT;
509 	if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
510 		fdmi_speed |= FC_PORTSPEED_64GBIT;
511 	if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
512 		fdmi_speed |= FC_PORTSPEED_128GBIT;
513 	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
514 		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
515 	return fdmi_speed;
516 }
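
/*
 * Usage sketch (illustrative; assumes a struct fsf_qtcb_bottom_port obtained
 * via exchange port data and a valid Scsi_Host): mapping the FSF capability
 * bits to the scsi_transport_fc representation.
 *
 *	fc_host_supported_speeds(shost) =
 *		zfcp_fsf_convert_portspeed(bottom->supported_speed);
 */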
517 
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
519 {
520 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
521 	struct zfcp_adapter *adapter = req->adapter;
522 	struct fc_els_flogi *plogi;
523 
524 	/* adjust pointers for missing command code */
525 	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
526 					- sizeof(u32));
527 
528 	if (req->data)
529 		memcpy(req->data, bottom, sizeof(*bottom));
530 
531 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
532 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
533 					 (u16)FSF_STATUS_READS_RECOM);
534 
535 	/* no error return above here, otherwise must fix call chains */
536 	/* do not evaluate invalid fields */
537 	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
538 		return 0;
539 
540 	adapter->hydra_version = bottom->adapter_type;
541 
542 	switch (bottom->fc_topology) {
543 	case FSF_TOPO_P2P:
544 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
545 		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
546 		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
547 		break;
548 	case FSF_TOPO_FABRIC:
549 		break;
550 	case FSF_TOPO_AL:
551 	default:
552 		dev_err(&adapter->ccw_device->dev,
553 			"Unknown or unsupported arbitrated loop "
554 			"fibre channel topology detected\n");
555 		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
556 		return -EIO;
557 	}
558 
559 	return 0;
560 }
561 
static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
563 {
564 	struct zfcp_adapter *adapter = req->adapter;
565 	struct zfcp_diag_header *const diag_hdr =
566 		&adapter->diagnostics->config_data.header;
567 	struct fsf_qtcb *qtcb = req->qtcb;
568 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
569 
570 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
571 		return;
572 
573 	adapter->fsf_lic_version = bottom->lic_version;
574 	adapter->adapter_features = bottom->adapter_features;
575 	adapter->connection_features = bottom->connection_features;
576 	adapter->peer_wwpn = 0;
577 	adapter->peer_wwnn = 0;
578 	adapter->peer_d_id = 0;
579 
580 	switch (qtcb->header.fsf_status) {
581 	case FSF_GOOD:
582 		/*
583 		 * usually we wait with an update till the cache is too old,
584 		 * but because we have the data available, update it anyway
585 		 */
586 		zfcp_diag_update_xdata(diag_hdr, bottom, false);
587 
588 		zfcp_scsi_shost_update_config_data(adapter, bottom, false);
589 		if (zfcp_fsf_exchange_config_evaluate(req))
590 			return;
591 
592 		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
593 			dev_err(&adapter->ccw_device->dev,
594 				"FCP adapter maximum QTCB size (%d bytes) "
595 				"is too small\n",
596 				bottom->max_qtcb_size);
597 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
598 			return;
599 		}
600 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
601 				&adapter->status);
602 		break;
603 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
604 		zfcp_diag_update_xdata(diag_hdr, bottom, true);
605 		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
606 
607 		/* avoids adapter shutdown to be able to recognize
608 		 * events such as LINK UP */
609 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
610 				&adapter->status);
611 		zfcp_fsf_link_down_info_eval(req,
612 			&qtcb->header.fsf_status_qual.link_down_info);
613 
614 		zfcp_scsi_shost_update_config_data(adapter, bottom, true);
615 		if (zfcp_fsf_exchange_config_evaluate(req))
616 			return;
617 		break;
618 	default:
619 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
620 		return;
621 	}
622 
623 	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT)
624 		adapter->hardware_version = bottom->hardware_version;
625 
626 	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
627 		dev_err(&adapter->ccw_device->dev,
628 			"The FCP adapter only supports newer "
629 			"control block versions\n");
630 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
631 		return;
632 	}
633 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
634 		dev_err(&adapter->ccw_device->dev,
635 			"The FCP adapter only supports older "
636 			"control block versions\n");
637 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
638 	}
639 }
640 
641 /*
642  * Mapping of FC Endpoint Security flag masks to mnemonics
643  *
644  * NOTE: Update macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH when making any
645  *       changes.
646  */
647 static const struct {
648 	u32	mask;
649 	char	*name;
650 } zfcp_fsf_fc_security_mnemonics[] = {
651 	{ FSF_FC_SECURITY_AUTH,		"Authentication" },
652 	{ FSF_FC_SECURITY_ENC_FCSP2 |
653 	  FSF_FC_SECURITY_ENC_ERAS,	"Encryption" },
654 };
655 
656 /* maximum strlen(zfcp_fsf_fc_security_mnemonics[...].name) + 1 */
657 #define ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH 15
658 
659 /**
660  * zfcp_fsf_scnprint_fc_security() - translate FC Endpoint Security flags into
661  *                                   mnemonics and place in a buffer
662  * @buf        : the buffer to place the translated FC Endpoint Security flag(s)
663  *               into
664  * @size       : the size of the buffer, including the trailing null space
665  * @fc_security: one or more FC Endpoint Security flags, or zero
666  * @fmt        : specifies whether a list or a single item is to be put into the
667  *               buffer
668  *
669  * The Fibre Channel (FC) Endpoint Security flags are translated into mnemonics.
670  * If the FC Endpoint Security flags are zero "none" is placed into the buffer.
671  *
672  * With ZFCP_FSF_PRINT_FMT_LIST the mnemonics are placed as a list separated by
673  * a comma followed by a space into the buffer. If one or more FC Endpoint
674  * Security flags cannot be translated into a mnemonic, as they are undefined
675  * in zfcp_fsf_fc_security_mnemonics, their bitwise ORed value in hexadecimal
676  * representation is placed into the buffer.
677  *
678  * With ZFCP_FSF_PRINT_FMT_SINGLEITEM only one single mnemonic is placed into
679  * the buffer. If the FC Endpoint Security flag cannot be translated, as it is
680  * undefined in zfcp_fsf_fc_security_mnemonics, its value in hexadecimal
681  * representation is placed into the buffer. If more than one FC Endpoint
682  * Security flag was specified, their value in hexadecimal representation is
683  * placed into the buffer. The macro ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH
684  * can be used to define a buffer that is large enough to hold one mnemonic.
685  *
686  * Return: The number of characters written into buf not including the trailing
687  *         '\0'. If size is == 0 the function returns 0.
688  */
ssize_t zfcp_fsf_scnprint_fc_security(char *buf, size_t size, u32 fc_security,
690 				      enum zfcp_fsf_print_fmt fmt)
691 {
692 	const char *prefix = "";
693 	ssize_t len = 0;
694 	int i;
695 
696 	if (fc_security == 0)
697 		return scnprintf(buf, size, "none");
698 	if (fmt == ZFCP_FSF_PRINT_FMT_SINGLEITEM && hweight32(fc_security) != 1)
699 		return scnprintf(buf, size, "0x%08x", fc_security);
700 
701 	for (i = 0; i < ARRAY_SIZE(zfcp_fsf_fc_security_mnemonics); i++) {
702 		if (!(fc_security & zfcp_fsf_fc_security_mnemonics[i].mask))
703 			continue;
704 
705 		len += scnprintf(buf + len, size - len, "%s%s", prefix,
706 				 zfcp_fsf_fc_security_mnemonics[i].name);
707 		prefix = ", ";
708 		fc_security &= ~zfcp_fsf_fc_security_mnemonics[i].mask;
709 	}
710 
711 	if (fc_security != 0)
712 		len += scnprintf(buf + len, size - len, "%s0x%08x",
713 				 prefix, fc_security);
714 
715 	return len;
716 }
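
/*
 * Usage sketch (illustrative; the buffer size is an arbitrary assumption):
 * formatting the adapter's FC Endpoint Security capabilities as a
 * comma-separated list, e.g. for a sysfs attribute.
 *
 *	char buf[80];
 *
 *	zfcp_fsf_scnprint_fc_security(buf, sizeof(buf),
 *				      adapter->fc_security_algorithms,
 *				      ZFCP_FSF_PRINT_FMT_LIST);
 *
 * Typical results are "none", "Authentication" or
 * "Authentication, Encryption".
 */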
717 
static void zfcp_fsf_dbf_adapter_fc_security(struct zfcp_adapter *adapter,
719 					     struct zfcp_fsf_req *req)
720 {
721 	if (adapter->fc_security_algorithms ==
722 	    adapter->fc_security_algorithms_old) {
723 		/* no change, no trace */
724 		return;
725 	}
726 
727 	zfcp_dbf_hba_fsf_fces("fsfcesa", req, ZFCP_DBF_INVALID_WWPN,
728 			      adapter->fc_security_algorithms_old,
729 			      adapter->fc_security_algorithms);
730 
731 	adapter->fc_security_algorithms_old = adapter->fc_security_algorithms;
732 }
733 
static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
735 {
736 	struct zfcp_adapter *adapter = req->adapter;
737 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
738 
739 	if (req->data)
740 		memcpy(req->data, bottom, sizeof(*bottom));
741 
742 	if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
743 		adapter->fc_security_algorithms =
744 			bottom->fc_security_algorithms;
745 	else
746 		adapter->fc_security_algorithms = 0;
747 	zfcp_fsf_dbf_adapter_fc_security(adapter, req);
748 }
749 
static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
751 {
752 	struct zfcp_diag_header *const diag_hdr =
753 		&req->adapter->diagnostics->port_data.header;
754 	struct fsf_qtcb *qtcb = req->qtcb;
755 	struct fsf_qtcb_bottom_port *bottom = &qtcb->bottom.port;
756 
757 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
758 		return;
759 
760 	switch (qtcb->header.fsf_status) {
761 	case FSF_GOOD:
762 		/*
763 		 * usually we wait with an update till the cache is too old,
764 		 * but because we have the data available, update it anyway
765 		 */
766 		zfcp_diag_update_xdata(diag_hdr, bottom, false);
767 
768 		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
769 		zfcp_fsf_exchange_port_evaluate(req);
770 		break;
771 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
772 		zfcp_diag_update_xdata(diag_hdr, bottom, true);
773 		req->status |= ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE;
774 
775 		zfcp_fsf_link_down_info_eval(req,
776 			&qtcb->header.fsf_status_qual.link_down_info);
777 
778 		zfcp_scsi_shost_update_port_data(req->adapter, bottom);
779 		zfcp_fsf_exchange_port_evaluate(req);
780 		break;
781 	}
782 }
783 
static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
785 {
786 	struct zfcp_fsf_req *req;
787 
788 	if (likely(pool))
789 		req = mempool_alloc(pool, GFP_ATOMIC);
790 	else
791 		req = kmalloc(sizeof(*req), GFP_ATOMIC);
792 
793 	if (unlikely(!req))
794 		return NULL;
795 
796 	memset(req, 0, sizeof(*req));
797 	req->pool = pool;
798 	return req;
799 }
800 
static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
802 {
803 	struct fsf_qtcb *qtcb;
804 
805 	if (likely(pool))
806 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
807 	else
808 		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
809 
810 	if (unlikely(!qtcb))
811 		return NULL;
812 
813 	memset(qtcb, 0, sizeof(*qtcb));
814 	return qtcb;
815 }
816 
static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
818 						u32 fsf_cmd, u8 sbtype,
819 						mempool_t *pool)
820 {
821 	struct zfcp_adapter *adapter = qdio->adapter;
822 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
823 
824 	if (unlikely(!req))
825 		return ERR_PTR(-ENOMEM);
826 
827 	if (adapter->req_no == 0)
828 		adapter->req_no++;
829 
830 	INIT_LIST_HEAD(&req->list);
831 	timer_setup(&req->timer, NULL, 0);
832 	init_completion(&req->completion);
833 
834 	req->adapter = adapter;
835 	req->req_id = adapter->req_no;
836 
837 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
838 		if (likely(pool))
839 			req->qtcb = zfcp_fsf_qtcb_alloc(
840 				adapter->pool.qtcb_pool);
841 		else
842 			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
843 
844 		if (unlikely(!req->qtcb)) {
845 			zfcp_fsf_req_free(req);
846 			return ERR_PTR(-ENOMEM);
847 		}
848 
849 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
850 		req->qtcb->prefix.req_id = req->req_id;
851 		req->qtcb->prefix.ulp_info = 26;
852 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
853 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
854 		req->qtcb->header.req_handle = req->req_id;
855 		req->qtcb->header.fsf_command = fsf_cmd;
856 	}
857 
858 	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
859 			   req->qtcb, sizeof(struct fsf_qtcb));
860 
861 	return req;
862 }
863 
static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
865 {
866 	const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
867 	struct zfcp_adapter *adapter = req->adapter;
868 	struct zfcp_qdio *qdio = adapter->qdio;
869 	int req_id = req->req_id;
870 
871 	zfcp_reqlist_add(adapter->req_list, req);
872 
873 	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
874 	req->issued = get_tod_clock();
875 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
876 		del_timer_sync(&req->timer);
877 		/* lookup request again, list might have changed */
878 		zfcp_reqlist_find_rm(adapter->req_list, req_id);
879 		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
880 		return -EIO;
881 	}
882 
883 	/*
884 	 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
885 	 *	 ONLY TOUCH SYNC req AGAIN ON req->completion.
886 	 *
887 	 * The request might complete and be freed concurrently at any point
888 	 * now. This is not protected by the QDIO-lock (req_q_lock). So any
	 * uncontrolled access after this might result in a use-after-free bug.
890 	 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
891 	 * when it is completed via req->completion, is it safe to use req
892 	 * again.
893 	 */
894 
895 	/* Don't increase for unsolicited status */
896 	if (!is_srb)
897 		adapter->fsf_req_seq_no++;
898 	adapter->req_no++;
899 
900 	return 0;
901 }
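
/*
 * Synchronous caller sketch (illustrative): a request without
 * ZFCP_STATUS_FSFREQ_CLEANUP may only be touched again once its completion
 * fires, as zfcp_fsf_exchange_config_data_sync() below demonstrates.
 *
 *	retval = zfcp_fsf_req_send(req);
 *	spin_unlock_irq(&qdio->req_q_lock);
 *	if (!retval)
 *		wait_for_completion(&req->completion);
 *	zfcp_fsf_req_free(req);
 */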
902 
903 /**
904  * zfcp_fsf_status_read - send status read request
905  * @qdio: pointer to struct zfcp_qdio
 * Returns: 0 on success, a negative error value otherwise
907  */
int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
909 {
910 	struct zfcp_adapter *adapter = qdio->adapter;
911 	struct zfcp_fsf_req *req;
912 	struct fsf_status_read_buffer *sr_buf;
913 	struct page *page;
914 	int retval = -EIO;
915 
916 	spin_lock_irq(&qdio->req_q_lock);
917 	if (zfcp_qdio_sbal_get(qdio))
918 		goto out;
919 
920 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
921 				  SBAL_SFLAGS0_TYPE_STATUS,
922 				  adapter->pool.status_read_req);
923 	if (IS_ERR(req)) {
924 		retval = PTR_ERR(req);
925 		goto out;
926 	}
927 
928 	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
929 	if (!page) {
930 		retval = -ENOMEM;
931 		goto failed_buf;
932 	}
933 	sr_buf = page_address(page);
934 	memset(sr_buf, 0, sizeof(*sr_buf));
935 	req->data = sr_buf;
936 
937 	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
938 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
939 
940 	retval = zfcp_fsf_req_send(req);
941 	if (retval)
942 		goto failed_req_send;
943 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
944 
945 	goto out;
946 
947 failed_req_send:
948 	req->data = NULL;
949 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
950 failed_buf:
951 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
952 	zfcp_fsf_req_free(req);
953 out:
954 	spin_unlock_irq(&qdio->req_q_lock);
955 	return retval;
956 }
957 
static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
959 {
960 	struct scsi_device *sdev = req->data;
961 	struct zfcp_scsi_dev *zfcp_sdev;
962 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
963 
964 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
965 		return;
966 
967 	zfcp_sdev = sdev_to_zfcp(sdev);
968 
969 	switch (req->qtcb->header.fsf_status) {
970 	case FSF_PORT_HANDLE_NOT_VALID:
971 		if (fsq->word[0] == fsq->word[1]) {
972 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
973 						"fsafch1");
974 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
975 		}
976 		break;
977 	case FSF_LUN_HANDLE_NOT_VALID:
978 		if (fsq->word[0] == fsq->word[1]) {
979 			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
980 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
981 		}
982 		break;
983 	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
984 		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
985 		break;
986 	case FSF_PORT_BOXED:
987 		zfcp_erp_set_port_status(zfcp_sdev->port,
988 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
989 		zfcp_erp_port_reopen(zfcp_sdev->port,
990 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
991 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
992 		break;
993 	case FSF_LUN_BOXED:
994 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
995 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
996 				    "fsafch4");
997 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
		break;
999 	case FSF_ADAPTER_STATUS_AVAILABLE:
1000 		switch (fsq->word[0]) {
1001 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1002 			zfcp_fc_test_link(zfcp_sdev->port);
1003 			fallthrough;
1004 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1005 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1006 			break;
1007 		}
1008 		break;
1009 	case FSF_GOOD:
1010 		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
1011 		break;
1012 	}
1013 }
1014 
1015 /**
1016  * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
1017  * @scmnd: The SCSI command to abort
 * Returns: pointer to struct zfcp_fsf_req, or NULL on failure
1019  */
struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
1022 {
1023 	struct zfcp_fsf_req *req = NULL;
1024 	struct scsi_device *sdev = scmnd->device;
1025 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1026 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
1027 	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
1028 
1029 	spin_lock_irq(&qdio->req_q_lock);
1030 	if (zfcp_qdio_sbal_get(qdio))
1031 		goto out;
1032 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
1033 				  SBAL_SFLAGS0_TYPE_READ,
1034 				  qdio->adapter->pool.scsi_abort);
1035 	if (IS_ERR(req)) {
1036 		req = NULL;
1037 		goto out;
1038 	}
1039 
1040 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
1041 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
1042 		goto out_error_free;
1043 
1044 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1045 
1046 	req->data = sdev;
1047 	req->handler = zfcp_fsf_abort_fcp_command_handler;
1048 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1049 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
1050 	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
1051 
1052 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
1053 	if (!zfcp_fsf_req_send(req)) {
1054 		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
1055 		goto out;
1056 	}
1057 
1058 out_error_free:
1059 	zfcp_fsf_req_free(req);
1060 	req = NULL;
1061 out:
1062 	spin_unlock_irq(&qdio->req_q_lock);
1063 	return req;
1064 }
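
/*
 * Caller sketch (illustrative, mirroring the SCSI error handling in
 * zfcp_scsi.c): the returned request is waited for and then freed by the
 * caller.
 *
 *	abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
 *	if (!abrt_req)
 *		return FAILED;	(or retry, depending on adapter state)
 *	wait_for_completion(&abrt_req->completion);
 *	(evaluate abrt_req->status, e.g. ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
 *	zfcp_fsf_req_free(abrt_req);
 */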
1065 
static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
1067 {
1068 	struct zfcp_adapter *adapter = req->adapter;
1069 	struct zfcp_fsf_ct_els *ct = req->data;
1070 	struct fsf_qtcb_header *header = &req->qtcb->header;
1071 
1072 	ct->status = -EINVAL;
1073 
1074 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1075 		goto skip_fsfstatus;
1076 
1077 	switch (header->fsf_status) {
	case FSF_GOOD:
		ct->status = 0;
		zfcp_dbf_san_res("fsscth2", req);
		break;
	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
		zfcp_fsf_class_not_supp(req);
		break;
	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
			break;
		}
		break;
	case FSF_PORT_BOXED:
1094 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1095 		break;
1096 	case FSF_PORT_HANDLE_NOT_VALID:
1097 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
1098 		fallthrough;
1099 	case FSF_GENERIC_COMMAND_REJECTED:
1100 	case FSF_PAYLOAD_SIZE_MISMATCH:
1101 	case FSF_REQUEST_SIZE_TOO_LARGE:
1102 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1103 	case FSF_SBAL_MISMATCH:
1104 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1105 		break;
1106 	}
1107 
1108 skip_fsfstatus:
1109 	if (ct->handler)
1110 		ct->handler(ct->handler_data);
1111 }
1112 
static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
1114 					    struct zfcp_qdio_req *q_req,
1115 					    struct scatterlist *sg_req,
1116 					    struct scatterlist *sg_resp)
1117 {
1118 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
1119 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
1120 	zfcp_qdio_set_sbale_last(qdio, q_req);
1121 }
1122 
static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1124 				       struct scatterlist *sg_req,
1125 				       struct scatterlist *sg_resp)
1126 {
1127 	struct zfcp_adapter *adapter = req->adapter;
1128 	struct zfcp_qdio *qdio = adapter->qdio;
1129 	struct fsf_qtcb *qtcb = req->qtcb;
1130 	u32 feat = adapter->adapter_features;
1131 
1132 	if (zfcp_adapter_multi_buffer_active(adapter)) {
1133 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1134 			return -EIO;
1135 		qtcb->bottom.support.req_buf_length =
1136 			zfcp_qdio_real_bytes(sg_req);
1137 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1138 			return -EIO;
1139 		qtcb->bottom.support.resp_buf_length =
1140 			zfcp_qdio_real_bytes(sg_resp);
1141 
1142 		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
1143 		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1144 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
1145 		return 0;
1146 	}
1147 
1148 	/* use single, unchained SBAL if it can hold the request */
1149 	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
1150 		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
1151 						sg_req, sg_resp);
1152 		return 0;
1153 	}
1154 
1155 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1156 		return -EOPNOTSUPP;
1157 
1158 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1159 		return -EIO;
1160 
1161 	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1162 
1163 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1164 	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1165 
1166 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1167 		return -EIO;
1168 
1169 	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1170 
1171 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1172 
1173 	return 0;
1174 }
1175 
static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1177 				 struct scatterlist *sg_req,
1178 				 struct scatterlist *sg_resp,
1179 				 unsigned int timeout)
1180 {
1181 	int ret;
1182 
1183 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1184 	if (ret)
1185 		return ret;
1186 
1187 	/* common settings for ct/gs and els requests */
1188 	if (timeout > 255)
1189 		timeout = 255; /* max value accepted by hardware */
1190 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1191 	req->qtcb->bottom.support.timeout = timeout;
1192 	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1193 
1194 	return 0;
1195 }
1196 
1197 /**
1198  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1199  * @wka_port: pointer to zfcp WKA port to send CT/GS to
1200  * @ct: pointer to struct zfcp_send_ct with data for request
1201  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1202  * @timeout: timeout that hardware should use, and a later software timeout
1203  */
int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1205 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1206 		     unsigned int timeout)
1207 {
1208 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1209 	struct zfcp_fsf_req *req;
1210 	int ret = -EIO;
1211 
1212 	spin_lock_irq(&qdio->req_q_lock);
1213 	if (zfcp_qdio_sbal_get(qdio))
1214 		goto out;
1215 
1216 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1217 				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1218 
1219 	if (IS_ERR(req)) {
1220 		ret = PTR_ERR(req);
1221 		goto out;
1222 	}
1223 
1224 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1225 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1226 	if (ret)
1227 		goto failed_send;
1228 
1229 	req->handler = zfcp_fsf_send_ct_handler;
1230 	req->qtcb->header.port_handle = wka_port->handle;
1231 	ct->d_id = wka_port->d_id;
1232 	req->data = ct;
1233 
1234 	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1235 
1236 	ret = zfcp_fsf_req_send(req);
1237 	if (ret)
1238 		goto failed_send;
1239 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1240 
1241 	goto out;
1242 
1243 failed_send:
1244 	zfcp_fsf_req_free(req);
1245 out:
1246 	spin_unlock_irq(&qdio->req_q_lock);
1247 	return ret;
1248 }
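
/*
 * Usage sketch (illustrative; the my_* names are hypothetical, the timeout
 * is arbitrary): a caller prepares request/response scatterlists and a
 * completion callback in a struct zfcp_fsf_ct_els before sending it to a
 * well-known-address port, as the name-server code in zfcp_fc.c does.
 *
 *	ct->req = &my_sg_req;
 *	ct->resp = &my_sg_rsp;
 *	ct->handler = my_ct_done;
 *	ct->handler_data = my_ctx;
 *	ret = zfcp_fsf_send_ct(wka_port, ct, pool, 10);
 */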
1249 
static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1251 {
1252 	struct zfcp_fsf_ct_els *send_els = req->data;
1253 	struct fsf_qtcb_header *header = &req->qtcb->header;
1254 
1255 	send_els->status = -EINVAL;
1256 
1257 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1258 		goto skip_fsfstatus;
1259 
1260 	switch (header->fsf_status) {
1261 	case FSF_GOOD:
1262 		send_els->status = 0;
1263 		zfcp_dbf_san_res("fsselh1", req);
1264 		break;
1265 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1266 		zfcp_fsf_class_not_supp(req);
1267 		break;
1268 	case FSF_ADAPTER_STATUS_AVAILABLE:
		switch (header->fsf_status_qual.word[0]) {
1270 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1271 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1272 		case FSF_SQ_RETRY_IF_POSSIBLE:
1273 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1274 			break;
1275 		}
1276 		break;
1277 	case FSF_ELS_COMMAND_REJECTED:
1278 	case FSF_PAYLOAD_SIZE_MISMATCH:
1279 	case FSF_REQUEST_SIZE_TOO_LARGE:
1280 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1281 		break;
1282 	case FSF_SBAL_MISMATCH:
1283 		/* should never occur, avoided in zfcp_fsf_send_els */
1284 		fallthrough;
1285 	default:
1286 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1287 		break;
1288 	}
1289 skip_fsfstatus:
1290 	if (send_els->handler)
1291 		send_els->handler(send_els->handler_data);
1292 }
1293 
1294 /**
1295  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1296  * @adapter: pointer to zfcp adapter
1297  * @d_id: N_Port_ID to send ELS to
1298  * @els: pointer to struct zfcp_send_els with data for the command
1299  * @timeout: timeout that hardware should use, and a later software timeout
1300  */
int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1302 		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
1303 {
1304 	struct zfcp_fsf_req *req;
1305 	struct zfcp_qdio *qdio = adapter->qdio;
1306 	int ret = -EIO;
1307 
1308 	spin_lock_irq(&qdio->req_q_lock);
1309 	if (zfcp_qdio_sbal_get(qdio))
1310 		goto out;
1311 
1312 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1313 				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1314 
1315 	if (IS_ERR(req)) {
1316 		ret = PTR_ERR(req);
1317 		goto out;
1318 	}
1319 
1320 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1321 
1322 	if (!zfcp_adapter_multi_buffer_active(adapter))
1323 		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1324 
1325 	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1326 
1327 	if (ret)
1328 		goto failed_send;
1329 
1330 	hton24(req->qtcb->bottom.support.d_id, d_id);
1331 	req->handler = zfcp_fsf_send_els_handler;
1332 	els->d_id = d_id;
1333 	req->data = els;
1334 
1335 	zfcp_dbf_san_req("fssels1", req, d_id);
1336 
1337 	ret = zfcp_fsf_req_send(req);
1338 	if (ret)
1339 		goto failed_send;
1340 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1341 
1342 	goto out;
1343 
1344 failed_send:
1345 	zfcp_fsf_req_free(req);
1346 out:
1347 	spin_unlock_irq(&qdio->req_q_lock);
1348 	return ret;
1349 }
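
/*
 * Usage sketch (illustrative; my_* names are hypothetical, the timeout is
 * arbitrary): sending an ELS such as ADISC to a remote N_Port, similar to
 * the link-test code in zfcp_fc.c.
 *
 *	els->req = &my_sg_req;
 *	els->resp = &my_sg_rsp;
 *	els->handler = my_els_done;
 *	els->handler_data = my_ctx;
 *	ret = zfcp_fsf_send_els(adapter, port->d_id, els, 8);
 */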
1350 
int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1352 {
1353 	struct zfcp_fsf_req *req;
1354 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1355 	int retval = -EIO;
1356 
1357 	spin_lock_irq(&qdio->req_q_lock);
1358 	if (zfcp_qdio_sbal_get(qdio))
1359 		goto out;
1360 
1361 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1362 				  SBAL_SFLAGS0_TYPE_READ,
1363 				  qdio->adapter->pool.erp_req);
1364 
1365 	if (IS_ERR(req)) {
1366 		retval = PTR_ERR(req);
1367 		goto out;
1368 	}
1369 
1370 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1371 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1372 
1373 	req->qtcb->bottom.config.feature_selection =
1374 			FSF_FEATURE_NOTIFICATION_LOST |
1375 			FSF_FEATURE_UPDATE_ALERT |
1376 			FSF_FEATURE_REQUEST_SFP_DATA |
1377 			FSF_FEATURE_FC_SECURITY;
1378 	req->erp_action = erp_action;
1379 	req->handler = zfcp_fsf_exchange_config_data_handler;
1380 	erp_action->fsf_req_id = req->req_id;
1381 
1382 	zfcp_fsf_start_erp_timer(req);
1383 	retval = zfcp_fsf_req_send(req);
1384 	if (retval) {
1385 		zfcp_fsf_req_free(req);
1386 		erp_action->fsf_req_id = 0;
1387 	}
1388 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1389 out:
1390 	spin_unlock_irq(&qdio->req_q_lock);
1391 	return retval;
1392 }
1393 
1394 
1395 /**
1396  * zfcp_fsf_exchange_config_data_sync() - Request information about FCP channel.
1397  * @qdio: pointer to the QDIO-Queue to use for sending the command.
1398  * @data: pointer to the QTCB-Bottom for storing the result of the command,
1399  *	  might be %NULL.
1400  *
1401  * Returns:
1402  * * 0		- Exchange Config Data was successful, @data is complete
1403  * * -EIO	- Exchange Config Data was not successful, @data is invalid
1404  * * -EAGAIN	- @data contains incomplete data
1405  * * -ENOMEM	- Some memory allocation failed along the way
1406  */
int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1408 				       struct fsf_qtcb_bottom_config *data)
1409 {
1410 	struct zfcp_fsf_req *req = NULL;
1411 	int retval = -EIO;
1412 
1413 	spin_lock_irq(&qdio->req_q_lock);
1414 	if (zfcp_qdio_sbal_get(qdio))
1415 		goto out_unlock;
1416 
1417 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1418 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1419 
1420 	if (IS_ERR(req)) {
1421 		retval = PTR_ERR(req);
1422 		goto out_unlock;
1423 	}
1424 
1425 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1426 	req->handler = zfcp_fsf_exchange_config_data_handler;
1427 
1428 	req->qtcb->bottom.config.feature_selection =
1429 			FSF_FEATURE_NOTIFICATION_LOST |
1430 			FSF_FEATURE_UPDATE_ALERT |
1431 			FSF_FEATURE_REQUEST_SFP_DATA |
1432 			FSF_FEATURE_FC_SECURITY;
1433 
1434 	if (data)
1435 		req->data = data;
1436 
1437 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1438 	retval = zfcp_fsf_req_send(req);
1439 	spin_unlock_irq(&qdio->req_q_lock);
1440 
1441 	if (!retval) {
1442 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
1443 		wait_for_completion(&req->completion);
1444 
1445 		if (req->status &
1446 		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
1447 			retval = -EIO;
1448 		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
1449 			retval = -EAGAIN;
1450 	}
1451 
1452 	zfcp_fsf_req_free(req);
1453 	return retval;
1454 
1455 out_unlock:
1456 	spin_unlock_irq(&qdio->req_q_lock);
1457 	return retval;
1458 }
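
/*
 * Caller sketch (illustrative; qtcb_config is a hypothetical caller-provided
 * struct fsf_qtcb_bottom_config buffer): -EAGAIN means the buffer was filled
 * but the data is flagged incomplete.
 *
 *	ret = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
 *	if (ret == 0 || ret == -EAGAIN)
 *		(qtcb_config now holds the adapter's reply)
 */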
1459 
1460 /**
1461  * zfcp_fsf_exchange_port_data - request information about local port
1462  * @erp_action: ERP action for the adapter for which port data is requested
 * Returns: 0 on success, a negative error value otherwise
1464  */
int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1466 {
1467 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1468 	struct zfcp_fsf_req *req;
1469 	int retval = -EIO;
1470 
1471 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1472 		return -EOPNOTSUPP;
1473 
1474 	spin_lock_irq(&qdio->req_q_lock);
1475 	if (zfcp_qdio_sbal_get(qdio))
1476 		goto out;
1477 
1478 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1479 				  SBAL_SFLAGS0_TYPE_READ,
1480 				  qdio->adapter->pool.erp_req);
1481 
1482 	if (IS_ERR(req)) {
1483 		retval = PTR_ERR(req);
1484 		goto out;
1485 	}
1486 
1487 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1488 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1489 
1490 	req->handler = zfcp_fsf_exchange_port_data_handler;
1491 	req->erp_action = erp_action;
1492 	erp_action->fsf_req_id = req->req_id;
1493 
1494 	zfcp_fsf_start_erp_timer(req);
1495 	retval = zfcp_fsf_req_send(req);
1496 	if (retval) {
1497 		zfcp_fsf_req_free(req);
1498 		erp_action->fsf_req_id = 0;
1499 	}
1500 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1501 out:
1502 	spin_unlock_irq(&qdio->req_q_lock);
1503 	return retval;
1504 }
1505 
1506 /**
1507  * zfcp_fsf_exchange_port_data_sync() - Request information about local port.
1508  * @qdio: pointer to the QDIO-Queue to use for sending the command.
1509  * @data: pointer to the QTCB-Bottom for storing the result of the command,
1510  *	  might be %NULL.
1511  *
1512  * Returns:
1513  * * 0		- Exchange Port Data was successful, @data is complete
1514  * * -EIO	- Exchange Port Data was not successful, @data is invalid
1515  * * -EAGAIN	- @data contains incomplete data
1516  * * -ENOMEM	- Some memory allocation failed along the way
1517  * * -EOPNOTSUPP	- This operation is not supported
1518  */
int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1520 				     struct fsf_qtcb_bottom_port *data)
1521 {
1522 	struct zfcp_fsf_req *req = NULL;
1523 	int retval = -EIO;
1524 
1525 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1526 		return -EOPNOTSUPP;
1527 
1528 	spin_lock_irq(&qdio->req_q_lock);
1529 	if (zfcp_qdio_sbal_get(qdio))
1530 		goto out_unlock;
1531 
1532 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1533 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1534 
1535 	if (IS_ERR(req)) {
1536 		retval = PTR_ERR(req);
1537 		goto out_unlock;
1538 	}
1539 
1540 	if (data)
1541 		req->data = data;
1542 
1543 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1544 
1545 	req->handler = zfcp_fsf_exchange_port_data_handler;
1546 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1547 	retval = zfcp_fsf_req_send(req);
1548 	spin_unlock_irq(&qdio->req_q_lock);
1549 
1550 	if (!retval) {
1551 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
1552 		wait_for_completion(&req->completion);
1553 
1554 		if (req->status &
1555 		    (ZFCP_STATUS_FSFREQ_ERROR | ZFCP_STATUS_FSFREQ_DISMISSED))
1556 			retval = -EIO;
1557 		else if (req->status & ZFCP_STATUS_FSFREQ_XDATAINCOMPLETE)
1558 			retval = -EAGAIN;
1559 	}
1560 
1561 	zfcp_fsf_req_free(req);
1562 	return retval;
1563 
1564 out_unlock:
1565 	spin_unlock_irq(&qdio->req_q_lock);
1566 	return retval;
1567 }
1568 
static void zfcp_fsf_log_port_fc_security(struct zfcp_port *port,
1570 					  struct zfcp_fsf_req *req)
1571 {
1572 	char mnemonic_old[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
1573 	char mnemonic_new[ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH];
1574 
1575 	if (port->connection_info == port->connection_info_old) {
1576 		/* no change, no log nor trace */
1577 		return;
1578 	}
1579 
1580 	zfcp_dbf_hba_fsf_fces("fsfcesp", req, port->wwpn,
1581 			      port->connection_info_old,
1582 			      port->connection_info);
1583 
1584 	zfcp_fsf_scnprint_fc_security(mnemonic_old, sizeof(mnemonic_old),
1585 				      port->connection_info_old,
1586 				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
1587 	zfcp_fsf_scnprint_fc_security(mnemonic_new, sizeof(mnemonic_new),
1588 				      port->connection_info,
1589 				      ZFCP_FSF_PRINT_FMT_SINGLEITEM);
1590 
1591 	if (strncmp(mnemonic_old, mnemonic_new,
1592 		    ZFCP_FSF_MAX_FC_SECURITY_MNEMONIC_LENGTH) == 0) {
1593 		/* no change in string representation, no log */
1594 		goto out;
1595 	}
1596 
1597 	if (port->connection_info_old == 0) {
1598 		/* activation */
1599 		dev_info(&port->adapter->ccw_device->dev,
1600 			 "FC Endpoint Security of connection to remote port 0x%16llx enabled: %s\n",
1601 			 port->wwpn, mnemonic_new);
1602 	} else if (port->connection_info == 0) {
1603 		/* deactivation */
1604 		dev_warn(&port->adapter->ccw_device->dev,
1605 			 "FC Endpoint Security of connection to remote port 0x%16llx disabled: was %s\n",
1606 			 port->wwpn, mnemonic_old);
1607 	} else {
1608 		/* change */
1609 		dev_warn(&port->adapter->ccw_device->dev,
1610 			 "FC Endpoint Security of connection to remote port 0x%16llx changed: from %s to %s\n",
1611 			 port->wwpn, mnemonic_old, mnemonic_new);
1612 	}
1613 
1614 out:
1615 	port->connection_info_old = port->connection_info;
1616 }
1617 
1618 static void zfcp_fsf_log_security_error(const struct device *dev, u32 fsf_sqw0,
1619 					u64 wwpn)
1620 {
1621 	switch (fsf_sqw0) {
1622 
1623 	/*
1624 	 * Open Port command error codes
1625 	 */
1626 
1627 	case FSF_SQ_SECURITY_REQUIRED:
1628 		dev_warn_ratelimited(dev,
1629 				     "FC Endpoint Security error: FC security is required but not supported or configured on remote port 0x%016llx\n",
1630 				     wwpn);
1631 		break;
1632 	case FSF_SQ_SECURITY_TIMEOUT:
1633 		dev_warn_ratelimited(dev,
1634 				     "FC Endpoint Security error: a timeout prevented opening remote port 0x%016llx\n",
1635 				     wwpn);
1636 		break;
1637 	case FSF_SQ_SECURITY_KM_UNAVAILABLE:
1638 		dev_warn_ratelimited(dev,
1639 				     "FC Endpoint Security error: opening remote port 0x%016llx failed because local and external key manager cannot communicate\n",
1640 				     wwpn);
1641 		break;
1642 	case FSF_SQ_SECURITY_RKM_UNAVAILABLE:
1643 		dev_warn_ratelimited(dev,
1644 				     "FC Endpoint Security error: opening remote port 0x%016llx failed because it cannot communicate with the external key manager\n",
1645 				     wwpn);
1646 		break;
1647 	case FSF_SQ_SECURITY_AUTH_FAILURE:
1648 		dev_warn_ratelimited(dev,
1649 				     "FC Endpoint Security error: the device could not verify the identity of remote port 0x%016llx\n",
1650 				     wwpn);
1651 		break;
1652 
1653 	/*
1654 	 * Send FCP command error codes
1655 	 */
1656 
1657 	case FSF_SQ_SECURITY_ENC_FAILURE:
1658 		dev_warn_ratelimited(dev,
1659 				     "FC Endpoint Security error: FC connection to remote port 0x%016llx closed because encryption broke down\n",
1660 				     wwpn);
1661 		break;
1662 
1663 	/*
1664 	 * Unknown error codes
1665 	 */
1666 
1667 	default:
1668 		dev_warn_ratelimited(dev,
1669 				     "FC Endpoint Security error: the device issued an unknown error code 0x%08x related to the FC connection to remote port 0x%016llx\n",
1670 				     fsf_sqw0, wwpn);
1671 	}
1672 }
1673 
1674 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1675 {
1676 	struct zfcp_adapter *adapter = req->adapter;
1677 	struct zfcp_port *port = req->data;
1678 	struct fsf_qtcb_header *header = &req->qtcb->header;
1679 	struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1680 	struct fc_els_flogi *plogi;
1681 
1682 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1683 		goto out;
1684 
1685 	switch (header->fsf_status) {
1686 	case FSF_PORT_ALREADY_OPEN:
1687 		break;
1688 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1689 		dev_warn(&adapter->ccw_device->dev,
1690 			 "Not enough FCP adapter resources to open "
1691 			 "remote port 0x%016Lx\n",
1692 			 (unsigned long long)port->wwpn);
1693 		zfcp_erp_set_port_status(port,
1694 					 ZFCP_STATUS_COMMON_ERP_FAILED);
1695 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1696 		break;
1697 	case FSF_SECURITY_ERROR:
1698 		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
1699 					    header->fsf_status_qual.word[0],
1700 					    port->wwpn);
1701 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1702 		break;
1703 	case FSF_ADAPTER_STATUS_AVAILABLE:
1704 		switch (header->fsf_status_qual.word[0]) {
1705 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1706 			/* no zfcp_fc_test_link() with failed open port */
1707 			fallthrough;
1708 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1709 		case FSF_SQ_NO_RETRY_POSSIBLE:
1710 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1711 			break;
1712 		}
1713 		break;
1714 	case FSF_GOOD:
1715 		port->handle = header->port_handle;
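		/* FC Endpoint Security info is only valid if the adapter
		 * reports the FC security feature
		 */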
1716 		if (adapter->adapter_features & FSF_FEATURE_FC_SECURITY)
1717 			port->connection_info = bottom->connection_info;
1718 		else
1719 			port->connection_info = 0;
1720 		zfcp_fsf_log_port_fc_security(port, req);
1721 		atomic_or(ZFCP_STATUS_COMMON_OPEN |
1722 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1723 		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1724 		                  &port->status);
1725 		/* check whether D_ID has changed during open */
1726 		/*
1727 		 * FIXME: This check is not airtight, as the FCP channel does
1728 		 * not monitor closures of target port connections caused on
1729 		 * the remote side. Thus, it might miss out on invalidating
1730 		 * locally cached WWPNs (and other N_Port parameters) of gone
1731 		 * target ports. So, our heroic attempt to make things safe
1732 		 * could be undermined by 'open port' response data tagged with
1733 		 * obsolete WWPNs. This is another reason to monitor potential
1734 		 * connection closures ourselves, at least by interpreting
1735 		 * incoming ELS' and unsolicited status. One should also be
1736 		 * able to cross-check by means of another GID_PN straight
1737 		 * after a port has been opened. Alternatively, an ADISC/PDISC
1738 		 * ELS should suffice as well.
1739 		 */
1740 		plogi = (struct fc_els_flogi *) bottom->els;
1741 		if (bottom->els1_length >= FSF_PLOGI_MIN_LEN)
1742 			zfcp_fc_plogi_evaluate(port, plogi);
1743 		break;
1744 	case FSF_UNKNOWN_OP_SUBTYPE:
1745 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1746 		break;
1747 	}
1748 
1749 out:
1750 	put_device(&port->dev);
1751 }
1752 
1753 /**
1754  * zfcp_fsf_open_port - create and send open port request
1755  * @erp_action: pointer to struct zfcp_erp_action
1756  * Returns: 0 on success, error otherwise
1757  */
1758 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1759 {
1760 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1761 	struct zfcp_port *port = erp_action->port;
1762 	struct zfcp_fsf_req *req;
1763 	int retval = -EIO;
1764 
1765 	spin_lock_irq(&qdio->req_q_lock);
1766 	if (zfcp_qdio_sbal_get(qdio))
1767 		goto out;
1768 
1769 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1770 				  SBAL_SFLAGS0_TYPE_READ,
1771 				  qdio->adapter->pool.erp_req);
1772 
1773 	if (IS_ERR(req)) {
1774 		retval = PTR_ERR(req);
1775 		goto out;
1776 	}
1777 
1778 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1779 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1780 
1781 	req->handler = zfcp_fsf_open_port_handler;
1782 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
1783 	req->data = port;
1784 	req->erp_action = erp_action;
1785 	erp_action->fsf_req_id = req->req_id;
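	/* hold the port; the reference is dropped in the response handler,
	 * or below if sending the request fails
	 */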
1786 	get_device(&port->dev);
1787 
1788 	zfcp_fsf_start_erp_timer(req);
1789 	retval = zfcp_fsf_req_send(req);
1790 	if (retval) {
1791 		zfcp_fsf_req_free(req);
1792 		erp_action->fsf_req_id = 0;
1793 		put_device(&port->dev);
1794 	}
1795 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1796 out:
1797 	spin_unlock_irq(&qdio->req_q_lock);
1798 	return retval;
1799 }
1800 
1801 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1802 {
1803 	struct zfcp_port *port = req->data;
1804 
1805 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1806 		return;
1807 
1808 	switch (req->qtcb->header.fsf_status) {
1809 	case FSF_PORT_HANDLE_NOT_VALID:
1810 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1811 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1812 		break;
1813 	case FSF_ADAPTER_STATUS_AVAILABLE:
1814 		break;
1815 	case FSF_GOOD:
1816 		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1817 		break;
1818 	}
1819 }
1820 
1821 /**
1822  * zfcp_fsf_close_port - create and send close port request
1823  * @erp_action: pointer to struct zfcp_erp_action
1824  * Returns: 0 on success, error otherwise
1825  */
1826 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1827 {
1828 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1829 	struct zfcp_fsf_req *req;
1830 	int retval = -EIO;
1831 
1832 	spin_lock_irq(&qdio->req_q_lock);
1833 	if (zfcp_qdio_sbal_get(qdio))
1834 		goto out;
1835 
1836 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1837 				  SBAL_SFLAGS0_TYPE_READ,
1838 				  qdio->adapter->pool.erp_req);
1839 
1840 	if (IS_ERR(req)) {
1841 		retval = PTR_ERR(req);
1842 		goto out;
1843 	}
1844 
1845 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1846 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1847 
1848 	req->handler = zfcp_fsf_close_port_handler;
1849 	req->data = erp_action->port;
1850 	req->erp_action = erp_action;
1851 	req->qtcb->header.port_handle = erp_action->port->handle;
1852 	erp_action->fsf_req_id = req->req_id;
1853 
1854 	zfcp_fsf_start_erp_timer(req);
1855 	retval = zfcp_fsf_req_send(req);
1856 	if (retval) {
1857 		zfcp_fsf_req_free(req);
1858 		erp_action->fsf_req_id = 0;
1859 	}
1860 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1861 out:
1862 	spin_unlock_irq(&qdio->req_q_lock);
1863 	return retval;
1864 }
1865 
1866 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1867 {
1868 	struct zfcp_fc_wka_port *wka_port = req->data;
1869 	struct fsf_qtcb_header *header = &req->qtcb->header;
1870 
1871 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1872 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1873 		goto out;
1874 	}
1875 
1876 	switch (header->fsf_status) {
1877 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1878 		dev_warn(&req->adapter->ccw_device->dev,
1879 			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1880 		fallthrough;
1881 	case FSF_ADAPTER_STATUS_AVAILABLE:
1882 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1883 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1884 		break;
1885 	case FSF_GOOD:
1886 		wka_port->handle = header->port_handle;
1887 		fallthrough;
1888 	case FSF_PORT_ALREADY_OPEN:
1889 		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1890 	}
1891 out:
1892 	wake_up(&wka_port->completion_wq);
1893 }
1894 
1895 /**
1896  * zfcp_fsf_open_wka_port - create and send open wka-port request
1897  * @wka_port: pointer to struct zfcp_fc_wka_port
1898  * Returns: 0 on success, error otherwise
1899  */
1900 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1901 {
1902 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1903 	struct zfcp_fsf_req *req;
1904 	unsigned long req_id = 0;
1905 	int retval = -EIO;
1906 
1907 	spin_lock_irq(&qdio->req_q_lock);
1908 	if (zfcp_qdio_sbal_get(qdio))
1909 		goto out;
1910 
1911 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1912 				  SBAL_SFLAGS0_TYPE_READ,
1913 				  qdio->adapter->pool.erp_req);
1914 
1915 	if (IS_ERR(req)) {
1916 		retval = PTR_ERR(req);
1917 		goto out;
1918 	}
1919 
1920 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1921 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1922 
1923 	req->handler = zfcp_fsf_open_wka_port_handler;
1924 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1925 	req->data = wka_port;
1926 
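	/* keep a copy of the id for tracing; req itself must not be
	 * touched once it has been sent
	 */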
1927 	req_id = req->req_id;
1928 
1929 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1930 	retval = zfcp_fsf_req_send(req);
1931 	if (retval)
1932 		zfcp_fsf_req_free(req);
1933 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1934 out:
1935 	spin_unlock_irq(&qdio->req_q_lock);
1936 	if (!retval)
1937 		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
1938 	return retval;
1939 }
1940 
1941 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1942 {
1943 	struct zfcp_fc_wka_port *wka_port = req->data;
1944 
1945 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1946 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1947 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1948 	}
1949 
1950 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1951 	wake_up(&wka_port->completion_wq);
1952 }
1953 
1954 /**
1955  * zfcp_fsf_close_wka_port - create and send close wka port request
1956  * @wka_port: WKA port to close
1957  * Returns: 0 on success, error otherwise
1958  */
1959 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1960 {
1961 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1962 	struct zfcp_fsf_req *req;
1963 	unsigned long req_id = 0;
1964 	int retval = -EIO;
1965 
1966 	spin_lock_irq(&qdio->req_q_lock);
1967 	if (zfcp_qdio_sbal_get(qdio))
1968 		goto out;
1969 
1970 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1971 				  SBAL_SFLAGS0_TYPE_READ,
1972 				  qdio->adapter->pool.erp_req);
1973 
1974 	if (IS_ERR(req)) {
1975 		retval = PTR_ERR(req);
1976 		goto out;
1977 	}
1978 
1979 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1980 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1981 
1982 	req->handler = zfcp_fsf_close_wka_port_handler;
1983 	req->data = wka_port;
1984 	req->qtcb->header.port_handle = wka_port->handle;
1985 
1986 	req_id = req->req_id;
1987 
1988 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1989 	retval = zfcp_fsf_req_send(req);
1990 	if (retval)
1991 		zfcp_fsf_req_free(req);
1992 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1993 out:
1994 	spin_unlock_irq(&qdio->req_q_lock);
1995 	if (!retval)
1996 		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
1997 	return retval;
1998 }
1999 
2000 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
2001 {
2002 	struct zfcp_port *port = req->data;
2003 	struct fsf_qtcb_header *header = &req->qtcb->header;
2004 	struct scsi_device *sdev;
2005 
2006 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2007 		return;
2008 
2009 	switch (header->fsf_status) {
2010 	case FSF_PORT_HANDLE_NOT_VALID:
2011 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
2012 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2013 		break;
2014 	case FSF_PORT_BOXED:
2015 		/* can't use generic zfcp_erp_modify_port_status because
2016 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
2017 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2018 		shost_for_each_device(sdev, port->adapter->scsi_host)
2019 			if (sdev_to_zfcp(sdev)->port == port)
2020 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
2021 						  &sdev_to_zfcp(sdev)->status);
2022 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2023 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
2024 				     "fscpph2");
2025 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2026 		break;
2027 	case FSF_ADAPTER_STATUS_AVAILABLE:
2028 		switch (header->fsf_status_qual.word[0]) {
2029 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2030 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2031 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2032 			break;
2033 		}
2034 		break;
2035 	case FSF_GOOD:
2036 		/* can't use generic zfcp_erp_modify_port_status because
2037 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
2038 		 */
2039 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
2040 		shost_for_each_device(sdev, port->adapter->scsi_host)
2041 			if (sdev_to_zfcp(sdev)->port == port)
2042 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
2043 						  &sdev_to_zfcp(sdev)->status);
2044 		break;
2045 	}
2046 }
2047 
2048 /**
2049  * zfcp_fsf_close_physical_port - close physical port
2050  * @erp_action: pointer to struct zfcp_erp_action
2051  * Returns: 0 on success, error otherwise
2052  */
2053 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
2054 {
2055 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2056 	struct zfcp_fsf_req *req;
2057 	int retval = -EIO;
2058 
2059 	spin_lock_irq(&qdio->req_q_lock);
2060 	if (zfcp_qdio_sbal_get(qdio))
2061 		goto out;
2062 
2063 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
2064 				  SBAL_SFLAGS0_TYPE_READ,
2065 				  qdio->adapter->pool.erp_req);
2066 
2067 	if (IS_ERR(req)) {
2068 		retval = PTR_ERR(req);
2069 		goto out;
2070 	}
2071 
2072 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2073 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2074 
2075 	req->data = erp_action->port;
2076 	req->qtcb->header.port_handle = erp_action->port->handle;
2077 	req->erp_action = erp_action;
2078 	req->handler = zfcp_fsf_close_physical_port_handler;
2079 	erp_action->fsf_req_id = req->req_id;
2080 
2081 	zfcp_fsf_start_erp_timer(req);
2082 	retval = zfcp_fsf_req_send(req);
2083 	if (retval) {
2084 		zfcp_fsf_req_free(req);
2085 		erp_action->fsf_req_id = 0;
2086 	}
2087 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2088 out:
2089 	spin_unlock_irq(&qdio->req_q_lock);
2090 	return retval;
2091 }
2092 
2093 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
2094 {
2095 	struct zfcp_adapter *adapter = req->adapter;
2096 	struct scsi_device *sdev = req->data;
2097 	struct zfcp_scsi_dev *zfcp_sdev;
2098 	struct fsf_qtcb_header *header = &req->qtcb->header;
2099 	union fsf_status_qual *qual = &header->fsf_status_qual;
2100 
2101 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2102 		return;
2103 
2104 	zfcp_sdev = sdev_to_zfcp(sdev);
2105 
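	/* clear stale LUN access flags before evaluating the open result */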
2106 	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
2107 			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
2108 			  &zfcp_sdev->status);
2109 
2110 	switch (header->fsf_status) {
2111 
2112 	case FSF_PORT_HANDLE_NOT_VALID:
2113 		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
2114 		fallthrough;
2115 	case FSF_LUN_ALREADY_OPEN:
2116 		break;
2117 	case FSF_PORT_BOXED:
2118 		zfcp_erp_set_port_status(zfcp_sdev->port,
2119 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2120 		zfcp_erp_port_reopen(zfcp_sdev->port,
2121 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
2122 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2123 		break;
2124 	case FSF_LUN_SHARING_VIOLATION:
2125 		if (qual->word[0])
2126 			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
2127 				 "LUN 0x%016Lx on port 0x%016Lx is already in "
2128 				 "use by CSS%d, MIF Image ID %x\n",
2129 				 zfcp_scsi_dev_lun(sdev),
2130 				 (unsigned long long)zfcp_sdev->port->wwpn,
2131 				 qual->fsf_queue_designator.cssid,
2132 				 qual->fsf_queue_designator.hla);
2133 		zfcp_erp_set_lun_status(sdev,
2134 					ZFCP_STATUS_COMMON_ERP_FAILED |
2135 					ZFCP_STATUS_COMMON_ACCESS_DENIED);
2136 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2137 		break;
2138 	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
2139 		dev_warn(&adapter->ccw_device->dev,
2140 			 "No handle is available for LUN "
2141 			 "0x%016Lx on port 0x%016Lx\n",
2142 			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2143 			 (unsigned long long)zfcp_sdev->port->wwpn);
2144 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
2145 		fallthrough;
2146 	case FSF_INVALID_COMMAND_OPTION:
2147 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2148 		break;
2149 	case FSF_ADAPTER_STATUS_AVAILABLE:
2150 		switch (header->fsf_status_qual.word[0]) {
2151 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2152 			zfcp_fc_test_link(zfcp_sdev->port);
2153 			fallthrough;
2154 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2155 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2156 			break;
2157 		}
2158 		break;
2159 
2160 	case FSF_GOOD:
2161 		zfcp_sdev->lun_handle = header->lun_handle;
2162 		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
2163 		break;
2164 	}
2165 }
2166 
2167 /**
2168  * zfcp_fsf_open_lun - open LUN
2169  * @erp_action: pointer to struct zfcp_erp_action
2170  * Returns: 0 on success, error otherwise
2171  */
2172 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
2173 {
2174 	struct zfcp_adapter *adapter = erp_action->adapter;
2175 	struct zfcp_qdio *qdio = adapter->qdio;
2176 	struct zfcp_fsf_req *req;
2177 	int retval = -EIO;
2178 
2179 	spin_lock_irq(&qdio->req_q_lock);
2180 	if (zfcp_qdio_sbal_get(qdio))
2181 		goto out;
2182 
2183 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
2184 				  SBAL_SFLAGS0_TYPE_READ,
2185 				  adapter->pool.erp_req);
2186 
2187 	if (IS_ERR(req)) {
2188 		retval = PTR_ERR(req);
2189 		goto out;
2190 	}
2191 
2192 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2193 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2194 
2195 	req->qtcb->header.port_handle = erp_action->port->handle;
2196 	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
2197 	req->handler = zfcp_fsf_open_lun_handler;
2198 	req->data = erp_action->sdev;
2199 	req->erp_action = erp_action;
2200 	erp_action->fsf_req_id = req->req_id;
2201 
2202 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2203 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
2204 
2205 	zfcp_fsf_start_erp_timer(req);
2206 	retval = zfcp_fsf_req_send(req);
2207 	if (retval) {
2208 		zfcp_fsf_req_free(req);
2209 		erp_action->fsf_req_id = 0;
2210 	}
2211 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2212 out:
2213 	spin_unlock_irq(&qdio->req_q_lock);
2214 	return retval;
2215 }
2216 
2217 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
2218 {
2219 	struct scsi_device *sdev = req->data;
2220 	struct zfcp_scsi_dev *zfcp_sdev;
2221 
2222 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2223 		return;
2224 
2225 	zfcp_sdev = sdev_to_zfcp(sdev);
2226 
2227 	switch (req->qtcb->header.fsf_status) {
2228 	case FSF_PORT_HANDLE_NOT_VALID:
2229 		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
2230 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2231 		break;
2232 	case FSF_LUN_HANDLE_NOT_VALID:
2233 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
2234 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2235 		break;
2236 	case FSF_PORT_BOXED:
2237 		zfcp_erp_set_port_status(zfcp_sdev->port,
2238 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2239 		zfcp_erp_port_reopen(zfcp_sdev->port,
2240 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
2241 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2242 		break;
2243 	case FSF_ADAPTER_STATUS_AVAILABLE:
2244 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
2245 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2246 			zfcp_fc_test_link(zfcp_sdev->port);
2247 			fallthrough;
2248 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2249 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2250 			break;
2251 		}
2252 		break;
2253 	case FSF_GOOD:
2254 		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
2255 		break;
2256 	}
2257 }
2258 
2259 /**
2260  * zfcp_fsf_close_lun - close LUN
2261  * @erp_action: pointer to erp_action triggering the "close LUN"
2262  * Returns: 0 on success, error otherwise
2263  */
2264 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
2265 {
2266 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2267 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
2268 	struct zfcp_fsf_req *req;
2269 	int retval = -EIO;
2270 
2271 	spin_lock_irq(&qdio->req_q_lock);
2272 	if (zfcp_qdio_sbal_get(qdio))
2273 		goto out;
2274 
2275 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
2276 				  SBAL_SFLAGS0_TYPE_READ,
2277 				  qdio->adapter->pool.erp_req);
2278 
2279 	if (IS_ERR(req)) {
2280 		retval = PTR_ERR(req);
2281 		goto out;
2282 	}
2283 
2284 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2285 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2286 
2287 	req->qtcb->header.port_handle = erp_action->port->handle;
2288 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2289 	req->handler = zfcp_fsf_close_lun_handler;
2290 	req->data = erp_action->sdev;
2291 	req->erp_action = erp_action;
2292 	erp_action->fsf_req_id = req->req_id;
2293 
2294 	zfcp_fsf_start_erp_timer(req);
2295 	retval = zfcp_fsf_req_send(req);
2296 	if (retval) {
2297 		zfcp_fsf_req_free(req);
2298 		erp_action->fsf_req_id = 0;
2299 	}
2300 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2301 out:
2302 	spin_unlock_irq(&qdio->req_q_lock);
2303 	return retval;
2304 }
2305 
2306 static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
2307 {
2308 	lat_rec->sum += lat;
2309 	lat_rec->min = min(lat_rec->min, lat);
2310 	lat_rec->max = max(lat_rec->max, lat);
2311 }
2312 
2313 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2314 {
2315 	struct fsf_qual_latency_info *lat_in;
2316 	struct zfcp_latency_cont *lat = NULL;
2317 	struct zfcp_scsi_dev *zfcp_sdev;
2318 	struct zfcp_blk_drv_data blktrc;
2319 	int ticks = req->adapter->timer_ticks;
2320 
2321 	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2322 
2323 	blktrc.flags = 0;
2324 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2325 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2326 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2327 	blktrc.inb_usage = 0;
2328 	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2329 
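	/* latency values are only valid if the adapter provides measurement
	 * data and the request completed without error; the channel reports
	 * them in units of adapter timer ticks
	 */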
2330 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2331 	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2332 		zfcp_sdev = sdev_to_zfcp(scsi->device);
2333 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
2334 		blktrc.channel_lat = lat_in->channel_lat * ticks;
2335 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2336 
2337 		switch (req->qtcb->bottom.io.data_direction) {
2338 		case FSF_DATADIR_DIF_READ_STRIP:
2339 		case FSF_DATADIR_DIF_READ_CONVERT:
2340 		case FSF_DATADIR_READ:
2341 			lat = &zfcp_sdev->latencies.read;
2342 			break;
2343 		case FSF_DATADIR_DIF_WRITE_INSERT:
2344 		case FSF_DATADIR_DIF_WRITE_CONVERT:
2345 		case FSF_DATADIR_WRITE:
2346 			lat = &zfcp_sdev->latencies.write;
2347 			break;
2348 		case FSF_DATADIR_CMND:
2349 			lat = &zfcp_sdev->latencies.cmd;
2350 			break;
2351 		}
2352 
2353 		if (lat) {
2354 			spin_lock(&zfcp_sdev->latencies.lock);
2355 			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2356 			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2357 			lat->counter++;
2358 			spin_unlock(&zfcp_sdev->latencies.lock);
2359 		}
2360 	}
2361 
2362 	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2363 			    sizeof(blktrc));
2364 }
2365 
2366 /**
2367  * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
2368  * @req: Pointer to FSF request.
2369  * @sdev: Pointer to SCSI device as request context.
2370  */
2371 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
2372 					struct scsi_device *sdev)
2373 {
2374 	struct zfcp_scsi_dev *zfcp_sdev;
2375 	struct fsf_qtcb_header *header = &req->qtcb->header;
2376 
2377 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2378 		return;
2379 
2380 	zfcp_sdev = sdev_to_zfcp(sdev);
2381 
2382 	switch (header->fsf_status) {
2383 	case FSF_HANDLE_MISMATCH:
2384 	case FSF_PORT_HANDLE_NOT_VALID:
2385 		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
2386 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2387 		break;
2388 	case FSF_FCPLUN_NOT_VALID:
2389 	case FSF_LUN_HANDLE_NOT_VALID:
2390 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2391 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2392 		break;
2393 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2394 		zfcp_fsf_class_not_supp(req);
2395 		break;
2396 	case FSF_DIRECTION_INDICATOR_NOT_VALID:
2397 		dev_err(&req->adapter->ccw_device->dev,
2398 			"Incorrect direction %d, LUN 0x%016Lx on port "
2399 			"0x%016Lx closed\n",
2400 			req->qtcb->bottom.io.data_direction,
2401 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
2402 			(unsigned long long)zfcp_sdev->port->wwpn);
2403 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
2404 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2405 		break;
2406 	case FSF_CMND_LENGTH_NOT_VALID:
2407 		dev_err(&req->adapter->ccw_device->dev,
2408 			"Incorrect FCP_CMND length %d, FCP device closed\n",
2409 			req->qtcb->bottom.io.fcp_cmnd_length);
2410 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
2411 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2412 		break;
2413 	case FSF_PORT_BOXED:
2414 		zfcp_erp_set_port_status(zfcp_sdev->port,
2415 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2416 		zfcp_erp_port_reopen(zfcp_sdev->port,
2417 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2418 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2419 		break;
2420 	case FSF_LUN_BOXED:
2421 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2422 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2423 				    "fssfch6");
2424 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2425 		break;
2426 	case FSF_ADAPTER_STATUS_AVAILABLE:
2427 		if (header->fsf_status_qual.word[0] ==
2428 		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2429 			zfcp_fc_test_link(zfcp_sdev->port);
2430 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2431 		break;
2432 	case FSF_SECURITY_ERROR:
2433 		zfcp_fsf_log_security_error(&req->adapter->ccw_device->dev,
2434 					    header->fsf_status_qual.word[0],
2435 					    zfcp_sdev->port->wwpn);
2436 		zfcp_erp_port_forced_reopen(zfcp_sdev->port, 0, "fssfch7");
2437 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2438 		break;
2439 	}
2440 }
2441 
2442 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2443 {
2444 	struct scsi_cmnd *scpnt;
2445 	struct fcp_resp_with_ext *fcp_rsp;
2446 	unsigned long flags;
2447 
2448 	read_lock_irqsave(&req->adapter->abort_lock, flags);
2449 
2450 	scpnt = req->data;
2451 	if (unlikely(!scpnt)) {
2452 		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2453 		return;
2454 	}
2455 
2456 	zfcp_fsf_fcp_handler_common(req, scpnt->device);
2457 
2458 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2459 		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2460 		goto skip_fsfstatus;
2461 	}
2462 
2463 	switch (req->qtcb->header.fsf_status) {
2464 	case FSF_INCONSISTENT_PROT_DATA:
2465 	case FSF_INVALID_PROT_PARM:
2466 		set_host_byte(scpnt, DID_ERROR);
2467 		goto skip_fsfstatus;
2468 	case FSF_BLOCK_GUARD_CHECK_FAILURE:
2469 		zfcp_scsi_dif_sense_error(scpnt, 0x1);
2470 		goto skip_fsfstatus;
2471 	case FSF_APP_TAG_CHECK_FAILURE:
2472 		zfcp_scsi_dif_sense_error(scpnt, 0x2);
2473 		goto skip_fsfstatus;
2474 	case FSF_REF_TAG_CHECK_FAILURE:
2475 		zfcp_scsi_dif_sense_error(scpnt, 0x3);
2476 		goto skip_fsfstatus;
2477 	}
2478 	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
2479 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2480 	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2481 
2482 skip_fsfstatus:
2483 	zfcp_fsf_req_trace(req, scpnt);
2484 	zfcp_dbf_scsi_result(scpnt, req);
2485 
2486 	scpnt->host_scribble = NULL;
2487 	(scpnt->scsi_done) (scpnt);
2488 	/*
2489 	 * We must hold this lock until scsi_done has been called.
2490 	 * Otherwise we may call scsi_done after the abort for this
2491 	 * command has completed.
2492 	 * Note: scsi_done must not block!
2493 	 */
2494 	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2495 }
2496 
2497 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2498 {
2499 	switch (scsi_get_prot_op(scsi_cmnd)) {
2500 	case SCSI_PROT_NORMAL:
2501 		switch (scsi_cmnd->sc_data_direction) {
2502 		case DMA_NONE:
2503 			*data_dir = FSF_DATADIR_CMND;
2504 			break;
2505 		case DMA_FROM_DEVICE:
2506 			*data_dir = FSF_DATADIR_READ;
2507 			break;
2508 		case DMA_TO_DEVICE:
2509 			*data_dir = FSF_DATADIR_WRITE;
2510 			break;
2511 		case DMA_BIDIRECTIONAL:
2512 			return -EINVAL;
2513 		}
2514 		break;
2515 
2516 	case SCSI_PROT_READ_STRIP:
2517 		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
2518 		break;
2519 	case SCSI_PROT_WRITE_INSERT:
2520 		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2521 		break;
2522 	case SCSI_PROT_READ_PASS:
2523 		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2524 		break;
2525 	case SCSI_PROT_WRITE_PASS:
2526 		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2527 		break;
2528 	default:
2529 		return -EINVAL;
2530 	}
2531 
2532 	return 0;
2533 }
2534 
2535 /**
2536  * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2537  * @scsi_cmnd: scsi command to be sent
2538  */
2539 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2540 {
2541 	struct zfcp_fsf_req *req;
2542 	struct fcp_cmnd *fcp_cmnd;
2543 	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2544 	int retval = -EIO;
2545 	struct scsi_device *sdev = scsi_cmnd->device;
2546 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2547 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2548 	struct zfcp_qdio *qdio = adapter->qdio;
2549 	struct fsf_qtcb_bottom_io *io;
2550 	unsigned long flags;
2551 
2552 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2553 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2554 		return -EBUSY;
2555 
2556 	spin_lock_irqsave(&qdio->req_q_lock, flags);
2557 	if (atomic_read(&qdio->req_q_free) <= 0) {
2558 		atomic_inc(&qdio->req_q_full);
2559 		goto out;
2560 	}
2561 
2562 	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2563 		sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2564 
2565 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2566 				  sbtype, adapter->pool.scsi_req);
2567 
2568 	if (IS_ERR(req)) {
2569 		retval = PTR_ERR(req);
2570 		goto out;
2571 	}
2572 
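	/* remember the request id in the SCSI command so that the abort
	 * path can look this request up later
	 */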
2573 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2574 
2575 	io = &req->qtcb->bottom.io;
2576 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2577 	req->data = scsi_cmnd;
2578 	req->handler = zfcp_fsf_fcp_cmnd_handler;
2579 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2580 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2581 	io->service_class = FSF_CLASS_3;
2582 	io->fcp_cmnd_length = FCP_CMND_LEN;
2583 
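	/* for T10 DIF protected commands, pass the logical block size and
	 * the initial reference tag to the FCP channel
	 */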
2584 	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2585 		io->data_block_length = scsi_cmnd->device->sector_size;
2586 		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2587 	}
2588 
2589 	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2590 		goto failed_scsi_cmnd;
2591 
2592 	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
2593 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2594 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2595 
2596 	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
2597 	    scsi_prot_sg_count(scsi_cmnd)) {
2598 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2599 				       scsi_prot_sg_count(scsi_cmnd));
2600 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2601 						 scsi_prot_sglist(scsi_cmnd));
2602 		if (retval)
2603 			goto failed_scsi_cmnd;
2604 		io->prot_data_length = zfcp_qdio_real_bytes(
2605 						scsi_prot_sglist(scsi_cmnd));
2606 	}
2607 
2608 	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2609 					 scsi_sglist(scsi_cmnd));
2610 	if (unlikely(retval))
2611 		goto failed_scsi_cmnd;
2612 
2613 	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2614 	if (zfcp_adapter_multi_buffer_active(adapter))
2615 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
2616 
2617 	retval = zfcp_fsf_req_send(req);
2618 	if (unlikely(retval))
2619 		goto failed_scsi_cmnd;
2620 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2621 
2622 	goto out;
2623 
2624 failed_scsi_cmnd:
2625 	zfcp_fsf_req_free(req);
2626 	scsi_cmnd->host_scribble = NULL;
2627 out:
2628 	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2629 	return retval;
2630 }
2631 
2632 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2633 {
2634 	struct scsi_device *sdev = req->data;
2635 	struct fcp_resp_with_ext *fcp_rsp;
2636 	struct fcp_resp_rsp_info *rsp_info;
2637 
2638 	zfcp_fsf_fcp_handler_common(req, sdev);
2639 
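	/* the FCP_RSP_INFO field follows the fixed part of the FCP response */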
2640 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2641 	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2642 
2643 	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2644 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2645 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2646 }
2647 
2648 /**
2649  * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
2650  * @sdev: Pointer to SCSI device to send the task management command to.
2651  * @tm_flags: Unsigned byte for task management flags.
2652  *
2653  * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
2654  */
2655 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
2656 					    u8 tm_flags)
2657 {
2658 	struct zfcp_fsf_req *req = NULL;
2659 	struct fcp_cmnd *fcp_cmnd;
2660 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2661 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2662 
2663 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2664 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2665 		return NULL;
2666 
2667 	spin_lock_irq(&qdio->req_q_lock);
2668 	if (zfcp_qdio_sbal_get(qdio))
2669 		goto out;
2670 
2671 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2672 				  SBAL_SFLAGS0_TYPE_WRITE,
2673 				  qdio->adapter->pool.scsi_req);
2674 
2675 	if (IS_ERR(req)) {
2676 		req = NULL;
2677 		goto out;
2678 	}
2679 
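	/* a TMF is carried in an FCP command without data transfer; only the
	 * command IU conveys the task management flags
	 */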
2680 	req->data = sdev;
2681 
2682 	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2683 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2684 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2685 	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2686 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2687 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2688 
2689 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2690 
2691 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2692 	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
2693 
2694 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
2695 	if (!zfcp_fsf_req_send(req)) {
2696 		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
2697 		goto out;
2698 	}
2699 
2700 	zfcp_fsf_req_free(req);
2701 	req = NULL;
2702 out:
2703 	spin_unlock_irq(&qdio->req_q_lock);
2704 	return req;
2705 }
2706 
2707 /**
2708  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2709  * @qdio: pointer to struct zfcp_qdio
2710  * @sbal_idx: response queue index of SBAL to be processed
2711  */
2712 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2713 {
2714 	struct zfcp_adapter *adapter = qdio->adapter;
2715 	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2716 	struct qdio_buffer_element *sbale;
2717 	struct zfcp_fsf_req *fsf_req;
2718 	unsigned long req_id;
2719 	int idx;
2720 
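	/* each used SBALE carries the id of the request it completes;
	 * process entries up to and including the last one of this SBAL
	 */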
2721 	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2722 
2723 		sbale = &sbal->element[idx];
2724 		req_id = sbale->addr;
2725 		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2726 
2727 		if (!fsf_req) {
2728 			/*
2729 			 * An unknown request id means we potentially have memory
2730 			 * corruption and must stop the machine immediately.
2731 			 */
2732 			zfcp_qdio_siosl(adapter);
2733 			panic("error: unknown req_id (%lx) on adapter %s.\n",
2734 			      req_id, dev_name(&adapter->ccw_device->dev));
2735 		}
2736 
2737 		zfcp_fsf_req_complete(fsf_req);
2738 
2739 		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2740 			break;
2741 	}
2742 }
2743