1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * zfcp device driver
4  *
5  * Implementation of FSF commands.
6  *
7  * Copyright IBM Corp. 2002, 2018
8  */
9 
10 #define KMSG_COMPONENT "zfcp"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 
13 #include <linux/blktrace_api.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <scsi/fc/fc_els.h>
17 #include "zfcp_ext.h"
18 #include "zfcp_fc.h"
19 #include "zfcp_dbf.h"
20 #include "zfcp_qdio.h"
21 #include "zfcp_reqlist.h"
22 
23 /* timeout for FSF requests sent during scsi_eh: abort or FCP TMF */
24 #define ZFCP_FSF_SCSI_ER_TIMEOUT (10*HZ)
25 /* timeout for: exchange config/port data outside ERP, or open/close WKA port */
26 #define ZFCP_FSF_REQUEST_TIMEOUT (60*HZ)
27 
28 struct kmem_cache *zfcp_fsf_qtcb_cache;
29 
30 static bool ber_stop = true;
31 module_param(ber_stop, bool, 0600);
32 MODULE_PARM_DESC(ber_stop,
33 		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of the threshold (default on)");
34 
35 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
36 {
37 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
38 	struct zfcp_adapter *adapter = fsf_req->adapter;
39 
40 	zfcp_qdio_siosl(adapter);
41 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
42 				"fsrth_1");
43 }
44 
45 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
46 				 unsigned long timeout)
47 {
48 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
49 	fsf_req->timer.expires = jiffies + timeout;
50 	add_timer(&fsf_req->timer);
51 }
52 
53 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
54 {
55 	BUG_ON(!fsf_req->erp_action);
56 	fsf_req->timer.function = zfcp_erp_timeout_handler;
57 	fsf_req->timer.expires = jiffies + 30 * HZ;
58 	add_timer(&fsf_req->timer);
59 }
60 
61 /* association between FSF command and FSF QTCB type */
62 static u32 fsf_qtcb_type[] = {
63 	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
64 	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
65 	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
66 	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
67 	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
68 	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
69 	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
70 	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
71 	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
72 	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
73 	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
74 	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
75 	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
76 };
77 
78 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
79 {
80 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
81 		"operational because of an unsupported FC class\n");
82 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
83 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
84 }
85 
86 /**
87  * zfcp_fsf_req_free - free memory used by fsf request
88  * @req: pointer to struct zfcp_fsf_req
89  */
90 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
91 {
92 	if (likely(req->pool)) {
93 		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
94 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
95 		mempool_free(req, req->pool);
96 		return;
97 	}
98 
99 	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
100 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
101 	kfree(req);
102 }
103 
104 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
105 {
106 	unsigned long flags;
107 	struct fsf_status_read_buffer *sr_buf = req->data;
108 	struct zfcp_adapter *adapter = req->adapter;
109 	struct zfcp_port *port;
110 	int d_id = ntoh24(sr_buf->d_id);
111 
112 	read_lock_irqsave(&adapter->port_list_lock, flags);
113 	list_for_each_entry(port, &adapter->port_list, list)
114 		if (port->d_id == d_id) {
115 			zfcp_erp_port_reopen(port, 0, "fssrpc1");
116 			break;
117 		}
118 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
119 }
120 
121 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
122 					 struct fsf_link_down_info *link_down)
123 {
124 	struct zfcp_adapter *adapter = req->adapter;
125 
126 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
127 		return;
128 
129 	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
130 
131 	zfcp_scsi_schedule_rports_block(adapter);
132 
133 	if (!link_down)
134 		goto out;
135 
136 	switch (link_down->error_code) {
137 	case FSF_PSQ_LINK_NO_LIGHT:
138 		dev_warn(&req->adapter->ccw_device->dev,
139 			 "There is no light signal from the local "
140 			 "fibre channel cable\n");
141 		break;
142 	case FSF_PSQ_LINK_WRAP_PLUG:
143 		dev_warn(&req->adapter->ccw_device->dev,
144 			 "There is a wrap plug instead of a fibre "
145 			 "channel cable\n");
146 		break;
147 	case FSF_PSQ_LINK_NO_FCP:
148 		dev_warn(&req->adapter->ccw_device->dev,
149 			 "The adjacent fibre channel node does not "
150 			 "support FCP\n");
151 		break;
152 	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
153 		dev_warn(&req->adapter->ccw_device->dev,
154 			 "The FCP device is suspended because of a "
155 			 "firmware update\n");
156 		break;
157 	case FSF_PSQ_LINK_INVALID_WWPN:
158 		dev_warn(&req->adapter->ccw_device->dev,
159 			 "The FCP device detected a WWPN that is "
160 			 "duplicate or not valid\n");
161 		break;
162 	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
163 		dev_warn(&req->adapter->ccw_device->dev,
164 			 "The fibre channel fabric does not support NPIV\n");
165 		break;
166 	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
167 		dev_warn(&req->adapter->ccw_device->dev,
168 			 "The FCP adapter cannot support more NPIV ports\n");
169 		break;
170 	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
171 		dev_warn(&req->adapter->ccw_device->dev,
172 			 "The adjacent switch cannot support "
173 			 "more NPIV ports\n");
174 		break;
175 	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
176 		dev_warn(&req->adapter->ccw_device->dev,
177 			 "The FCP adapter could not log in to the "
178 			 "fibre channel fabric\n");
179 		break;
180 	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
181 		dev_warn(&req->adapter->ccw_device->dev,
182 			 "The WWPN assignment file on the FCP adapter "
183 			 "has been damaged\n");
184 		break;
185 	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
186 		dev_warn(&req->adapter->ccw_device->dev,
187 			 "The mode table on the FCP adapter "
188 			 "has been damaged\n");
189 		break;
190 	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
191 		dev_warn(&req->adapter->ccw_device->dev,
192 			 "All NPIV ports on the FCP adapter have "
193 			 "been assigned\n");
194 		break;
195 	default:
196 		dev_warn(&req->adapter->ccw_device->dev,
197 			 "The link between the FCP adapter and "
198 			 "the FC fabric is down\n");
199 	}
200 out:
201 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
202 }
203 
204 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
205 {
206 	struct fsf_status_read_buffer *sr_buf = req->data;
207 	struct fsf_link_down_info *ldi =
208 		(struct fsf_link_down_info *) &sr_buf->payload;
209 
210 	switch (sr_buf->status_subtype) {
211 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
212 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
213 		zfcp_fsf_link_down_info_eval(req, ldi);
214 		break;
215 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
216 		zfcp_fsf_link_down_info_eval(req, NULL);
217 	}
218 }
219 
220 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
221 {
222 	struct zfcp_adapter *adapter = req->adapter;
223 	struct fsf_status_read_buffer *sr_buf = req->data;
224 
225 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
226 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
227 		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
228 		zfcp_fsf_req_free(req);
229 		return;
230 	}
231 
232 	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
233 
234 	switch (sr_buf->status_type) {
235 	case FSF_STATUS_READ_PORT_CLOSED:
236 		zfcp_fsf_status_read_port_closed(req);
237 		break;
238 	case FSF_STATUS_READ_INCOMING_ELS:
239 		zfcp_fc_incoming_els(req);
240 		break;
241 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
242 		break;
243 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
244 		zfcp_dbf_hba_bit_err("fssrh_3", req);
245 		if (ber_stop) {
246 			dev_warn(&adapter->ccw_device->dev,
247 				 "All paths over this FCP device are disused because of excessive bit errors\n");
248 			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
249 		} else {
250 			dev_warn(&adapter->ccw_device->dev,
251 				 "The error threshold for checksum statistics has been exceeded\n");
252 		}
253 		break;
254 	case FSF_STATUS_READ_LINK_DOWN:
255 		zfcp_fsf_status_read_link_down(req);
256 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
257 		break;
258 	case FSF_STATUS_READ_LINK_UP:
259 		dev_info(&adapter->ccw_device->dev,
260 			 "The local link has been restored\n");
261 		/* All ports should be marked as ready to run again */
262 		zfcp_erp_set_adapter_status(adapter,
263 					    ZFCP_STATUS_COMMON_RUNNING);
264 		zfcp_erp_adapter_reopen(adapter,
265 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
266 					ZFCP_STATUS_COMMON_ERP_FAILED,
267 					"fssrh_2");
268 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
269 
270 		break;
271 	case FSF_STATUS_READ_NOTIFICATION_LOST:
272 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
273 			zfcp_fc_conditional_port_scan(adapter);
274 		break;
275 	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
276 		adapter->adapter_features = sr_buf->payload.word[0];
277 		break;
278 	}
279 
280 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
281 	zfcp_fsf_req_free(req);
282 
283 	atomic_inc(&adapter->stat_miss);
284 	queue_work(adapter->work_queue, &adapter->stat_work);
285 }
286 
287 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
288 {
289 	switch (req->qtcb->header.fsf_status_qual.word[0]) {
290 	case FSF_SQ_FCP_RSP_AVAILABLE:
291 	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
292 	case FSF_SQ_NO_RETRY_POSSIBLE:
293 	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
294 		return;
295 	case FSF_SQ_COMMAND_ABORTED:
296 		break;
297 	case FSF_SQ_NO_RECOM:
298 		dev_err(&req->adapter->ccw_device->dev,
299 			"The FCP adapter reported a problem "
300 			"that cannot be recovered\n");
301 		zfcp_qdio_siosl(req->adapter);
302 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
303 		break;
304 	}
305 	/* all non-return stats set FSFREQ_ERROR */
306 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
307 }
308 
309 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
310 {
311 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
312 		return;
313 
314 	switch (req->qtcb->header.fsf_status) {
315 	case FSF_UNKNOWN_COMMAND:
316 		dev_err(&req->adapter->ccw_device->dev,
317 			"The FCP adapter does not recognize the command 0x%x\n",
318 			req->qtcb->header.fsf_command);
319 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
320 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
321 		break;
322 	case FSF_ADAPTER_STATUS_AVAILABLE:
323 		zfcp_fsf_fsfstatus_qual_eval(req);
324 		break;
325 	}
326 }
327 
328 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
329 {
330 	struct zfcp_adapter *adapter = req->adapter;
331 	struct fsf_qtcb *qtcb = req->qtcb;
332 	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
333 
334 	zfcp_dbf_hba_fsf_response(req);
335 
336 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
337 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
338 		return;
339 	}
340 
341 	switch (qtcb->prefix.prot_status) {
342 	case FSF_PROT_GOOD:
343 	case FSF_PROT_FSF_STATUS_PRESENTED:
344 		return;
345 	case FSF_PROT_QTCB_VERSION_ERROR:
346 		dev_err(&adapter->ccw_device->dev,
347 			"QTCB version 0x%x not supported by FCP adapter "
348 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
349 			psq->word[0], psq->word[1]);
350 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
351 		break;
352 	case FSF_PROT_ERROR_STATE:
353 	case FSF_PROT_SEQ_NUMB_ERROR:
354 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
355 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
356 		break;
357 	case FSF_PROT_UNSUPP_QTCB_TYPE:
358 		dev_err(&adapter->ccw_device->dev,
359 			"The QTCB type is not supported by the FCP adapter\n");
360 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
361 		break;
362 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
363 		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
364 				&adapter->status);
365 		break;
366 	case FSF_PROT_DUPLICATE_REQUEST_ID:
367 		dev_err(&adapter->ccw_device->dev,
368 			"0x%Lx is an ambiguous request identifier\n",
369 			(unsigned long long)qtcb->bottom.support.req_handle);
370 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
371 		break;
372 	case FSF_PROT_LINK_DOWN:
373 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
374 		/* go through reopen to flush pending requests */
375 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
376 		break;
377 	case FSF_PROT_REEST_QUEUE:
378 		/* All ports should be marked as ready to run again */
379 		zfcp_erp_set_adapter_status(adapter,
380 					    ZFCP_STATUS_COMMON_RUNNING);
381 		zfcp_erp_adapter_reopen(adapter,
382 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
383 					ZFCP_STATUS_COMMON_ERP_FAILED,
384 					"fspse_8");
385 		break;
386 	default:
387 		dev_err(&adapter->ccw_device->dev,
388 			"0x%x is not a valid transfer protocol status\n",
389 			qtcb->prefix.prot_status);
390 		zfcp_qdio_siosl(adapter);
391 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
392 	}
393 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
394 }
395 
396 /**
397  * zfcp_fsf_req_complete - process completion of an FSF request
398  * @req: The FSF request that has been completed.
399  *
400  * When a request has been completed either from the FCP adapter,
401  * or it has been dismissed due to a queue shutdown, this function
402  * is called to process the completion status and trigger further
403  * events related to the FSF request.
404  */
405 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
406 {
407 	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
408 		zfcp_fsf_status_read_handler(req);
409 		return;
410 	}
411 
412 	del_timer(&req->timer);
413 	zfcp_fsf_protstatus_eval(req);
414 	zfcp_fsf_fsfstatus_eval(req);
415 	req->handler(req);
416 
417 	if (req->erp_action)
418 		zfcp_erp_notify(req->erp_action, 0);
419 
420 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
421 		zfcp_fsf_req_free(req);
422 	else
423 		complete(&req->completion);
424 }
425 
426 /**
427  * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
428  * @adapter: pointer to struct zfcp_adapter
429  *
430  * Never ever call this without shutting down the adapter first.
431  * Otherwise the adapter would continue using and corrupting s390 storage.
432  * Included BUG_ON() call to ensure this is done.
433  * ERP is supposed to be the only user of this function.
434  */
435 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
436 {
437 	struct zfcp_fsf_req *req, *tmp;
438 	LIST_HEAD(remove_queue);
439 
440 	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
441 	zfcp_reqlist_move(adapter->req_list, &remove_queue);
442 
443 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
444 		list_del(&req->list);
445 		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
446 		zfcp_fsf_req_complete(req);
447 	}
448 }
449 
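/*
 * Port speed capability bits as reported by FSF; translated into
 * FC transport FC_PORTSPEED_* flags by zfcp_fsf_convert_portspeed() below.
 */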
450 #define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
451 #define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
452 #define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
453 #define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
454 #define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
455 #define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
456 #define ZFCP_FSF_PORTSPEED_32GBIT	(1 <<  6)
457 #define ZFCP_FSF_PORTSPEED_64GBIT	(1 <<  7)
458 #define ZFCP_FSF_PORTSPEED_128GBIT	(1 <<  8)
459 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
460 
461 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
462 {
463 	u32 fdmi_speed = 0;
464 	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
465 		fdmi_speed |= FC_PORTSPEED_1GBIT;
466 	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
467 		fdmi_speed |= FC_PORTSPEED_2GBIT;
468 	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
469 		fdmi_speed |= FC_PORTSPEED_4GBIT;
470 	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
471 		fdmi_speed |= FC_PORTSPEED_10GBIT;
472 	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
473 		fdmi_speed |= FC_PORTSPEED_8GBIT;
474 	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
475 		fdmi_speed |= FC_PORTSPEED_16GBIT;
476 	if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
477 		fdmi_speed |= FC_PORTSPEED_32GBIT;
478 	if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
479 		fdmi_speed |= FC_PORTSPEED_64GBIT;
480 	if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
481 		fdmi_speed |= FC_PORTSPEED_128GBIT;
482 	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
483 		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
484 	return fdmi_speed;
485 }
486 
487 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
488 {
489 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
490 	struct zfcp_adapter *adapter = req->adapter;
491 	struct Scsi_Host *shost = adapter->scsi_host;
492 	struct fc_els_flogi *nsp, *plogi;
493 
494 	/* adjust pointers for missing command code */
495 	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
496 					- sizeof(u32));
497 	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
498 					- sizeof(u32));
499 
500 	if (req->data)
501 		memcpy(req->data, bottom, sizeof(*bottom));
502 
503 	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
504 	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
505 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
506 
507 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
508 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
509 					 (u16)FSF_STATUS_READS_RECOM);
510 
511 	if (fc_host_permanent_port_name(shost) == -1)
512 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
513 
514 	zfcp_scsi_set_prot(adapter);
515 
516 	/* no error return above here, otherwise must fix call chains */
517 	/* do not evaluate invalid fields */
518 	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
519 		return 0;
520 
521 	fc_host_port_id(shost) = ntoh24(bottom->s_id);
522 	fc_host_speed(shost) =
523 		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
524 
525 	adapter->hydra_version = bottom->adapter_type;
526 
527 	switch (bottom->fc_topology) {
528 	case FSF_TOPO_P2P:
529 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
530 		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
531 		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
532 		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
533 		break;
534 	case FSF_TOPO_FABRIC:
535 		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
536 			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
537 		else
538 			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
539 		break;
540 	case FSF_TOPO_AL:
541 		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
542 		/* fall through */
543 	default:
544 		dev_err(&adapter->ccw_device->dev,
545 			"Unknown or unsupported arbitrated loop "
546 			"fibre channel topology detected\n");
547 		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
548 		return -EIO;
549 	}
550 
551 	return 0;
552 }
553 
554 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
555 {
556 	struct zfcp_adapter *adapter = req->adapter;
557 	struct fsf_qtcb *qtcb = req->qtcb;
558 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
559 	struct Scsi_Host *shost = adapter->scsi_host;
560 
561 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
562 		return;
563 
564 	adapter->fsf_lic_version = bottom->lic_version;
565 	adapter->adapter_features = bottom->adapter_features;
566 	adapter->connection_features = bottom->connection_features;
567 	adapter->peer_wwpn = 0;
568 	adapter->peer_wwnn = 0;
569 	adapter->peer_d_id = 0;
570 
571 	switch (qtcb->header.fsf_status) {
572 	case FSF_GOOD:
573 		if (zfcp_fsf_exchange_config_evaluate(req))
574 			return;
575 
576 		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
577 			dev_err(&adapter->ccw_device->dev,
578 				"FCP adapter maximum QTCB size (%d bytes) "
579 				"is too small\n",
580 				bottom->max_qtcb_size);
581 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
582 			return;
583 		}
584 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
585 				&adapter->status);
586 		break;
587 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
588 		fc_host_node_name(shost) = 0;
589 		fc_host_port_name(shost) = 0;
590 		fc_host_port_id(shost) = 0;
591 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
592 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
593 		adapter->hydra_version = 0;
594 
595 		/* avoids adapter shutdown to be able to recognize
596 		 * events such as LINK UP */
597 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
598 				&adapter->status);
599 		zfcp_fsf_link_down_info_eval(req,
600 			&qtcb->header.fsf_status_qual.link_down_info);
601 		if (zfcp_fsf_exchange_config_evaluate(req))
602 			return;
603 		break;
604 	default:
605 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
606 		return;
607 	}
608 
609 	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
610 		adapter->hardware_version = bottom->hardware_version;
611 		memcpy(fc_host_serial_number(shost), bottom->serial_number,
612 		       min(FC_SERIAL_NUMBER_SIZE, 17));
613 		EBCASC(fc_host_serial_number(shost),
614 		       min(FC_SERIAL_NUMBER_SIZE, 17));
615 	}
616 
617 	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
618 		dev_err(&adapter->ccw_device->dev,
619 			"The FCP adapter only supports newer "
620 			"control block versions\n");
621 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
622 		return;
623 	}
624 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
625 		dev_err(&adapter->ccw_device->dev,
626 			"The FCP adapter only supports older "
627 			"control block versions\n");
628 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
629 	}
630 }
631 
632 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
633 {
634 	struct zfcp_adapter *adapter = req->adapter;
635 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
636 	struct Scsi_Host *shost = adapter->scsi_host;
637 
638 	if (req->data)
639 		memcpy(req->data, bottom, sizeof(*bottom));
640 
641 	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
642 		fc_host_permanent_port_name(shost) = bottom->wwpn;
643 	} else
644 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
645 	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
646 	fc_host_supported_speeds(shost) =
647 		zfcp_fsf_convert_portspeed(bottom->supported_speed);
648 	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
649 	       FC_FC4_LIST_SIZE);
650 	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
651 	       FC_FC4_LIST_SIZE);
652 }
653 
654 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
655 {
656 	struct fsf_qtcb *qtcb = req->qtcb;
657 
658 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
659 		return;
660 
661 	switch (qtcb->header.fsf_status) {
662 	case FSF_GOOD:
663 		zfcp_fsf_exchange_port_evaluate(req);
664 		break;
665 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
666 		zfcp_fsf_exchange_port_evaluate(req);
667 		zfcp_fsf_link_down_info_eval(req,
668 			&qtcb->header.fsf_status_qual.link_down_info);
669 		break;
670 	}
671 }
672 
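/* allocate a zeroed FSF request from @pool if one is given, else via kmalloc */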
673 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
674 {
675 	struct zfcp_fsf_req *req;
676 
677 	if (likely(pool))
678 		req = mempool_alloc(pool, GFP_ATOMIC);
679 	else
680 		req = kmalloc(sizeof(*req), GFP_ATOMIC);
681 
682 	if (unlikely(!req))
683 		return NULL;
684 
685 	memset(req, 0, sizeof(*req));
686 	req->pool = pool;
687 	return req;
688 }
689 
690 static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
691 {
692 	struct fsf_qtcb *qtcb;
693 
694 	if (likely(pool))
695 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
696 	else
697 		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
698 
699 	if (unlikely(!qtcb))
700 		return NULL;
701 
702 	memset(qtcb, 0, sizeof(*qtcb));
703 	return qtcb;
704 }
705 
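/*
 * Allocate and initialize an FSF request, including its QTCB unless it is
 * an unsolicited status read request, and set up the associated QDIO request.
 */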
706 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
707 						u32 fsf_cmd, u8 sbtype,
708 						mempool_t *pool)
709 {
710 	struct zfcp_adapter *adapter = qdio->adapter;
711 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
712 
713 	if (unlikely(!req))
714 		return ERR_PTR(-ENOMEM);
715 
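	/* the request ID counter may wrap around; never hand out ID 0 */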
716 	if (adapter->req_no == 0)
717 		adapter->req_no++;
718 
719 	INIT_LIST_HEAD(&req->list);
720 	timer_setup(&req->timer, NULL, 0);
721 	init_completion(&req->completion);
722 
723 	req->adapter = adapter;
724 	req->req_id = adapter->req_no;
725 
726 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
727 		if (likely(pool))
728 			req->qtcb = zfcp_fsf_qtcb_alloc(
729 				adapter->pool.qtcb_pool);
730 		else
731 			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
732 
733 		if (unlikely(!req->qtcb)) {
734 			zfcp_fsf_req_free(req);
735 			return ERR_PTR(-ENOMEM);
736 		}
737 
738 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
739 		req->qtcb->prefix.req_id = req->req_id;
740 		req->qtcb->prefix.ulp_info = 26;
741 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_cmd];
742 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
743 		req->qtcb->header.req_handle = req->req_id;
744 		req->qtcb->header.fsf_command = fsf_cmd;
745 	}
746 
747 	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
748 			   req->qtcb, sizeof(struct fsf_qtcb));
749 
750 	return req;
751 }
752 
753 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
754 {
755 	const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req);
756 	struct zfcp_adapter *adapter = req->adapter;
757 	struct zfcp_qdio *qdio = adapter->qdio;
758 	int req_id = req->req_id;
759 
760 	zfcp_reqlist_add(adapter->req_list, req);
761 
762 	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
763 	req->issued = get_tod_clock();
764 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
765 		del_timer(&req->timer);
766 		/* lookup request again, list might have changed */
767 		zfcp_reqlist_find_rm(adapter->req_list, req_id);
768 		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
769 		return -EIO;
770 	}
771 
772 	/*
773 	 * NOTE: DO NOT TOUCH ASYNC req PAST THIS POINT.
774 	 *	 ONLY TOUCH SYNC req AGAIN ON req->completion.
775 	 *
776 	 * The request might complete and be freed concurrently at any point
777 	 * now. This is not protected by the QDIO-lock (req_q_lock). So any
778 	 * uncontrolled access after this might result in an use-after-free bug.
779 	 * Only if the request doesn't have ZFCP_STATUS_FSFREQ_CLEANUP set, and
780 	 * when it is completed via req->completion, is it safe to use req
781 	 * again.
782 	 */
783 
784 	/* Don't increase for unsolicited status */
785 	if (!is_srb)
786 		adapter->fsf_req_seq_no++;
787 	adapter->req_no++;
788 
789 	return 0;
790 }
791 
792 /**
793  * zfcp_fsf_status_read - send status read request
794  * @qdio: pointer to struct zfcp_qdio
795  * Returns: 0 on success, error otherwise
796  */
797 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
798 {
799 	struct zfcp_adapter *adapter = qdio->adapter;
800 	struct zfcp_fsf_req *req;
801 	struct fsf_status_read_buffer *sr_buf;
802 	struct page *page;
803 	int retval = -EIO;
804 
805 	spin_lock_irq(&qdio->req_q_lock);
806 	if (zfcp_qdio_sbal_get(qdio))
807 		goto out;
808 
809 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
810 				  SBAL_SFLAGS0_TYPE_STATUS,
811 				  adapter->pool.status_read_req);
812 	if (IS_ERR(req)) {
813 		retval = PTR_ERR(req);
814 		goto out;
815 	}
816 
817 	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
818 	if (!page) {
819 		retval = -ENOMEM;
820 		goto failed_buf;
821 	}
822 	sr_buf = page_address(page);
823 	memset(sr_buf, 0, sizeof(*sr_buf));
824 	req->data = sr_buf;
825 
826 	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
827 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
828 
829 	retval = zfcp_fsf_req_send(req);
830 	if (retval)
831 		goto failed_req_send;
832 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
833 
834 	goto out;
835 
836 failed_req_send:
837 	req->data = NULL;
838 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
839 failed_buf:
840 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
841 	zfcp_fsf_req_free(req);
842 out:
843 	spin_unlock_irq(&qdio->req_q_lock);
844 	return retval;
845 }
846 
847 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
848 {
849 	struct scsi_device *sdev = req->data;
850 	struct zfcp_scsi_dev *zfcp_sdev;
851 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
852 
853 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
854 		return;
855 
856 	zfcp_sdev = sdev_to_zfcp(sdev);
857 
858 	switch (req->qtcb->header.fsf_status) {
859 	case FSF_PORT_HANDLE_NOT_VALID:
860 		if (fsq->word[0] == fsq->word[1]) {
861 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
862 						"fsafch1");
863 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
864 		}
865 		break;
866 	case FSF_LUN_HANDLE_NOT_VALID:
867 		if (fsq->word[0] == fsq->word[1]) {
868 			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
869 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
870 		}
871 		break;
872 	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
873 		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
874 		break;
875 	case FSF_PORT_BOXED:
876 		zfcp_erp_set_port_status(zfcp_sdev->port,
877 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
878 		zfcp_erp_port_reopen(zfcp_sdev->port,
879 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
880 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
881 		break;
882 	case FSF_LUN_BOXED:
883 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
884 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
885 				    "fsafch4");
886 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
887 		break;
888 	case FSF_ADAPTER_STATUS_AVAILABLE:
889 		switch (fsq->word[0]) {
890 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
891 			zfcp_fc_test_link(zfcp_sdev->port);
892 			/* fall through */
893 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
894 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
895 			break;
896 		}
897 		break;
898 	case FSF_GOOD:
899 		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
900 		break;
901 	}
902 }
903 
904 /**
905  * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
906  * @scmnd: The SCSI command to abort
907  * Returns: pointer to struct zfcp_fsf_req
908  */
910 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
911 {
912 	struct zfcp_fsf_req *req = NULL;
913 	struct scsi_device *sdev = scmnd->device;
914 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
915 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
916 	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
917 
918 	spin_lock_irq(&qdio->req_q_lock);
919 	if (zfcp_qdio_sbal_get(qdio))
920 		goto out;
921 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
922 				  SBAL_SFLAGS0_TYPE_READ,
923 				  qdio->adapter->pool.scsi_abort);
924 	if (IS_ERR(req)) {
925 		req = NULL;
926 		goto out;
927 	}
928 
929 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
930 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
931 		goto out_error_free;
932 
933 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
934 
935 	req->data = sdev;
936 	req->handler = zfcp_fsf_abort_fcp_command_handler;
937 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
938 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
939 	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
940 
941 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
942 	if (!zfcp_fsf_req_send(req)) {
943 		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
944 		goto out;
945 	}
946 
947 out_error_free:
948 	zfcp_fsf_req_free(req);
949 	req = NULL;
950 out:
951 	spin_unlock_irq(&qdio->req_q_lock);
952 	return req;
953 }
954 
955 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
956 {
957 	struct zfcp_adapter *adapter = req->adapter;
958 	struct zfcp_fsf_ct_els *ct = req->data;
959 	struct fsf_qtcb_header *header = &req->qtcb->header;
960 
961 	ct->status = -EINVAL;
962 
963 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
964 		goto skip_fsfstatus;
965 
966 	switch (header->fsf_status) {
967 	case FSF_GOOD:
968 		ct->status = 0;
969 		zfcp_dbf_san_res("fsscth2", req);
970 		break;
971 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
972 		zfcp_fsf_class_not_supp(req);
973 		break;
974 	case FSF_ADAPTER_STATUS_AVAILABLE:
975 		switch (header->fsf_status_qual.word[0]) {
976 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
977 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
978 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
979 			break;
980 		}
981 		break;
982 	case FSF_PORT_BOXED:
983 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
984 		break;
985 	case FSF_PORT_HANDLE_NOT_VALID:
986 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
987 		/* fall through */
988 	case FSF_GENERIC_COMMAND_REJECTED:
989 	case FSF_PAYLOAD_SIZE_MISMATCH:
990 	case FSF_REQUEST_SIZE_TOO_LARGE:
991 	case FSF_RESPONSE_SIZE_TOO_LARGE:
992 	case FSF_SBAL_MISMATCH:
993 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
994 		break;
995 	}
996 
997 skip_fsfstatus:
998 	if (ct->handler)
999 		ct->handler(ct->handler_data);
1000 }
1001 
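/* place the CT/ELS request and response buffers into single SBALEs (unchained) */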
1002 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
1003 					    struct zfcp_qdio_req *q_req,
1004 					    struct scatterlist *sg_req,
1005 					    struct scatterlist *sg_resp)
1006 {
1007 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
1008 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
1009 	zfcp_qdio_set_sbale_last(qdio, q_req);
1010 }
1011 
1012 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1013 				       struct scatterlist *sg_req,
1014 				       struct scatterlist *sg_resp)
1015 {
1016 	struct zfcp_adapter *adapter = req->adapter;
1017 	struct zfcp_qdio *qdio = adapter->qdio;
1018 	struct fsf_qtcb *qtcb = req->qtcb;
1019 	u32 feat = adapter->adapter_features;
1020 
1021 	if (zfcp_adapter_multi_buffer_active(adapter)) {
1022 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1023 			return -EIO;
1024 		qtcb->bottom.support.req_buf_length =
1025 			zfcp_qdio_real_bytes(sg_req);
1026 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1027 			return -EIO;
1028 		qtcb->bottom.support.resp_buf_length =
1029 			zfcp_qdio_real_bytes(sg_resp);
1030 
1031 		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
1032 		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1033 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
1034 		return 0;
1035 	}
1036 
1037 	/* use single, unchained SBAL if it can hold the request */
1038 	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
1039 		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
1040 						sg_req, sg_resp);
1041 		return 0;
1042 	}
1043 
1044 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1045 		return -EOPNOTSUPP;
1046 
1047 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1048 		return -EIO;
1049 
1050 	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1051 
1052 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1053 	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1054 
1055 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1056 		return -EIO;
1057 
1058 	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1059 
1060 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1061 
1062 	return 0;
1063 }
1064 
1065 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1066 				 struct scatterlist *sg_req,
1067 				 struct scatterlist *sg_resp,
1068 				 unsigned int timeout)
1069 {
1070 	int ret;
1071 
1072 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1073 	if (ret)
1074 		return ret;
1075 
1076 	/* common settings for ct/gs and els requests */
1077 	if (timeout > 255)
1078 		timeout = 255; /* max value accepted by hardware */
1079 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1080 	req->qtcb->bottom.support.timeout = timeout;
1081 	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1082 
1083 	return 0;
1084 }
1085 
1086 /**
1087  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1088  * @wka_port: pointer to zfcp WKA port to send CT/GS to
1089  * @ct: pointer to struct zfcp_send_ct with data for request
1090  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1091  * @timeout: timeout that hardware should use, and a later software timeout
1092  */
1093 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1094 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1095 		     unsigned int timeout)
1096 {
1097 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1098 	struct zfcp_fsf_req *req;
1099 	int ret = -EIO;
1100 
1101 	spin_lock_irq(&qdio->req_q_lock);
1102 	if (zfcp_qdio_sbal_get(qdio))
1103 		goto out;
1104 
1105 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1106 				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1107 
1108 	if (IS_ERR(req)) {
1109 		ret = PTR_ERR(req);
1110 		goto out;
1111 	}
1112 
1113 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1114 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1115 	if (ret)
1116 		goto failed_send;
1117 
1118 	req->handler = zfcp_fsf_send_ct_handler;
1119 	req->qtcb->header.port_handle = wka_port->handle;
1120 	ct->d_id = wka_port->d_id;
1121 	req->data = ct;
1122 
1123 	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1124 
1125 	ret = zfcp_fsf_req_send(req);
1126 	if (ret)
1127 		goto failed_send;
1128 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1129 
1130 	goto out;
1131 
1132 failed_send:
1133 	zfcp_fsf_req_free(req);
1134 out:
1135 	spin_unlock_irq(&qdio->req_q_lock);
1136 	return ret;
1137 }
1138 
1139 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1140 {
1141 	struct zfcp_fsf_ct_els *send_els = req->data;
1142 	struct fsf_qtcb_header *header = &req->qtcb->header;
1143 
1144 	send_els->status = -EINVAL;
1145 
1146 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1147 		goto skip_fsfstatus;
1148 
1149 	switch (header->fsf_status) {
1150 	case FSF_GOOD:
1151 		send_els->status = 0;
1152 		zfcp_dbf_san_res("fsselh1", req);
1153 		break;
1154 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1155 		zfcp_fsf_class_not_supp(req);
1156 		break;
1157 	case FSF_ADAPTER_STATUS_AVAILABLE:
1158 		switch (header->fsf_status_qual.word[0]) {
1159 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1160 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1161 		case FSF_SQ_RETRY_IF_POSSIBLE:
1162 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1163 			break;
1164 		}
1165 		break;
1166 	case FSF_ELS_COMMAND_REJECTED:
1167 	case FSF_PAYLOAD_SIZE_MISMATCH:
1168 	case FSF_REQUEST_SIZE_TOO_LARGE:
1169 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1170 		break;
1171 	case FSF_SBAL_MISMATCH:
1172 		/* should never occur, avoided in zfcp_fsf_send_els */
1173 		/* fall through */
1174 	default:
1175 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1176 		break;
1177 	}
1178 skip_fsfstatus:
1179 	if (send_els->handler)
1180 		send_els->handler(send_els->handler_data);
1181 }
1182 
1183 /**
1184  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1185  * @adapter: pointer to zfcp adapter
1186  * @d_id: N_Port_ID to send ELS to
1187  * @els: pointer to struct zfcp_send_els with data for the command
1188  * @timeout: timeout that hardware should use, and a later software timeout
1189  */
1190 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1191 		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
1192 {
1193 	struct zfcp_fsf_req *req;
1194 	struct zfcp_qdio *qdio = adapter->qdio;
1195 	int ret = -EIO;
1196 
1197 	spin_lock_irq(&qdio->req_q_lock);
1198 	if (zfcp_qdio_sbal_get(qdio))
1199 		goto out;
1200 
1201 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1202 				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1203 
1204 	if (IS_ERR(req)) {
1205 		ret = PTR_ERR(req);
1206 		goto out;
1207 	}
1208 
1209 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1210 
1211 	if (!zfcp_adapter_multi_buffer_active(adapter))
1212 		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1213 
1214 	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1215 
1216 	if (ret)
1217 		goto failed_send;
1218 
1219 	hton24(req->qtcb->bottom.support.d_id, d_id);
1220 	req->handler = zfcp_fsf_send_els_handler;
1221 	els->d_id = d_id;
1222 	req->data = els;
1223 
1224 	zfcp_dbf_san_req("fssels1", req, d_id);
1225 
1226 	ret = zfcp_fsf_req_send(req);
1227 	if (ret)
1228 		goto failed_send;
1229 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1230 
1231 	goto out;
1232 
1233 failed_send:
1234 	zfcp_fsf_req_free(req);
1235 out:
1236 	spin_unlock_irq(&qdio->req_q_lock);
1237 	return ret;
1238 }
1239 
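/**
 * zfcp_fsf_exchange_config_data - request configuration data from the adapter
 * @erp_action: ERP action that triggers this exchange config data request
 * Returns: 0 on success, error otherwise
 */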
1240 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1241 {
1242 	struct zfcp_fsf_req *req;
1243 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1244 	int retval = -EIO;
1245 
1246 	spin_lock_irq(&qdio->req_q_lock);
1247 	if (zfcp_qdio_sbal_get(qdio))
1248 		goto out;
1249 
1250 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1251 				  SBAL_SFLAGS0_TYPE_READ,
1252 				  qdio->adapter->pool.erp_req);
1253 
1254 	if (IS_ERR(req)) {
1255 		retval = PTR_ERR(req);
1256 		goto out;
1257 	}
1258 
1259 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1260 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1261 
1262 	req->qtcb->bottom.config.feature_selection =
1263 			FSF_FEATURE_NOTIFICATION_LOST |
1264 			FSF_FEATURE_UPDATE_ALERT;
1265 	req->erp_action = erp_action;
1266 	req->handler = zfcp_fsf_exchange_config_data_handler;
1267 	erp_action->fsf_req_id = req->req_id;
1268 
1269 	zfcp_fsf_start_erp_timer(req);
1270 	retval = zfcp_fsf_req_send(req);
1271 	if (retval) {
1272 		zfcp_fsf_req_free(req);
1273 		erp_action->fsf_req_id = 0;
1274 	}
1275 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1276 out:
1277 	spin_unlock_irq(&qdio->req_q_lock);
1278 	return retval;
1279 }
1280 
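/**
 * zfcp_fsf_exchange_config_data_sync - get configuration data, wait for result
 * @qdio: pointer to struct zfcp_qdio
 * @data: pointer to struct fsf_qtcb_bottom_config for the result, may be NULL
 * Returns: 0 on success, error otherwise
 */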
1281 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1282 				       struct fsf_qtcb_bottom_config *data)
1283 {
1284 	struct zfcp_fsf_req *req = NULL;
1285 	int retval = -EIO;
1286 
1287 	spin_lock_irq(&qdio->req_q_lock);
1288 	if (zfcp_qdio_sbal_get(qdio))
1289 		goto out_unlock;
1290 
1291 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1292 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1293 
1294 	if (IS_ERR(req)) {
1295 		retval = PTR_ERR(req);
1296 		goto out_unlock;
1297 	}
1298 
1299 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1300 	req->handler = zfcp_fsf_exchange_config_data_handler;
1301 
1302 	req->qtcb->bottom.config.feature_selection =
1303 			FSF_FEATURE_NOTIFICATION_LOST |
1304 			FSF_FEATURE_UPDATE_ALERT;
1305 
1306 	if (data)
1307 		req->data = data;
1308 
1309 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1310 	retval = zfcp_fsf_req_send(req);
1311 	spin_unlock_irq(&qdio->req_q_lock);
1312 	if (!retval) {
1313 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
1314 		wait_for_completion(&req->completion);
1315 	}
1316 
1317 	zfcp_fsf_req_free(req);
1318 	return retval;
1319 
1320 out_unlock:
1321 	spin_unlock_irq(&qdio->req_q_lock);
1322 	return retval;
1323 }
1324 
1325 /**
1326  * zfcp_fsf_exchange_port_data - request information about local port
1327  * @erp_action: ERP action for the adapter for which port data is requested
1328  * Returns: 0 on success, error otherwise
1329  */
1330 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1331 {
1332 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1333 	struct zfcp_fsf_req *req;
1334 	int retval = -EIO;
1335 
1336 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1337 		return -EOPNOTSUPP;
1338 
1339 	spin_lock_irq(&qdio->req_q_lock);
1340 	if (zfcp_qdio_sbal_get(qdio))
1341 		goto out;
1342 
1343 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1344 				  SBAL_SFLAGS0_TYPE_READ,
1345 				  qdio->adapter->pool.erp_req);
1346 
1347 	if (IS_ERR(req)) {
1348 		retval = PTR_ERR(req);
1349 		goto out;
1350 	}
1351 
1352 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1353 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1354 
1355 	req->handler = zfcp_fsf_exchange_port_data_handler;
1356 	req->erp_action = erp_action;
1357 	erp_action->fsf_req_id = req->req_id;
1358 
1359 	zfcp_fsf_start_erp_timer(req);
1360 	retval = zfcp_fsf_req_send(req);
1361 	if (retval) {
1362 		zfcp_fsf_req_free(req);
1363 		erp_action->fsf_req_id = 0;
1364 	}
1365 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1366 out:
1367 	spin_unlock_irq(&qdio->req_q_lock);
1368 	return retval;
1369 }
1370 
1371 /**
1372  * zfcp_fsf_exchange_port_data_sync - request information about local port
1373  * @qdio: pointer to struct zfcp_qdio
1374  * @data: pointer to struct fsf_qtcb_bottom_port
1375  * Returns: 0 on success, error otherwise
1376  */
1377 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1378 				     struct fsf_qtcb_bottom_port *data)
1379 {
1380 	struct zfcp_fsf_req *req = NULL;
1381 	int retval = -EIO;
1382 
1383 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1384 		return -EOPNOTSUPP;
1385 
1386 	spin_lock_irq(&qdio->req_q_lock);
1387 	if (zfcp_qdio_sbal_get(qdio))
1388 		goto out_unlock;
1389 
1390 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1391 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1392 
1393 	if (IS_ERR(req)) {
1394 		retval = PTR_ERR(req);
1395 		goto out_unlock;
1396 	}
1397 
1398 	if (data)
1399 		req->data = data;
1400 
1401 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1402 
1403 	req->handler = zfcp_fsf_exchange_port_data_handler;
1404 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1405 	retval = zfcp_fsf_req_send(req);
1406 	spin_unlock_irq(&qdio->req_q_lock);
1407 
1408 	if (!retval) {
1409 		/* NOTE: ONLY TOUCH SYNC req AGAIN ON req->completion. */
1410 		wait_for_completion(&req->completion);
1411 	}
1412 
1413 	zfcp_fsf_req_free(req);
1414 
1415 	return retval;
1416 
1417 out_unlock:
1418 	spin_unlock_irq(&qdio->req_q_lock);
1419 	return retval;
1420 }
1421 
1422 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1423 {
1424 	struct zfcp_port *port = req->data;
1425 	struct fsf_qtcb_header *header = &req->qtcb->header;
1426 	struct fc_els_flogi *plogi;
1427 
1428 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1429 		goto out;
1430 
1431 	switch (header->fsf_status) {
1432 	case FSF_PORT_ALREADY_OPEN:
1433 		break;
1434 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1435 		dev_warn(&req->adapter->ccw_device->dev,
1436 			 "Not enough FCP adapter resources to open "
1437 			 "remote port 0x%016Lx\n",
1438 			 (unsigned long long)port->wwpn);
1439 		zfcp_erp_set_port_status(port,
1440 					 ZFCP_STATUS_COMMON_ERP_FAILED);
1441 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1442 		break;
1443 	case FSF_ADAPTER_STATUS_AVAILABLE:
1444 		switch (header->fsf_status_qual.word[0]) {
1445 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1446 			/* no zfcp_fc_test_link() with failed open port */
1447 			/* fall through */
1448 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1449 		case FSF_SQ_NO_RETRY_POSSIBLE:
1450 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1451 			break;
1452 		}
1453 		break;
1454 	case FSF_GOOD:
1455 		port->handle = header->port_handle;
1456 		atomic_or(ZFCP_STATUS_COMMON_OPEN |
1457 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1458 		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1459 			      &port->status);
1460 		/* check whether D_ID has changed during open */
1461 		/*
1462 		 * FIXME: This check is not airtight, as the FCP channel does
1463 		 * not monitor closures of target port connections caused on
1464 		 * the remote side. Thus, they might miss out on invalidating
1465 		 * locally cached WWPNs (and other N_Port parameters) of gone
1466 		 * target ports. So, our heroic attempt to make things safe
1467 		 * could be undermined by 'open port' response data tagged with
1468 		 * obsolete WWPNs. Another reason to monitor potential
1469 		 * connection closures ourselves at least (by interpreting
1470 		 * incoming ELS' and unsolicited status). It just crosses my
1471 		 * mind that one should be able to cross-check by means of
1472 		 * another GID_PN straight after a port has been opened.
1473 		 * Alternatively, an ADISC/PDISC ELS should suffice, as well.
1474 		 */
1475 		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1476 		if (req->qtcb->bottom.support.els1_length >=
1477 		    FSF_PLOGI_MIN_LEN)
1478 			zfcp_fc_plogi_evaluate(port, plogi);
1479 		break;
1480 	case FSF_UNKNOWN_OP_SUBTYPE:
1481 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1482 		break;
1483 	}
1484 
1485 out:
1486 	put_device(&port->dev);
1487 }
1488 
1489 /**
1490  * zfcp_fsf_open_port - create and send open port request
1491  * @erp_action: pointer to struct zfcp_erp_action
1492  * Returns: 0 on success, error otherwise
1493  */
1494 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1495 {
1496 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1497 	struct zfcp_port *port = erp_action->port;
1498 	struct zfcp_fsf_req *req;
1499 	int retval = -EIO;
1500 
1501 	spin_lock_irq(&qdio->req_q_lock);
1502 	if (zfcp_qdio_sbal_get(qdio))
1503 		goto out;
1504 
1505 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1506 				  SBAL_SFLAGS0_TYPE_READ,
1507 				  qdio->adapter->pool.erp_req);
1508 
1509 	if (IS_ERR(req)) {
1510 		retval = PTR_ERR(req);
1511 		goto out;
1512 	}
1513 
1514 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1515 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1516 
1517 	req->handler = zfcp_fsf_open_port_handler;
1518 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
1519 	req->data = port;
1520 	req->erp_action = erp_action;
1521 	erp_action->fsf_req_id = req->req_id;
1522 	get_device(&port->dev);
1523 
1524 	zfcp_fsf_start_erp_timer(req);
1525 	retval = zfcp_fsf_req_send(req);
1526 	if (retval) {
1527 		zfcp_fsf_req_free(req);
1528 		erp_action->fsf_req_id = 0;
1529 		put_device(&port->dev);
1530 	}
1531 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1532 out:
1533 	spin_unlock_irq(&qdio->req_q_lock);
1534 	return retval;
1535 }
1536 
1537 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1538 {
1539 	struct zfcp_port *port = req->data;
1540 
1541 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1542 		return;
1543 
1544 	switch (req->qtcb->header.fsf_status) {
1545 	case FSF_PORT_HANDLE_NOT_VALID:
1546 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1547 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1548 		break;
1549 	case FSF_ADAPTER_STATUS_AVAILABLE:
1550 		break;
1551 	case FSF_GOOD:
1552 		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1553 		break;
1554 	}
1555 }
1556 
1557 /**
1558  * zfcp_fsf_close_port - create and send close port request
1559  * @erp_action: pointer to struct zfcp_erp_action
1560  * Returns: 0 on success, error otherwise
1561  */
1562 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1563 {
1564 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1565 	struct zfcp_fsf_req *req;
1566 	int retval = -EIO;
1567 
1568 	spin_lock_irq(&qdio->req_q_lock);
1569 	if (zfcp_qdio_sbal_get(qdio))
1570 		goto out;
1571 
1572 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1573 				  SBAL_SFLAGS0_TYPE_READ,
1574 				  qdio->adapter->pool.erp_req);
1575 
1576 	if (IS_ERR(req)) {
1577 		retval = PTR_ERR(req);
1578 		goto out;
1579 	}
1580 
1581 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1582 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1583 
1584 	req->handler = zfcp_fsf_close_port_handler;
1585 	req->data = erp_action->port;
1586 	req->erp_action = erp_action;
1587 	req->qtcb->header.port_handle = erp_action->port->handle;
1588 	erp_action->fsf_req_id = req->req_id;
1589 
1590 	zfcp_fsf_start_erp_timer(req);
1591 	retval = zfcp_fsf_req_send(req);
1592 	if (retval) {
1593 		zfcp_fsf_req_free(req);
1594 		erp_action->fsf_req_id = 0;
1595 	}
1596 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1597 out:
1598 	spin_unlock_irq(&qdio->req_q_lock);
1599 	return retval;
1600 }
1601 
1602 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1603 {
1604 	struct zfcp_fc_wka_port *wka_port = req->data;
1605 	struct fsf_qtcb_header *header = &req->qtcb->header;
1606 
1607 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1608 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1609 		goto out;
1610 	}
1611 
1612 	switch (header->fsf_status) {
1613 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1614 		dev_warn(&req->adapter->ccw_device->dev,
1615 			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1616 		/* fall through */
1617 	case FSF_ADAPTER_STATUS_AVAILABLE:
1618 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1619 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1620 		break;
1621 	case FSF_GOOD:
1622 		wka_port->handle = header->port_handle;
1623 		/* fall through */
1624 	case FSF_PORT_ALREADY_OPEN:
1625 		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1626 	}
1627 out:
1628 	wake_up(&wka_port->completion_wq);
1629 }
1630 
1631 /**
1632  * zfcp_fsf_open_wka_port - create and send open wka-port request
1633  * @wka_port: pointer to struct zfcp_fc_wka_port
1634  * Returns: 0 on success, error otherwise
1635  */
1636 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1637 {
1638 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1639 	struct zfcp_fsf_req *req;
1640 	unsigned long req_id = 0;
1641 	int retval = -EIO;
1642 
1643 	spin_lock_irq(&qdio->req_q_lock);
1644 	if (zfcp_qdio_sbal_get(qdio))
1645 		goto out;
1646 
1647 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1648 				  SBAL_SFLAGS0_TYPE_READ,
1649 				  qdio->adapter->pool.erp_req);
1650 
1651 	if (IS_ERR(req)) {
1652 		retval = PTR_ERR(req);
1653 		goto out;
1654 	}
1655 
1656 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1657 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1658 
1659 	req->handler = zfcp_fsf_open_wka_port_handler;
1660 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1661 	req->data = wka_port;
1662 
1663 	req_id = req->req_id;
1664 
1665 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1666 	retval = zfcp_fsf_req_send(req);
1667 	if (retval)
1668 		zfcp_fsf_req_free(req);
1669 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1670 out:
1671 	spin_unlock_irq(&qdio->req_q_lock);
1672 	if (!retval)
1673 		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
1674 	return retval;
1675 }
1676 
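/*
 * Handler for the close WKA port response: trigger adapter recovery if
 * the FCP channel no longer knows the port handle, then mark the WKA
 * port offline and wake up the waiter on wka_port->completion_wq.
 */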
1677 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1678 {
1679 	struct zfcp_fc_wka_port *wka_port = req->data;
1680 
1681 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1682 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1683 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1684 	}
1685 
1686 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1687 	wake_up(&wka_port->completion_wq);
1688 }
1689 
1690 /**
1691  * zfcp_fsf_close_wka_port - create and send close wka port request
1692  * @wka_port: WKA port to close
1693  * Returns: 0 on success, error otherwise
1694  */
1695 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1696 {
1697 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1698 	struct zfcp_fsf_req *req;
1699 	unsigned long req_id = 0;
1700 	int retval = -EIO;
1701 
1702 	spin_lock_irq(&qdio->req_q_lock);
1703 	if (zfcp_qdio_sbal_get(qdio))
1704 		goto out;
1705 
1706 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1707 				  SBAL_SFLAGS0_TYPE_READ,
1708 				  qdio->adapter->pool.erp_req);
1709 
1710 	if (IS_ERR(req)) {
1711 		retval = PTR_ERR(req);
1712 		goto out;
1713 	}
1714 
1715 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1716 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1717 
1718 	req->handler = zfcp_fsf_close_wka_port_handler;
1719 	req->data = wka_port;
1720 	req->qtcb->header.port_handle = wka_port->handle;
1721 
1722 	req_id = req->req_id;
1723 
1724 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1725 	retval = zfcp_fsf_req_send(req);
1726 	if (retval)
1727 		zfcp_fsf_req_free(req);
1728 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1729 out:
1730 	spin_unlock_irq(&qdio->req_q_lock);
1731 	if (!retval)
1732 		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
1733 	return retval;
1734 }
1735 
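/*
 * Handler for the close physical port response: on FSF_GOOD (and on
 * FSF_PORT_BOXED) clear ZFCP_STATUS_PORT_PHYS_OPEN on the port and
 * ZFCP_STATUS_COMMON_OPEN on all LUNs attached to it, triggering
 * recovery where the FSF status indicates it.
 */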
1736 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1737 {
1738 	struct zfcp_port *port = req->data;
1739 	struct fsf_qtcb_header *header = &req->qtcb->header;
1740 	struct scsi_device *sdev;
1741 
1742 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1743 		return;
1744 
1745 	switch (header->fsf_status) {
1746 	case FSF_PORT_HANDLE_NOT_VALID:
1747 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1748 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1749 		break;
1750 	case FSF_PORT_BOXED:
1751 		/* can't use generic zfcp_erp_modify_port_status because
1752 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1753 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1754 		shost_for_each_device(sdev, port->adapter->scsi_host)
1755 			if (sdev_to_zfcp(sdev)->port == port)
1756 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1757 						  &sdev_to_zfcp(sdev)->status);
1758 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1759 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1760 				     "fscpph2");
1761 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1762 		break;
1763 	case FSF_ADAPTER_STATUS_AVAILABLE:
1764 		switch (header->fsf_status_qual.word[0]) {
1765 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1766 			/* fall through */
1767 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1768 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1769 			break;
1770 		}
1771 		break;
1772 	case FSF_GOOD:
1773 		/* can't use generic zfcp_erp_modify_port_status because
1774 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1775 		 */
1776 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1777 		shost_for_each_device(sdev, port->adapter->scsi_host)
1778 			if (sdev_to_zfcp(sdev)->port == port)
1779 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1780 						  &sdev_to_zfcp(sdev)->status);
1781 		break;
1782 	}
1783 }
1784 
1785 /**
1786  * zfcp_fsf_close_physical_port - close physical port
1787  * @erp_action: pointer to struct zfcp_erp_action
1788  * Returns: 0 on success, error otherwise
1789  */
1790 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1791 {
1792 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1793 	struct zfcp_fsf_req *req;
1794 	int retval = -EIO;
1795 
1796 	spin_lock_irq(&qdio->req_q_lock);
1797 	if (zfcp_qdio_sbal_get(qdio))
1798 		goto out;
1799 
1800 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1801 				  SBAL_SFLAGS0_TYPE_READ,
1802 				  qdio->adapter->pool.erp_req);
1803 
1804 	if (IS_ERR(req)) {
1805 		retval = PTR_ERR(req);
1806 		goto out;
1807 	}
1808 
1809 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1810 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1811 
1812 	req->data = erp_action->port;
1813 	req->qtcb->header.port_handle = erp_action->port->handle;
1814 	req->erp_action = erp_action;
1815 	req->handler = zfcp_fsf_close_physical_port_handler;
1816 	erp_action->fsf_req_id = req->req_id;
1817 
1818 	zfcp_fsf_start_erp_timer(req);
1819 	retval = zfcp_fsf_req_send(req);
1820 	if (retval) {
1821 		zfcp_fsf_req_free(req);
1822 		erp_action->fsf_req_id = 0;
1823 	}
1824 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1825 out:
1826 	spin_unlock_irq(&qdio->req_q_lock);
1827 	return retval;
1828 }
1829 
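/*
 * Handler for the open LUN response: on FSF_GOOD remember the LUN
 * handle and set ZFCP_STATUS_COMMON_OPEN on the LUN; error cases
 * trigger the appropriate adapter, port or LUN recovery and usually
 * mark the request with ZFCP_STATUS_FSFREQ_ERROR.
 */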
1830 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1831 {
1832 	struct zfcp_adapter *adapter = req->adapter;
1833 	struct scsi_device *sdev = req->data;
1834 	struct zfcp_scsi_dev *zfcp_sdev;
1835 	struct fsf_qtcb_header *header = &req->qtcb->header;
1836 	union fsf_status_qual *qual = &header->fsf_status_qual;
1837 
1838 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1839 		return;
1840 
1841 	zfcp_sdev = sdev_to_zfcp(sdev);
1842 
1843 	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1844 			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
1845 			  &zfcp_sdev->status);
1846 
1847 	switch (header->fsf_status) {
1848 
1849 	case FSF_PORT_HANDLE_NOT_VALID:
1850 		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1851 		/* fall through */
1852 	case FSF_LUN_ALREADY_OPEN:
1853 		break;
1854 	case FSF_PORT_BOXED:
1855 		zfcp_erp_set_port_status(zfcp_sdev->port,
1856 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1857 		zfcp_erp_port_reopen(zfcp_sdev->port,
1858 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1859 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1860 		break;
1861 	case FSF_LUN_SHARING_VIOLATION:
1862 		if (qual->word[0])
1863 			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
1864 				 "LUN 0x%016Lx on port 0x%016Lx is already in "
1865 				 "use by CSS%d, MIF Image ID %x\n",
1866 				 zfcp_scsi_dev_lun(sdev),
1867 				 (unsigned long long)zfcp_sdev->port->wwpn,
1868 				 qual->fsf_queue_designator.cssid,
1869 				 qual->fsf_queue_designator.hla);
1870 		zfcp_erp_set_lun_status(sdev,
1871 					ZFCP_STATUS_COMMON_ERP_FAILED |
1872 					ZFCP_STATUS_COMMON_ACCESS_DENIED);
1873 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1874 		break;
1875 	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1876 		dev_warn(&adapter->ccw_device->dev,
1877 			 "No handle is available for LUN "
1878 			 "0x%016Lx on port 0x%016Lx\n",
1879 			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1880 			 (unsigned long long)zfcp_sdev->port->wwpn);
1881 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1882 		/* fall through */
1883 	case FSF_INVALID_COMMAND_OPTION:
1884 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1885 		break;
1886 	case FSF_ADAPTER_STATUS_AVAILABLE:
1887 		switch (header->fsf_status_qual.word[0]) {
1888 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1889 			zfcp_fc_test_link(zfcp_sdev->port);
1890 			/* fall through */
1891 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1892 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1893 			break;
1894 		}
1895 		break;
1896 
1897 	case FSF_GOOD:
1898 		zfcp_sdev->lun_handle = header->lun_handle;
1899 		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1900 		break;
1901 	}
1902 }
1903 
1904 /**
1905  * zfcp_fsf_open_lun - open LUN
1906  * @erp_action: pointer to struct zfcp_erp_action
1907  * Returns: 0 on success, error otherwise
1908  */
1909 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1910 {
1911 	struct zfcp_adapter *adapter = erp_action->adapter;
1912 	struct zfcp_qdio *qdio = adapter->qdio;
1913 	struct zfcp_fsf_req *req;
1914 	int retval = -EIO;
1915 
1916 	spin_lock_irq(&qdio->req_q_lock);
1917 	if (zfcp_qdio_sbal_get(qdio))
1918 		goto out;
1919 
1920 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1921 				  SBAL_SFLAGS0_TYPE_READ,
1922 				  adapter->pool.erp_req);
1923 
1924 	if (IS_ERR(req)) {
1925 		retval = PTR_ERR(req);
1926 		goto out;
1927 	}
1928 
1929 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1930 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1931 
1932 	req->qtcb->header.port_handle = erp_action->port->handle;
1933 	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1934 	req->handler = zfcp_fsf_open_lun_handler;
1935 	req->data = erp_action->sdev;
1936 	req->erp_action = erp_action;
1937 	erp_action->fsf_req_id = req->req_id;
1938 
1939 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1940 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1941 
1942 	zfcp_fsf_start_erp_timer(req);
1943 	retval = zfcp_fsf_req_send(req);
1944 	if (retval) {
1945 		zfcp_fsf_req_free(req);
1946 		erp_action->fsf_req_id = 0;
1947 	}
1948 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
1949 out:
1950 	spin_unlock_irq(&qdio->req_q_lock);
1951 	return retval;
1952 }
1953 
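/*
 * Handler for the close LUN response: on FSF_GOOD clear
 * ZFCP_STATUS_COMMON_OPEN on the LUN; stale handles and boxed ports
 * trigger adapter or port recovery and mark the request as failed.
 */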
1954 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1955 {
1956 	struct scsi_device *sdev = req->data;
1957 	struct zfcp_scsi_dev *zfcp_sdev;
1958 
1959 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1960 		return;
1961 
1962 	zfcp_sdev = sdev_to_zfcp(sdev);
1963 
1964 	switch (req->qtcb->header.fsf_status) {
1965 	case FSF_PORT_HANDLE_NOT_VALID:
1966 		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1967 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1968 		break;
1969 	case FSF_LUN_HANDLE_NOT_VALID:
1970 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1971 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1972 		break;
1973 	case FSF_PORT_BOXED:
1974 		zfcp_erp_set_port_status(zfcp_sdev->port,
1975 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1976 		zfcp_erp_port_reopen(zfcp_sdev->port,
1977 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1978 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1979 		break;
1980 	case FSF_ADAPTER_STATUS_AVAILABLE:
1981 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
1982 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1983 			zfcp_fc_test_link(zfcp_sdev->port);
1984 			/* fall through */
1985 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1986 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1987 			break;
1988 		}
1989 		break;
1990 	case FSF_GOOD:
1991 		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1992 		break;
1993 	}
1994 }
1995 
1996 /**
1997  * zfcp_fsf_close_lun - close LUN
1998  * @erp_action: pointer to erp_action triggering the "close LUN"
1999  * Returns: 0 on success, error otherwise
2000  */
2001 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
2002 {
2003 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2004 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
2005 	struct zfcp_fsf_req *req;
2006 	int retval = -EIO;
2007 
2008 	spin_lock_irq(&qdio->req_q_lock);
2009 	if (zfcp_qdio_sbal_get(qdio))
2010 		goto out;
2011 
2012 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
2013 				  SBAL_SFLAGS0_TYPE_READ,
2014 				  qdio->adapter->pool.erp_req);
2015 
2016 	if (IS_ERR(req)) {
2017 		retval = PTR_ERR(req);
2018 		goto out;
2019 	}
2020 
2021 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2022 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2023 
2024 	req->qtcb->header.port_handle = erp_action->port->handle;
2025 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2026 	req->handler = zfcp_fsf_close_lun_handler;
2027 	req->data = erp_action->sdev;
2028 	req->erp_action = erp_action;
2029 	erp_action->fsf_req_id = req->req_id;
2030 
2031 	zfcp_fsf_start_erp_timer(req);
2032 	retval = zfcp_fsf_req_send(req);
2033 	if (retval) {
2034 		zfcp_fsf_req_free(req);
2035 		erp_action->fsf_req_id = 0;
2036 	}
2037 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2038 out:
2039 	spin_unlock_irq(&qdio->req_q_lock);
2040 	return retval;
2041 }
2042 
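/* accumulate sum/min/max of one channel or fabric latency sample */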
2043 static void zfcp_fsf_update_lat(struct zfcp_latency_record *lat_rec, u32 lat)
2044 {
2045 	lat_rec->sum += lat;
2046 	lat_rec->min = min(lat_rec->min, lat);
2047 	lat_rec->max = max(lat_rec->max, lat);
2048 }
2049 
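/*
 * Emit a blktrace driver-data record for a completed SCSI command and,
 * if the adapter provides measurement data, fold the channel and
 * fabric latencies into the per-device read/write/cmd statistics.
 */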
2050 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2051 {
2052 	struct fsf_qual_latency_info *lat_in;
2053 	struct zfcp_latency_cont *lat = NULL;
2054 	struct zfcp_scsi_dev *zfcp_sdev;
2055 	struct zfcp_blk_drv_data blktrc;
2056 	int ticks = req->adapter->timer_ticks;
2057 
2058 	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2059 
2060 	blktrc.flags = 0;
2061 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2062 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2063 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2064 	blktrc.inb_usage = 0;
2065 	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2066 
2067 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2068 	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2069 		zfcp_sdev = sdev_to_zfcp(scsi->device);
2070 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
2071 		blktrc.channel_lat = lat_in->channel_lat * ticks;
2072 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2073 
2074 		switch (req->qtcb->bottom.io.data_direction) {
2075 		case FSF_DATADIR_DIF_READ_STRIP:
2076 		case FSF_DATADIR_DIF_READ_CONVERT:
2077 		case FSF_DATADIR_READ:
2078 			lat = &zfcp_sdev->latencies.read;
2079 			break;
2080 		case FSF_DATADIR_DIF_WRITE_INSERT:
2081 		case FSF_DATADIR_DIF_WRITE_CONVERT:
2082 		case FSF_DATADIR_WRITE:
2083 			lat = &zfcp_sdev->latencies.write;
2084 			break;
2085 		case FSF_DATADIR_CMND:
2086 			lat = &zfcp_sdev->latencies.cmd;
2087 			break;
2088 		}
2089 
2090 		if (lat) {
2091 			spin_lock(&zfcp_sdev->latencies.lock);
2092 			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2093 			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2094 			lat->counter++;
2095 			spin_unlock(&zfcp_sdev->latencies.lock);
2096 		}
2097 	}
2098 
2099 	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2100 			    sizeof(blktrc));
2101 }
2102 
2103 /**
2104  * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
2105  * @req: Pointer to FSF request.
2106  * @sdev: Pointer to SCSI device as request context.
2107  */
2108 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
2109 					struct scsi_device *sdev)
2110 {
2111 	struct zfcp_scsi_dev *zfcp_sdev;
2112 	struct fsf_qtcb_header *header = &req->qtcb->header;
2113 
2114 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2115 		return;
2116 
2117 	zfcp_sdev = sdev_to_zfcp(sdev);
2118 
2119 	switch (header->fsf_status) {
2120 	case FSF_HANDLE_MISMATCH:
2121 	case FSF_PORT_HANDLE_NOT_VALID:
2122 		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
2123 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2124 		break;
2125 	case FSF_FCPLUN_NOT_VALID:
2126 	case FSF_LUN_HANDLE_NOT_VALID:
2127 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2128 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2129 		break;
2130 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2131 		zfcp_fsf_class_not_supp(req);
2132 		break;
2133 	case FSF_DIRECTION_INDICATOR_NOT_VALID:
2134 		dev_err(&req->adapter->ccw_device->dev,
2135 			"Incorrect direction %d, LUN 0x%016Lx on port "
2136 			"0x%016Lx closed\n",
2137 			req->qtcb->bottom.io.data_direction,
2138 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
2139 			(unsigned long long)zfcp_sdev->port->wwpn);
2140 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
2141 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2142 		break;
2143 	case FSF_CMND_LENGTH_NOT_VALID:
2144 		dev_err(&req->adapter->ccw_device->dev,
2145 			"Incorrect FCP_CMND length %d, FCP device closed\n",
2146 			req->qtcb->bottom.io.fcp_cmnd_length);
2147 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
2148 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2149 		break;
2150 	case FSF_PORT_BOXED:
2151 		zfcp_erp_set_port_status(zfcp_sdev->port,
2152 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2153 		zfcp_erp_port_reopen(zfcp_sdev->port,
2154 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2155 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2156 		break;
2157 	case FSF_LUN_BOXED:
2158 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2159 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2160 				    "fssfch6");
2161 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2162 		break;
2163 	case FSF_ADAPTER_STATUS_AVAILABLE:
2164 		if (header->fsf_status_qual.word[0] ==
2165 		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2166 			zfcp_fc_test_link(zfcp_sdev->port);
2167 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2168 		break;
2169 	}
2170 }
2171 
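/*
 * Completion handler for FCP_CMND requests carrying a SCSI command:
 * translate FSF and DIF errors into SCSI host bytes or sense data,
 * evaluate the FCP response IU, and finally call scsi_done() while
 * holding the adapter's abort_lock to serialize against aborts.
 */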
2172 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2173 {
2174 	struct scsi_cmnd *scpnt;
2175 	struct fcp_resp_with_ext *fcp_rsp;
2176 	unsigned long flags;
2177 
2178 	read_lock_irqsave(&req->adapter->abort_lock, flags);
2179 
2180 	scpnt = req->data;
2181 	if (unlikely(!scpnt)) {
2182 		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2183 		return;
2184 	}
2185 
2186 	zfcp_fsf_fcp_handler_common(req, scpnt->device);
2187 
2188 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2189 		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2190 		goto skip_fsfstatus;
2191 	}
2192 
2193 	switch (req->qtcb->header.fsf_status) {
2194 	case FSF_INCONSISTENT_PROT_DATA:
2195 	case FSF_INVALID_PROT_PARM:
2196 		set_host_byte(scpnt, DID_ERROR);
2197 		goto skip_fsfstatus;
2198 	case FSF_BLOCK_GUARD_CHECK_FAILURE:
2199 		zfcp_scsi_dif_sense_error(scpnt, 0x1);
2200 		goto skip_fsfstatus;
2201 	case FSF_APP_TAG_CHECK_FAILURE:
2202 		zfcp_scsi_dif_sense_error(scpnt, 0x2);
2203 		goto skip_fsfstatus;
2204 	case FSF_REF_TAG_CHECK_FAILURE:
2205 		zfcp_scsi_dif_sense_error(scpnt, 0x3);
2206 		goto skip_fsfstatus;
2207 	}
2208 	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
2209 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2210 	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2211 
2212 skip_fsfstatus:
2213 	zfcp_fsf_req_trace(req, scpnt);
2214 	zfcp_dbf_scsi_result(scpnt, req);
2215 
2216 	scpnt->host_scribble = NULL;
2217 	(scpnt->scsi_done) (scpnt);
2218 	/*
2219 	 * We must hold this lock until scsi_done has been called.
2220 	 * Otherwise we may call scsi_done after the abort for this
2221 	 * command has already completed.
2222 	 * Note: scsi_done must not block!
2223 	 */
2224 	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2225 }
2226 
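/*
 * Map the SCSI data direction and protection operation of a command to
 * the corresponding FSF_DATADIR_* value; returns -EINVAL for
 * combinations the FCP channel cannot handle.
 */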
2227 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2228 {
2229 	switch (scsi_get_prot_op(scsi_cmnd)) {
2230 	case SCSI_PROT_NORMAL:
2231 		switch (scsi_cmnd->sc_data_direction) {
2232 		case DMA_NONE:
2233 			*data_dir = FSF_DATADIR_CMND;
2234 			break;
2235 		case DMA_FROM_DEVICE:
2236 			*data_dir = FSF_DATADIR_READ;
2237 			break;
2238 		case DMA_TO_DEVICE:
2239 			*data_dir = FSF_DATADIR_WRITE;
2240 			break;
2241 		case DMA_BIDIRECTIONAL:
2242 			return -EINVAL;
2243 		}
2244 		break;
2245 
2246 	case SCSI_PROT_READ_STRIP:
2247 		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
2248 		break;
2249 	case SCSI_PROT_WRITE_INSERT:
2250 		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2251 		break;
2252 	case SCSI_PROT_READ_PASS:
2253 		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2254 		break;
2255 	case SCSI_PROT_WRITE_PASS:
2256 		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2257 		break;
2258 	default:
2259 		return -EINVAL;
2260 	}
2261 
2262 	return 0;
2263 }
2264 
2265 /**
2266  * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2267  * @scsi_cmnd: scsi command to be sent
2268  */
2269 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2270 {
2271 	struct zfcp_fsf_req *req;
2272 	struct fcp_cmnd *fcp_cmnd;
2273 	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2274 	int retval = -EIO;
2275 	struct scsi_device *sdev = scsi_cmnd->device;
2276 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2277 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2278 	struct zfcp_qdio *qdio = adapter->qdio;
2279 	struct fsf_qtcb_bottom_io *io;
2280 	unsigned long flags;
2281 
2282 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2283 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2284 		return -EBUSY;
2285 
2286 	spin_lock_irqsave(&qdio->req_q_lock, flags);
2287 	if (atomic_read(&qdio->req_q_free) <= 0) {
2288 		atomic_inc(&qdio->req_q_full);
2289 		goto out;
2290 	}
2291 
2292 	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2293 		sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2294 
2295 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2296 				  sbtype, adapter->pool.scsi_req);
2297 
2298 	if (IS_ERR(req)) {
2299 		retval = PTR_ERR(req);
2300 		goto out;
2301 	}
2302 
2303 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2304 
2305 	io = &req->qtcb->bottom.io;
2306 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2307 	req->data = scsi_cmnd;
2308 	req->handler = zfcp_fsf_fcp_cmnd_handler;
2309 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2310 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2311 	io->service_class = FSF_CLASS_3;
2312 	io->fcp_cmnd_length = FCP_CMND_LEN;
2313 
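	/* for DIF/DIX protected commands pass block size and initial ref tag */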
2314 	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2315 		io->data_block_length = scsi_cmnd->device->sector_size;
2316 		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2317 	}
2318 
2319 	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2320 		goto failed_scsi_cmnd;
2321 
2322 	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
2323 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2324 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2325 
2326 	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
2327 	    scsi_prot_sg_count(scsi_cmnd)) {
2328 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2329 				       scsi_prot_sg_count(scsi_cmnd));
2330 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2331 						 scsi_prot_sglist(scsi_cmnd));
2332 		if (retval)
2333 			goto failed_scsi_cmnd;
2334 		io->prot_data_length = zfcp_qdio_real_bytes(
2335 						scsi_prot_sglist(scsi_cmnd));
2336 	}
2337 
2338 	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2339 					 scsi_sglist(scsi_cmnd));
2340 	if (unlikely(retval))
2341 		goto failed_scsi_cmnd;
2342 
2343 	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2344 	if (zfcp_adapter_multi_buffer_active(adapter))
2345 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
2346 
2347 	retval = zfcp_fsf_req_send(req);
2348 	if (unlikely(retval))
2349 		goto failed_scsi_cmnd;
2350 	/* NOTE: DO NOT TOUCH req PAST THIS POINT! */
2351 
2352 	goto out;
2353 
2354 failed_scsi_cmnd:
2355 	zfcp_fsf_req_free(req);
2356 	scsi_cmnd->host_scribble = NULL;
2357 out:
2358 	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2359 	return retval;
2360 }
2361 
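/*
 * Completion handler for task management FCP_CMND requests: evaluate
 * the FCP response info and flag the request with
 * ZFCP_STATUS_FSFREQ_TMFUNCFAILED unless the TMF completed successfully.
 */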
2362 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2363 {
2364 	struct scsi_device *sdev = req->data;
2365 	struct fcp_resp_with_ext *fcp_rsp;
2366 	struct fcp_resp_rsp_info *rsp_info;
2367 
2368 	zfcp_fsf_fcp_handler_common(req, sdev);
2369 
2370 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2371 	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2372 
2373 	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2374 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2375 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2376 }
2377 
2378 /**
2379  * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
2380  * @sdev: Pointer to SCSI device to send the task management command to.
2381  * @tm_flags: Unsigned byte for task management flags.
2382  *
2383  * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
2384  */
2385 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
2386 					    u8 tm_flags)
2387 {
2388 	struct zfcp_fsf_req *req = NULL;
2389 	struct fcp_cmnd *fcp_cmnd;
2390 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2391 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2392 
2393 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2394 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2395 		return NULL;
2396 
2397 	spin_lock_irq(&qdio->req_q_lock);
2398 	if (zfcp_qdio_sbal_get(qdio))
2399 		goto out;
2400 
2401 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2402 				  SBAL_SFLAGS0_TYPE_WRITE,
2403 				  qdio->adapter->pool.scsi_req);
2404 
2405 	if (IS_ERR(req)) {
2406 		req = NULL;
2407 		goto out;
2408 	}
2409 
2410 	req->data = sdev;
2411 
2412 	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2413 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2414 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2415 	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2416 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2417 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2418 
2419 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2420 
2421 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2422 	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
2423 
2424 	zfcp_fsf_start_timer(req, ZFCP_FSF_SCSI_ER_TIMEOUT);
2425 	if (!zfcp_fsf_req_send(req)) {
2426 		/* NOTE: DO NOT TOUCH req, UNTIL IT COMPLETES! */
2427 		goto out;
2428 	}
2429 
2430 	zfcp_fsf_req_free(req);
2431 	req = NULL;
2432 out:
2433 	spin_unlock_irq(&qdio->req_q_lock);
2434 	return req;
2435 }
2436 
2437 /**
2438  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2439  * @qdio: pointer to struct zfcp_qdio
2440  * @sbal_idx: response queue index of SBAL to be processed
2441  */
2442 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2443 {
2444 	struct zfcp_adapter *adapter = qdio->adapter;
2445 	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2446 	struct qdio_buffer_element *sbale;
2447 	struct zfcp_fsf_req *fsf_req;
2448 	unsigned long req_id;
2449 	int idx;
2450 
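	/*
	 * Walk the SBALEs of this response SBAL up to the last-entry flag;
	 * each SBALE address carries the request ID of a completed request.
	 */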
2451 	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2452 
2453 		sbale = &sbal->element[idx];
2454 		req_id = (unsigned long) sbale->addr;
2455 		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2456 
2457 		if (!fsf_req) {
2458 			/*
2459 			 * An unknown request means potential memory corruption,
2460 			 * so we must stop the machine immediately.
2461 			 */
2462 			zfcp_qdio_siosl(adapter);
2463 			panic("error: unknown req_id (%lx) on adapter %s.\n",
2464 			      req_id, dev_name(&adapter->ccw_device->dev));
2465 		}
2466 
2467 		zfcp_fsf_req_complete(fsf_req);
2468 
2469 		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2470 			break;
2471 	}
2472 }
2473