1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * zfcp device driver
4  *
5  * Implementation of FSF commands.
6  *
7  * Copyright IBM Corp. 2002, 2018
8  */
9 
10 #define KMSG_COMPONENT "zfcp"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 
13 #include <linux/blktrace_api.h>
14 #include <linux/slab.h>
15 #include <scsi/fc/fc_els.h>
16 #include "zfcp_ext.h"
17 #include "zfcp_fc.h"
18 #include "zfcp_dbf.h"
19 #include "zfcp_qdio.h"
20 #include "zfcp_reqlist.h"
21 
22 struct kmem_cache *zfcp_fsf_qtcb_cache;
23 
24 static bool ber_stop = true;
25 module_param(ber_stop, bool, 0600);
26 MODULE_PARM_DESC(ber_stop,
27 		 "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
28 
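/*
 * Called when the timer of an FSF request expires: request a channel log
 * via SIOSL and reopen the adapter through error recovery.
 */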
29 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
30 {
31 	struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
32 	struct zfcp_adapter *adapter = fsf_req->adapter;
33 
34 	zfcp_qdio_siosl(adapter);
35 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
36 				"fsrth_1");
37 }
38 
39 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
40 				 unsigned long timeout)
41 {
42 	fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
43 	fsf_req->timer.expires = jiffies + timeout;
44 	add_timer(&fsf_req->timer);
45 }
46 
47 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
48 {
49 	BUG_ON(!fsf_req->erp_action);
50 	fsf_req->timer.function = zfcp_erp_timeout_handler;
51 	fsf_req->timer.expires = jiffies + 30 * HZ;
52 	add_timer(&fsf_req->timer);
53 }
54 
55 /* association between FSF command and FSF QTCB type */
56 static u32 fsf_qtcb_type[] = {
57 	[FSF_QTCB_FCP_CMND] =             FSF_IO_COMMAND,
58 	[FSF_QTCB_ABORT_FCP_CMND] =       FSF_SUPPORT_COMMAND,
59 	[FSF_QTCB_OPEN_PORT_WITH_DID] =   FSF_SUPPORT_COMMAND,
60 	[FSF_QTCB_OPEN_LUN] =             FSF_SUPPORT_COMMAND,
61 	[FSF_QTCB_CLOSE_LUN] =            FSF_SUPPORT_COMMAND,
62 	[FSF_QTCB_CLOSE_PORT] =           FSF_SUPPORT_COMMAND,
63 	[FSF_QTCB_CLOSE_PHYSICAL_PORT] =  FSF_SUPPORT_COMMAND,
64 	[FSF_QTCB_SEND_ELS] =             FSF_SUPPORT_COMMAND,
65 	[FSF_QTCB_SEND_GENERIC] =         FSF_SUPPORT_COMMAND,
66 	[FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
67 	[FSF_QTCB_EXCHANGE_PORT_DATA] =   FSF_PORT_COMMAND,
68 	[FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
69 	[FSF_QTCB_UPLOAD_CONTROL_FILE] =  FSF_SUPPORT_COMMAND
70 };
71 
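/*
 * The FCP channel reported an unsupported FC service class: log the
 * condition, shut the adapter down and flag the request as failed.
 */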
72 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
73 {
74 	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
75 		"operational because of an unsupported FC class\n");
76 	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
77 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
78 }
79 
80 /**
81  * zfcp_fsf_req_free - free memory used by fsf request
82  * @req: pointer to struct zfcp_fsf_req
83  */
84 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
85 {
86 	if (likely(req->pool)) {
87 		if (likely(req->qtcb))
88 			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
89 		mempool_free(req, req->pool);
90 		return;
91 	}
92 
93 	if (likely(req->qtcb))
94 		kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb);
95 	kfree(req);
96 }
97 
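/*
 * Unsolicited status "port closed": look up the affected port by its
 * destination ID and reopen it through error recovery.
 */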
98 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
99 {
100 	unsigned long flags;
101 	struct fsf_status_read_buffer *sr_buf = req->data;
102 	struct zfcp_adapter *adapter = req->adapter;
103 	struct zfcp_port *port;
104 	int d_id = ntoh24(sr_buf->d_id);
105 
106 	read_lock_irqsave(&adapter->port_list_lock, flags);
107 	list_for_each_entry(port, &adapter->port_list, list)
108 		if (port->d_id == d_id) {
109 			zfcp_erp_port_reopen(port, 0, "fssrpc1");
110 			break;
111 		}
112 	read_unlock_irqrestore(&adapter->port_list_lock, flags);
113 }
114 
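/*
 * Evaluate link-down information: mark the local link as unplugged, block
 * the remote ports, warn with a reason derived from the error code (if
 * one was provided) and set the adapter status to ERP failed.
 */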
115 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
116 					 struct fsf_link_down_info *link_down)
117 {
118 	struct zfcp_adapter *adapter = req->adapter;
119 
120 	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
121 		return;
122 
123 	atomic_or(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
124 
125 	zfcp_scsi_schedule_rports_block(adapter);
126 
127 	if (!link_down)
128 		goto out;
129 
130 	switch (link_down->error_code) {
131 	case FSF_PSQ_LINK_NO_LIGHT:
132 		dev_warn(&req->adapter->ccw_device->dev,
133 			 "There is no light signal from the local "
134 			 "fibre channel cable\n");
135 		break;
136 	case FSF_PSQ_LINK_WRAP_PLUG:
137 		dev_warn(&req->adapter->ccw_device->dev,
138 			 "There is a wrap plug instead of a fibre "
139 			 "channel cable\n");
140 		break;
141 	case FSF_PSQ_LINK_NO_FCP:
142 		dev_warn(&req->adapter->ccw_device->dev,
143 			 "The adjacent fibre channel node does not "
144 			 "support FCP\n");
145 		break;
146 	case FSF_PSQ_LINK_FIRMWARE_UPDATE:
147 		dev_warn(&req->adapter->ccw_device->dev,
148 			 "The FCP device is suspended because of a "
149 			 "firmware update\n");
150 		break;
151 	case FSF_PSQ_LINK_INVALID_WWPN:
152 		dev_warn(&req->adapter->ccw_device->dev,
153 			 "The FCP device detected a WWPN that is "
154 			 "duplicate or not valid\n");
155 		break;
156 	case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
157 		dev_warn(&req->adapter->ccw_device->dev,
158 			 "The fibre channel fabric does not support NPIV\n");
159 		break;
160 	case FSF_PSQ_LINK_NO_FCP_RESOURCES:
161 		dev_warn(&req->adapter->ccw_device->dev,
162 			 "The FCP adapter cannot support more NPIV ports\n");
163 		break;
164 	case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
165 		dev_warn(&req->adapter->ccw_device->dev,
166 			 "The adjacent switch cannot support "
167 			 "more NPIV ports\n");
168 		break;
169 	case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
170 		dev_warn(&req->adapter->ccw_device->dev,
171 			 "The FCP adapter could not log in to the "
172 			 "fibre channel fabric\n");
173 		break;
174 	case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
175 		dev_warn(&req->adapter->ccw_device->dev,
176 			 "The WWPN assignment file on the FCP adapter "
177 			 "has been damaged\n");
178 		break;
179 	case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
180 		dev_warn(&req->adapter->ccw_device->dev,
181 			 "The mode table on the FCP adapter "
182 			 "has been damaged\n");
183 		break;
184 	case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
185 		dev_warn(&req->adapter->ccw_device->dev,
186 			 "All NPIV ports on the FCP adapter have "
187 			 "been assigned\n");
188 		break;
189 	default:
190 		dev_warn(&req->adapter->ccw_device->dev,
191 			 "The link between the FCP adapter and "
192 			 "the FC fabric is down\n");
193 	}
194 out:
195 	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
196 }
197 
198 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
199 {
200 	struct fsf_status_read_buffer *sr_buf = req->data;
201 	struct fsf_link_down_info *ldi =
202 		(struct fsf_link_down_info *) &sr_buf->payload;
203 
204 	switch (sr_buf->status_subtype) {
205 	case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
206 	case FSF_STATUS_READ_SUB_FDISC_FAILED:
207 		zfcp_fsf_link_down_info_eval(req, ldi);
208 		break;
209 	case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
210 		zfcp_fsf_link_down_info_eval(req, NULL);
211 	}
212 }
213 
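/*
 * Handle completion of an unsolicited status read: dispatch on the status
 * type, return the status buffer and the request to their pools, and
 * schedule the adapter work queue to replenish the status read buffers.
 */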
214 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
215 {
216 	struct zfcp_adapter *adapter = req->adapter;
217 	struct fsf_status_read_buffer *sr_buf = req->data;
218 
219 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
220 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
221 		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
222 		zfcp_fsf_req_free(req);
223 		return;
224 	}
225 
226 	zfcp_dbf_hba_fsf_uss("fssrh_4", req);
227 
228 	switch (sr_buf->status_type) {
229 	case FSF_STATUS_READ_PORT_CLOSED:
230 		zfcp_fsf_status_read_port_closed(req);
231 		break;
232 	case FSF_STATUS_READ_INCOMING_ELS:
233 		zfcp_fc_incoming_els(req);
234 		break;
235 	case FSF_STATUS_READ_SENSE_DATA_AVAIL:
236 		break;
237 	case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
238 		zfcp_dbf_hba_bit_err("fssrh_3", req);
239 		if (ber_stop) {
240 			dev_warn(&adapter->ccw_device->dev,
241 				 "All paths over this FCP device are disused because of excessive bit errors\n");
242 			zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
243 		} else {
244 			dev_warn(&adapter->ccw_device->dev,
245 				 "The error threshold for checksum statistics has been exceeded\n");
246 		}
247 		break;
248 	case FSF_STATUS_READ_LINK_DOWN:
249 		zfcp_fsf_status_read_link_down(req);
250 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
251 		break;
252 	case FSF_STATUS_READ_LINK_UP:
253 		dev_info(&adapter->ccw_device->dev,
254 			 "The local link has been restored\n");
255 		/* All ports should be marked as ready to run again */
256 		zfcp_erp_set_adapter_status(adapter,
257 					    ZFCP_STATUS_COMMON_RUNNING);
258 		zfcp_erp_adapter_reopen(adapter,
259 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
260 					ZFCP_STATUS_COMMON_ERP_FAILED,
261 					"fssrh_2");
262 		zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
263 
264 		break;
265 	case FSF_STATUS_READ_NOTIFICATION_LOST:
266 		if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
267 			zfcp_fc_conditional_port_scan(adapter);
268 		break;
269 	case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
270 		adapter->adapter_features = sr_buf->payload.word[0];
271 		break;
272 	}
273 
274 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
275 	zfcp_fsf_req_free(req);
276 
277 	atomic_inc(&adapter->stat_miss);
278 	queue_work(adapter->work_queue, &adapter->stat_work);
279 }
280 
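/*
 * Evaluate the FSF status qualifier of FSF_ADAPTER_STATUS_AVAILABLE;
 * every case that does not return early marks the request as failed.
 */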
281 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
282 {
283 	switch (req->qtcb->header.fsf_status_qual.word[0]) {
284 	case FSF_SQ_FCP_RSP_AVAILABLE:
285 	case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
286 	case FSF_SQ_NO_RETRY_POSSIBLE:
287 	case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
288 		return;
289 	case FSF_SQ_COMMAND_ABORTED:
290 		break;
291 	case FSF_SQ_NO_RECOM:
292 		dev_err(&req->adapter->ccw_device->dev,
293 			"The FCP adapter reported a problem "
294 			"that cannot be recovered\n");
295 		zfcp_qdio_siosl(req->adapter);
296 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
297 		break;
298 	}
299 	/* all non-return status codes set FSFREQ_ERROR */
300 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
301 }
302 
303 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
304 {
305 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
306 		return;
307 
308 	switch (req->qtcb->header.fsf_status) {
309 	case FSF_UNKNOWN_COMMAND:
310 		dev_err(&req->adapter->ccw_device->dev,
311 			"The FCP adapter does not recognize the command 0x%x\n",
312 			req->qtcb->header.fsf_command);
313 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
314 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
315 		break;
316 	case FSF_ADAPTER_STATUS_AVAILABLE:
317 		zfcp_fsf_fsfstatus_qual_eval(req);
318 		break;
319 	}
320 }
321 
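/*
 * Evaluate the QTCB protocol status: anything other than "good" or
 * "FSF status presented" marks the request as failed and, depending on
 * the cause, triggers adapter reopen or shutdown.
 */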
322 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
323 {
324 	struct zfcp_adapter *adapter = req->adapter;
325 	struct fsf_qtcb *qtcb = req->qtcb;
326 	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
327 
328 	zfcp_dbf_hba_fsf_response(req);
329 
330 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
331 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
332 		return;
333 	}
334 
335 	switch (qtcb->prefix.prot_status) {
336 	case FSF_PROT_GOOD:
337 	case FSF_PROT_FSF_STATUS_PRESENTED:
338 		return;
339 	case FSF_PROT_QTCB_VERSION_ERROR:
340 		dev_err(&adapter->ccw_device->dev,
341 			"QTCB version 0x%x not supported by FCP adapter "
342 			"(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
343 			psq->word[0], psq->word[1]);
344 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
345 		break;
346 	case FSF_PROT_ERROR_STATE:
347 	case FSF_PROT_SEQ_NUMB_ERROR:
348 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
349 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
350 		break;
351 	case FSF_PROT_UNSUPP_QTCB_TYPE:
352 		dev_err(&adapter->ccw_device->dev,
353 			"The QTCB type is not supported by the FCP adapter\n");
354 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
355 		break;
356 	case FSF_PROT_HOST_CONNECTION_INITIALIZING:
357 		atomic_or(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
358 				&adapter->status);
359 		break;
360 	case FSF_PROT_DUPLICATE_REQUEST_ID:
361 		dev_err(&adapter->ccw_device->dev,
362 			"0x%Lx is an ambiguous request identifier\n",
363 			(unsigned long long)qtcb->bottom.support.req_handle);
364 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
365 		break;
366 	case FSF_PROT_LINK_DOWN:
367 		zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
368 		/* go through reopen to flush pending requests */
369 		zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
370 		break;
371 	case FSF_PROT_REEST_QUEUE:
372 		/* All ports should be marked as ready to run again */
373 		zfcp_erp_set_adapter_status(adapter,
374 					    ZFCP_STATUS_COMMON_RUNNING);
375 		zfcp_erp_adapter_reopen(adapter,
376 					ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
377 					ZFCP_STATUS_COMMON_ERP_FAILED,
378 					"fspse_8");
379 		break;
380 	default:
381 		dev_err(&adapter->ccw_device->dev,
382 			"0x%x is not a valid transfer protocol status\n",
383 			qtcb->prefix.prot_status);
384 		zfcp_qdio_siosl(adapter);
385 		zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
386 	}
387 	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
388 }
389 
390 /**
391  * zfcp_fsf_req_complete - process completion of an FSF request
392  * @req: The FSF request that has been completed.
393  *
394  * When a request has been completed either from the FCP adapter,
395  * or it has been dismissed due to a queue shutdown, this function
396  * is called to process the completion status and trigger further
397  * events related to the FSF request.
398  */
399 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
400 {
401 	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
402 		zfcp_fsf_status_read_handler(req);
403 		return;
404 	}
405 
406 	del_timer_sync(&req->timer);
407 	zfcp_fsf_protstatus_eval(req);
408 	zfcp_fsf_fsfstatus_eval(req);
409 	req->handler(req);
410 
411 	if (req->erp_action)
412 		zfcp_erp_notify(req->erp_action, 0);
413 
414 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
415 		zfcp_fsf_req_free(req);
416 	else
417 		complete(&req->completion);
418 }
419 
420 /**
421  * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
422  * @adapter: pointer to struct zfcp_adapter
423  *
424  * Never ever call this without shutting down the adapter first.
425  * Otherwise the adapter would continue using and corrupting s390 storage.
426  * A BUG_ON() call is included to ensure this is done.
427  * ERP is supposed to be the only user of this function.
428  */
429 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
430 {
431 	struct zfcp_fsf_req *req, *tmp;
432 	LIST_HEAD(remove_queue);
433 
434 	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
435 	zfcp_reqlist_move(adapter->req_list, &remove_queue);
436 
437 	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
438 		list_del(&req->list);
439 		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
440 		zfcp_fsf_req_complete(req);
441 	}
442 }
443 
444 #define ZFCP_FSF_PORTSPEED_1GBIT	(1 <<  0)
445 #define ZFCP_FSF_PORTSPEED_2GBIT	(1 <<  1)
446 #define ZFCP_FSF_PORTSPEED_4GBIT	(1 <<  2)
447 #define ZFCP_FSF_PORTSPEED_10GBIT	(1 <<  3)
448 #define ZFCP_FSF_PORTSPEED_8GBIT	(1 <<  4)
449 #define ZFCP_FSF_PORTSPEED_16GBIT	(1 <<  5)
450 #define ZFCP_FSF_PORTSPEED_32GBIT	(1 <<  6)
451 #define ZFCP_FSF_PORTSPEED_64GBIT	(1 <<  7)
452 #define ZFCP_FSF_PORTSPEED_128GBIT	(1 <<  8)
453 #define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
454 
455 static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
456 {
457 	u32 fdmi_speed = 0;
458 	if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
459 		fdmi_speed |= FC_PORTSPEED_1GBIT;
460 	if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
461 		fdmi_speed |= FC_PORTSPEED_2GBIT;
462 	if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
463 		fdmi_speed |= FC_PORTSPEED_4GBIT;
464 	if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
465 		fdmi_speed |= FC_PORTSPEED_10GBIT;
466 	if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
467 		fdmi_speed |= FC_PORTSPEED_8GBIT;
468 	if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
469 		fdmi_speed |= FC_PORTSPEED_16GBIT;
470 	if (fsf_speed & ZFCP_FSF_PORTSPEED_32GBIT)
471 		fdmi_speed |= FC_PORTSPEED_32GBIT;
472 	if (fsf_speed & ZFCP_FSF_PORTSPEED_64GBIT)
473 		fdmi_speed |= FC_PORTSPEED_64GBIT;
474 	if (fsf_speed & ZFCP_FSF_PORTSPEED_128GBIT)
475 		fdmi_speed |= FC_PORTSPEED_128GBIT;
476 	if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
477 		fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
478 	return fdmi_speed;
479 }
480 
481 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
482 {
483 	struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
484 	struct zfcp_adapter *adapter = req->adapter;
485 	struct Scsi_Host *shost = adapter->scsi_host;
486 	struct fc_els_flogi *nsp, *plogi;
487 
488 	/* adjust pointers for missing command code */
489 	nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
490 					- sizeof(u32));
491 	plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
492 					- sizeof(u32));
493 
494 	if (req->data)
495 		memcpy(req->data, bottom, sizeof(*bottom));
496 
497 	fc_host_port_name(shost) = be64_to_cpu(nsp->fl_wwpn);
498 	fc_host_node_name(shost) = be64_to_cpu(nsp->fl_wwnn);
499 	fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
500 
501 	adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
502 	adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
503 					 (u16)FSF_STATUS_READS_RECOM);
504 
505 	if (fc_host_permanent_port_name(shost) == -1)
506 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
507 
508 	zfcp_scsi_set_prot(adapter);
509 
510 	/* no error return above here, otherwise must fix call chains */
511 	/* do not evaluate invalid fields */
512 	if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE)
513 		return 0;
514 
515 	fc_host_port_id(shost) = ntoh24(bottom->s_id);
516 	fc_host_speed(shost) =
517 		zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
518 
519 	adapter->hydra_version = bottom->adapter_type;
520 
521 	switch (bottom->fc_topology) {
522 	case FSF_TOPO_P2P:
523 		adapter->peer_d_id = ntoh24(bottom->peer_d_id);
524 		adapter->peer_wwpn = be64_to_cpu(plogi->fl_wwpn);
525 		adapter->peer_wwnn = be64_to_cpu(plogi->fl_wwnn);
526 		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
527 		break;
528 	case FSF_TOPO_FABRIC:
529 		if (bottom->connection_features & FSF_FEATURE_NPIV_MODE)
530 			fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
531 		else
532 			fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
533 		break;
534 	case FSF_TOPO_AL:
535 		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
536 		/* fall through */
537 	default:
538 		dev_err(&adapter->ccw_device->dev,
539 			"Unknown or unsupported arbitrated loop "
540 			"fibre channel topology detected\n");
541 		zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
542 		return -EIO;
543 	}
544 
545 	return 0;
546 }
547 
548 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
549 {
550 	struct zfcp_adapter *adapter = req->adapter;
551 	struct fsf_qtcb *qtcb = req->qtcb;
552 	struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
553 	struct Scsi_Host *shost = adapter->scsi_host;
554 
555 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
556 		return;
557 
558 	adapter->fsf_lic_version = bottom->lic_version;
559 	adapter->adapter_features = bottom->adapter_features;
560 	adapter->connection_features = bottom->connection_features;
561 	adapter->peer_wwpn = 0;
562 	adapter->peer_wwnn = 0;
563 	adapter->peer_d_id = 0;
564 
565 	switch (qtcb->header.fsf_status) {
566 	case FSF_GOOD:
567 		if (zfcp_fsf_exchange_config_evaluate(req))
568 			return;
569 
570 		if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
571 			dev_err(&adapter->ccw_device->dev,
572 				"FCP adapter maximum QTCB size (%d bytes) "
573 				"is too small\n",
574 				bottom->max_qtcb_size);
575 			zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
576 			return;
577 		}
578 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
579 				&adapter->status);
580 		break;
581 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
582 		fc_host_node_name(shost) = 0;
583 		fc_host_port_name(shost) = 0;
584 		fc_host_port_id(shost) = 0;
585 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
586 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
587 		adapter->hydra_version = 0;
588 
589 		/* avoids adapter shutdown to be able to recognize
590 		 * events such as LINK UP */
591 		atomic_or(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
592 				&adapter->status);
593 		zfcp_fsf_link_down_info_eval(req,
594 			&qtcb->header.fsf_status_qual.link_down_info);
595 		if (zfcp_fsf_exchange_config_evaluate(req))
596 			return;
597 		break;
598 	default:
599 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
600 		return;
601 	}
602 
603 	if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
604 		adapter->hardware_version = bottom->hardware_version;
605 		memcpy(fc_host_serial_number(shost), bottom->serial_number,
606 		       min(FC_SERIAL_NUMBER_SIZE, 17));
607 		EBCASC(fc_host_serial_number(shost),
608 		       min(FC_SERIAL_NUMBER_SIZE, 17));
609 	}
610 
611 	if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
612 		dev_err(&adapter->ccw_device->dev,
613 			"The FCP adapter only supports newer "
614 			"control block versions\n");
615 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
616 		return;
617 	}
618 	if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
619 		dev_err(&adapter->ccw_device->dev,
620 			"The FCP adapter only supports older "
621 			"control block versions\n");
622 		zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
623 	}
624 }
625 
626 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
627 {
628 	struct zfcp_adapter *adapter = req->adapter;
629 	struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
630 	struct Scsi_Host *shost = adapter->scsi_host;
631 
632 	if (req->data)
633 		memcpy(req->data, bottom, sizeof(*bottom));
634 
635 	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
636 		fc_host_permanent_port_name(shost) = bottom->wwpn;
637 	} else
638 		fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
639 	fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
640 	fc_host_supported_speeds(shost) =
641 		zfcp_fsf_convert_portspeed(bottom->supported_speed);
642 	memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
643 	       FC_FC4_LIST_SIZE);
644 	memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
645 	       FC_FC4_LIST_SIZE);
646 }
647 
648 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
649 {
650 	struct fsf_qtcb *qtcb = req->qtcb;
651 
652 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
653 		return;
654 
655 	switch (qtcb->header.fsf_status) {
656 	case FSF_GOOD:
657 		zfcp_fsf_exchange_port_evaluate(req);
658 		break;
659 	case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
660 		zfcp_fsf_exchange_port_evaluate(req);
661 		zfcp_fsf_link_down_info_eval(req,
662 			&qtcb->header.fsf_status_qual.link_down_info);
663 		break;
664 	}
665 }
666 
667 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
668 {
669 	struct zfcp_fsf_req *req;
670 
671 	if (likely(pool))
672 		req = mempool_alloc(pool, GFP_ATOMIC);
673 	else
674 		req = kmalloc(sizeof(*req), GFP_ATOMIC);
675 
676 	if (unlikely(!req))
677 		return NULL;
678 
679 	memset(req, 0, sizeof(*req));
680 	req->pool = pool;
681 	return req;
682 }
683 
684 static struct fsf_qtcb *zfcp_fsf_qtcb_alloc(mempool_t *pool)
685 {
686 	struct fsf_qtcb *qtcb;
687 
688 	if (likely(pool))
689 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
690 	else
691 		qtcb = kmem_cache_alloc(zfcp_fsf_qtcb_cache, GFP_ATOMIC);
692 
693 	if (unlikely(!qtcb))
694 		return NULL;
695 
696 	memset(qtcb, 0, sizeof(*qtcb));
697 	return qtcb;
698 }
699 
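/*
 * Allocate and initialize an FSF request: assign the next request id, set
 * up timer and completion, allocate and prefill the QTCB (except for
 * unsolicited status requests) and initialize the QDIO request.
 */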
700 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
701 						u32 fsf_cmd, u8 sbtype,
702 						mempool_t *pool)
703 {
704 	struct zfcp_adapter *adapter = qdio->adapter;
705 	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
706 
707 	if (unlikely(!req))
708 		return ERR_PTR(-ENOMEM);
709 
710 	if (adapter->req_no == 0)
711 		adapter->req_no++;
712 
713 	INIT_LIST_HEAD(&req->list);
714 	timer_setup(&req->timer, NULL, 0);
715 	init_completion(&req->completion);
716 
717 	req->adapter = adapter;
718 	req->fsf_command = fsf_cmd;
719 	req->req_id = adapter->req_no;
720 
721 	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
722 		if (likely(pool))
723 			req->qtcb = zfcp_fsf_qtcb_alloc(
724 				adapter->pool.qtcb_pool);
725 		else
726 			req->qtcb = zfcp_fsf_qtcb_alloc(NULL);
727 
728 		if (unlikely(!req->qtcb)) {
729 			zfcp_fsf_req_free(req);
730 			return ERR_PTR(-ENOMEM);
731 		}
732 
733 		req->seq_no = adapter->fsf_req_seq_no;
734 		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
735 		req->qtcb->prefix.req_id = req->req_id;
736 		req->qtcb->prefix.ulp_info = 26;
737 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
738 		req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
739 		req->qtcb->header.req_handle = req->req_id;
740 		req->qtcb->header.fsf_command = req->fsf_command;
741 	}
742 
743 	zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
744 			   req->qtcb, sizeof(struct fsf_qtcb));
745 
746 	return req;
747 }
748 
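/*
 * Add the request to the adapter's request list and pass it to QDIO for
 * transmission; on failure remove it again and reopen the adapter.
 */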
749 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
750 {
751 	struct zfcp_adapter *adapter = req->adapter;
752 	struct zfcp_qdio *qdio = adapter->qdio;
753 	int with_qtcb = (req->qtcb != NULL);
754 	int req_id = req->req_id;
755 
756 	zfcp_reqlist_add(adapter->req_list, req);
757 
758 	req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
759 	req->issued = get_tod_clock();
760 	if (zfcp_qdio_send(qdio, &req->qdio_req)) {
761 		del_timer_sync(&req->timer);
762 		/* lookup request again, list might have changed */
763 		zfcp_reqlist_find_rm(adapter->req_list, req_id);
764 		zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
765 		return -EIO;
766 	}
767 
768 	/* Don't increase for unsolicited status */
769 	if (with_qtcb)
770 		adapter->fsf_req_seq_no++;
771 	adapter->req_no++;
772 
773 	return 0;
774 }
775 
776 /**
777  * zfcp_fsf_status_read - send status read request
778  * @qdio: pointer to struct zfcp_qdio
780  * Returns: 0 on success, ERROR otherwise
781  */
782 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
783 {
784 	struct zfcp_adapter *adapter = qdio->adapter;
785 	struct zfcp_fsf_req *req;
786 	struct fsf_status_read_buffer *sr_buf;
787 	struct page *page;
788 	int retval = -EIO;
789 
790 	spin_lock_irq(&qdio->req_q_lock);
791 	if (zfcp_qdio_sbal_get(qdio))
792 		goto out;
793 
794 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
795 				  SBAL_SFLAGS0_TYPE_STATUS,
796 				  adapter->pool.status_read_req);
797 	if (IS_ERR(req)) {
798 		retval = PTR_ERR(req);
799 		goto out;
800 	}
801 
802 	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
803 	if (!page) {
804 		retval = -ENOMEM;
805 		goto failed_buf;
806 	}
807 	sr_buf = page_address(page);
808 	memset(sr_buf, 0, sizeof(*sr_buf));
809 	req->data = sr_buf;
810 
811 	zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
812 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
813 
814 	retval = zfcp_fsf_req_send(req);
815 	if (retval)
816 		goto failed_req_send;
817 
818 	goto out;
819 
820 failed_req_send:
821 	req->data = NULL;
822 	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
823 failed_buf:
824 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
825 	zfcp_fsf_req_free(req);
826 out:
827 	spin_unlock_irq(&qdio->req_q_lock);
828 	return retval;
829 }
830 
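/* Evaluate the FSF status of a completed "abort FCP command" request. */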
831 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
832 {
833 	struct scsi_device *sdev = req->data;
834 	struct zfcp_scsi_dev *zfcp_sdev;
835 	union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
836 
837 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
838 		return;
839 
840 	zfcp_sdev = sdev_to_zfcp(sdev);
841 
842 	switch (req->qtcb->header.fsf_status) {
843 	case FSF_PORT_HANDLE_NOT_VALID:
844 		if (fsq->word[0] == fsq->word[1]) {
845 			zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
846 						"fsafch1");
847 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
848 		}
849 		break;
850 	case FSF_LUN_HANDLE_NOT_VALID:
851 		if (fsq->word[0] == fsq->word[1]) {
852 			zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
853 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
854 		}
855 		break;
856 	case FSF_FCP_COMMAND_DOES_NOT_EXIST:
857 		req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
858 		break;
859 	case FSF_PORT_BOXED:
860 		zfcp_erp_set_port_status(zfcp_sdev->port,
861 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
862 		zfcp_erp_port_reopen(zfcp_sdev->port,
863 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
864 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
865 		break;
866 	case FSF_LUN_BOXED:
867 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
868 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
869 				    "fsafch4");
870 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
871                 break;
872 	case FSF_ADAPTER_STATUS_AVAILABLE:
873 		switch (fsq->word[0]) {
874 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
875 			zfcp_fc_test_link(zfcp_sdev->port);
876 			/* fall through */
877 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
878 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
879 			break;
880 		}
881 		break;
882 	case FSF_GOOD:
883 		req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
884 		break;
885 	}
886 }
887 
888 /**
889  * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
890  * @scmnd: The SCSI command to abort
891  * Returns: pointer to struct zfcp_fsf_req
892  */
893 
894 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
895 {
896 	struct zfcp_fsf_req *req = NULL;
897 	struct scsi_device *sdev = scmnd->device;
898 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
899 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
900 	unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
901 
902 	spin_lock_irq(&qdio->req_q_lock);
903 	if (zfcp_qdio_sbal_get(qdio))
904 		goto out;
905 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
906 				  SBAL_SFLAGS0_TYPE_READ,
907 				  qdio->adapter->pool.scsi_abort);
908 	if (IS_ERR(req)) {
909 		req = NULL;
910 		goto out;
911 	}
912 
913 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
914 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
915 		goto out_error_free;
916 
917 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
918 
919 	req->data = sdev;
920 	req->handler = zfcp_fsf_abort_fcp_command_handler;
921 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
922 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
923 	req->qtcb->bottom.support.req_handle = (u64) old_req_id;
924 
925 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
926 	if (!zfcp_fsf_req_send(req))
927 		goto out;
928 
929 out_error_free:
930 	zfcp_fsf_req_free(req);
931 	req = NULL;
932 out:
933 	spin_unlock_irq(&qdio->req_q_lock);
934 	return req;
935 }
936 
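/*
 * Completion handler for CT/GS requests: translate the FSF status into
 * the status field of struct zfcp_fsf_ct_els and call the caller's
 * handler, if one was set.
 */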
937 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
938 {
939 	struct zfcp_adapter *adapter = req->adapter;
940 	struct zfcp_fsf_ct_els *ct = req->data;
941 	struct fsf_qtcb_header *header = &req->qtcb->header;
942 
943 	ct->status = -EINVAL;
944 
945 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
946 		goto skip_fsfstatus;
947 
948 	switch (header->fsf_status) {
949         case FSF_GOOD:
950 		ct->status = 0;
951 		zfcp_dbf_san_res("fsscth2", req);
952 		break;
953         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
954 		zfcp_fsf_class_not_supp(req);
955 		break;
956         case FSF_ADAPTER_STATUS_AVAILABLE:
957                 switch (header->fsf_status_qual.word[0]){
958                 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
959                 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
960 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
961 			break;
962                 }
963                 break;
964         case FSF_PORT_BOXED:
965 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
966 		break;
967 	case FSF_PORT_HANDLE_NOT_VALID:
968 		zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
969 		/* fall through */
970 	case FSF_GENERIC_COMMAND_REJECTED:
971 	case FSF_PAYLOAD_SIZE_MISMATCH:
972 	case FSF_REQUEST_SIZE_TOO_LARGE:
973 	case FSF_RESPONSE_SIZE_TOO_LARGE:
974 	case FSF_SBAL_MISMATCH:
975 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
976 		break;
977 	}
978 
979 skip_fsfstatus:
980 	if (ct->handler)
981 		ct->handler(ct->handler_data);
982 }
983 
984 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
985 					    struct zfcp_qdio_req *q_req,
986 					    struct scatterlist *sg_req,
987 					    struct scatterlist *sg_resp)
988 {
989 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
990 	zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
991 	zfcp_qdio_set_sbale_last(qdio, q_req);
992 }
993 
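/*
 * Map the CT/ELS request and response scatterlists onto SBALs: with
 * multi-buffer support the buffers are mapped directly, otherwise a
 * single unchained SBAL is used if possible, falling back to chained
 * SBALs when the adapter supports them.
 */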
994 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
995 				       struct scatterlist *sg_req,
996 				       struct scatterlist *sg_resp)
997 {
998 	struct zfcp_adapter *adapter = req->adapter;
999 	struct zfcp_qdio *qdio = adapter->qdio;
1000 	struct fsf_qtcb *qtcb = req->qtcb;
1001 	u32 feat = adapter->adapter_features;
1002 
1003 	if (zfcp_adapter_multi_buffer_active(adapter)) {
1004 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1005 			return -EIO;
1006 		qtcb->bottom.support.req_buf_length =
1007 			zfcp_qdio_real_bytes(sg_req);
1008 		if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1009 			return -EIO;
1010 		qtcb->bottom.support.resp_buf_length =
1011 			zfcp_qdio_real_bytes(sg_resp);
1012 
1013 		zfcp_qdio_set_data_div(qdio, &req->qdio_req, sg_nents(sg_req));
1014 		zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1015 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
1016 		return 0;
1017 	}
1018 
1019 	/* use single, unchained SBAL if it can hold the request */
1020 	if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
1021 		zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req,
1022 						sg_req, sg_resp);
1023 		return 0;
1024 	}
1025 
1026 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS))
1027 		return -EOPNOTSUPP;
1028 
1029 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req))
1030 		return -EIO;
1031 
1032 	qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req);
1033 
1034 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1035 	zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req);
1036 
1037 	if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp))
1038 		return -EIO;
1039 
1040 	qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp);
1041 
1042 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1043 
1044 	return 0;
1045 }
1046 
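/*
 * Common setup for CT/GS and ELS requests: map the buffers, set the
 * service class and the hardware timeout (capped at 255 seconds) and
 * start a slightly longer software timer.
 */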
1047 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1048 				 struct scatterlist *sg_req,
1049 				 struct scatterlist *sg_resp,
1050 				 unsigned int timeout)
1051 {
1052 	int ret;
1053 
1054 	ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
1055 	if (ret)
1056 		return ret;
1057 
1058 	/* common settings for ct/gs and els requests */
1059 	if (timeout > 255)
1060 		timeout = 255; /* max value accepted by hardware */
1061 	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1062 	req->qtcb->bottom.support.timeout = timeout;
1063 	zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
1064 
1065 	return 0;
1066 }
1067 
1068 /**
1069  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1070  * @ct: pointer to struct zfcp_fsf_ct_els with data for the CT request
1071  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
1072  */
1073 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1074 		     struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1075 		     unsigned int timeout)
1076 {
1077 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1078 	struct zfcp_fsf_req *req;
1079 	int ret = -EIO;
1080 
1081 	spin_lock_irq(&qdio->req_q_lock);
1082 	if (zfcp_qdio_sbal_get(qdio))
1083 		goto out;
1084 
1085 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1086 				  SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1087 
1088 	if (IS_ERR(req)) {
1089 		ret = PTR_ERR(req);
1090 		goto out;
1091 	}
1092 
1093 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1094 	ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1095 	if (ret)
1096 		goto failed_send;
1097 
1098 	req->handler = zfcp_fsf_send_ct_handler;
1099 	req->qtcb->header.port_handle = wka_port->handle;
1100 	ct->d_id = wka_port->d_id;
1101 	req->data = ct;
1102 
1103 	zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1104 
1105 	ret = zfcp_fsf_req_send(req);
1106 	if (ret)
1107 		goto failed_send;
1108 
1109 	goto out;
1110 
1111 failed_send:
1112 	zfcp_fsf_req_free(req);
1113 out:
1114 	spin_unlock_irq(&qdio->req_q_lock);
1115 	return ret;
1116 }
1117 
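/*
 * Completion handler for ELS requests: translate the FSF status into the
 * status field of struct zfcp_fsf_ct_els and call the caller's handler,
 * if one was set.
 */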
1118 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1119 {
1120 	struct zfcp_fsf_ct_els *send_els = req->data;
1121 	struct fsf_qtcb_header *header = &req->qtcb->header;
1122 
1123 	send_els->status = -EINVAL;
1124 
1125 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1126 		goto skip_fsfstatus;
1127 
1128 	switch (header->fsf_status) {
1129 	case FSF_GOOD:
1130 		send_els->status = 0;
1131 		zfcp_dbf_san_res("fsselh1", req);
1132 		break;
1133 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1134 		zfcp_fsf_class_not_supp(req);
1135 		break;
1136 	case FSF_ADAPTER_STATUS_AVAILABLE:
1137 		switch (header->fsf_status_qual.word[0]){
1138 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1139 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1140 		case FSF_SQ_RETRY_IF_POSSIBLE:
1141 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1142 			break;
1143 		}
1144 		break;
1145 	case FSF_ELS_COMMAND_REJECTED:
1146 	case FSF_PAYLOAD_SIZE_MISMATCH:
1147 	case FSF_REQUEST_SIZE_TOO_LARGE:
1148 	case FSF_RESPONSE_SIZE_TOO_LARGE:
1149 		break;
1150 	case FSF_SBAL_MISMATCH:
1151 		/* should never occur, avoided in zfcp_fsf_send_els */
1152 		/* fall through */
1153 	default:
1154 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1155 		break;
1156 	}
1157 skip_fsfstatus:
1158 	if (send_els->handler)
1159 		send_els->handler(send_els->handler_data);
1160 }
1161 
1162 /**
1163  * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1164  * @els: pointer to struct zfcp_fsf_ct_els with data for the ELS command
1165  */
1166 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1167 		      struct zfcp_fsf_ct_els *els, unsigned int timeout)
1168 {
1169 	struct zfcp_fsf_req *req;
1170 	struct zfcp_qdio *qdio = adapter->qdio;
1171 	int ret = -EIO;
1172 
1173 	spin_lock_irq(&qdio->req_q_lock);
1174 	if (zfcp_qdio_sbal_get(qdio))
1175 		goto out;
1176 
1177 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1178 				  SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1179 
1180 	if (IS_ERR(req)) {
1181 		ret = PTR_ERR(req);
1182 		goto out;
1183 	}
1184 
1185 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1186 
1187 	if (!zfcp_adapter_multi_buffer_active(adapter))
1188 		zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1189 
1190 	ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1191 
1192 	if (ret)
1193 		goto failed_send;
1194 
1195 	hton24(req->qtcb->bottom.support.d_id, d_id);
1196 	req->handler = zfcp_fsf_send_els_handler;
1197 	els->d_id = d_id;
1198 	req->data = els;
1199 
1200 	zfcp_dbf_san_req("fssels1", req, d_id);
1201 
1202 	ret = zfcp_fsf_req_send(req);
1203 	if (ret)
1204 		goto failed_send;
1205 
1206 	goto out;
1207 
1208 failed_send:
1209 	zfcp_fsf_req_free(req);
1210 out:
1211 	spin_unlock_irq(&qdio->req_q_lock);
1212 	return ret;
1213 }
1214 
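/**
 * zfcp_fsf_exchange_config_data - request configuration data from the adapter
 * @erp_action: ERP action for the adapter whose configuration data is requested
 * Returns: 0 on success, error otherwise
 */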
1215 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1216 {
1217 	struct zfcp_fsf_req *req;
1218 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1219 	int retval = -EIO;
1220 
1221 	spin_lock_irq(&qdio->req_q_lock);
1222 	if (zfcp_qdio_sbal_get(qdio))
1223 		goto out;
1224 
1225 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1226 				  SBAL_SFLAGS0_TYPE_READ,
1227 				  qdio->adapter->pool.erp_req);
1228 
1229 	if (IS_ERR(req)) {
1230 		retval = PTR_ERR(req);
1231 		goto out;
1232 	}
1233 
1234 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1235 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1236 
1237 	req->qtcb->bottom.config.feature_selection =
1238 			FSF_FEATURE_NOTIFICATION_LOST |
1239 			FSF_FEATURE_UPDATE_ALERT;
1240 	req->erp_action = erp_action;
1241 	req->handler = zfcp_fsf_exchange_config_data_handler;
1242 	erp_action->fsf_req_id = req->req_id;
1243 
1244 	zfcp_fsf_start_erp_timer(req);
1245 	retval = zfcp_fsf_req_send(req);
1246 	if (retval) {
1247 		zfcp_fsf_req_free(req);
1248 		erp_action->fsf_req_id = 0;
1249 	}
1250 out:
1251 	spin_unlock_irq(&qdio->req_q_lock);
1252 	return retval;
1253 }
1254 
1255 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1256 				       struct fsf_qtcb_bottom_config *data)
1257 {
1258 	struct zfcp_fsf_req *req = NULL;
1259 	int retval = -EIO;
1260 
1261 	spin_lock_irq(&qdio->req_q_lock);
1262 	if (zfcp_qdio_sbal_get(qdio))
1263 		goto out_unlock;
1264 
1265 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1266 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1267 
1268 	if (IS_ERR(req)) {
1269 		retval = PTR_ERR(req);
1270 		goto out_unlock;
1271 	}
1272 
1273 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1274 	req->handler = zfcp_fsf_exchange_config_data_handler;
1275 
1276 	req->qtcb->bottom.config.feature_selection =
1277 			FSF_FEATURE_NOTIFICATION_LOST |
1278 			FSF_FEATURE_UPDATE_ALERT;
1279 
1280 	if (data)
1281 		req->data = data;
1282 
1283 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1284 	retval = zfcp_fsf_req_send(req);
1285 	spin_unlock_irq(&qdio->req_q_lock);
1286 	if (!retval)
1287 		wait_for_completion(&req->completion);
1288 
1289 	zfcp_fsf_req_free(req);
1290 	return retval;
1291 
1292 out_unlock:
1293 	spin_unlock_irq(&qdio->req_q_lock);
1294 	return retval;
1295 }
1296 
1297 /**
1298  * zfcp_fsf_exchange_port_data - request information about local port
1299  * @erp_action: ERP action for the adapter for which port data is requested
1300  * Returns: 0 on success, error otherwise
1301  */
1302 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1303 {
1304 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1305 	struct zfcp_fsf_req *req;
1306 	int retval = -EIO;
1307 
1308 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1309 		return -EOPNOTSUPP;
1310 
1311 	spin_lock_irq(&qdio->req_q_lock);
1312 	if (zfcp_qdio_sbal_get(qdio))
1313 		goto out;
1314 
1315 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1316 				  SBAL_SFLAGS0_TYPE_READ,
1317 				  qdio->adapter->pool.erp_req);
1318 
1319 	if (IS_ERR(req)) {
1320 		retval = PTR_ERR(req);
1321 		goto out;
1322 	}
1323 
1324 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1325 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1326 
1327 	req->handler = zfcp_fsf_exchange_port_data_handler;
1328 	req->erp_action = erp_action;
1329 	erp_action->fsf_req_id = req->req_id;
1330 
1331 	zfcp_fsf_start_erp_timer(req);
1332 	retval = zfcp_fsf_req_send(req);
1333 	if (retval) {
1334 		zfcp_fsf_req_free(req);
1335 		erp_action->fsf_req_id = 0;
1336 	}
1337 out:
1338 	spin_unlock_irq(&qdio->req_q_lock);
1339 	return retval;
1340 }
1341 
1342 /**
1343  * zfcp_fsf_exchange_port_data_sync - request information about local port
1344  * @qdio: pointer to struct zfcp_qdio
1345  * @data: pointer to struct fsf_qtcb_bottom_port
1346  * Returns: 0 on success, error otherwise
1347  */
1348 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1349 				     struct fsf_qtcb_bottom_port *data)
1350 {
1351 	struct zfcp_fsf_req *req = NULL;
1352 	int retval = -EIO;
1353 
1354 	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1355 		return -EOPNOTSUPP;
1356 
1357 	spin_lock_irq(&qdio->req_q_lock);
1358 	if (zfcp_qdio_sbal_get(qdio))
1359 		goto out_unlock;
1360 
1361 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1362 				  SBAL_SFLAGS0_TYPE_READ, NULL);
1363 
1364 	if (IS_ERR(req)) {
1365 		retval = PTR_ERR(req);
1366 		goto out_unlock;
1367 	}
1368 
1369 	if (data)
1370 		req->data = data;
1371 
1372 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1373 
1374 	req->handler = zfcp_fsf_exchange_port_data_handler;
1375 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1376 	retval = zfcp_fsf_req_send(req);
1377 	spin_unlock_irq(&qdio->req_q_lock);
1378 
1379 	if (!retval)
1380 		wait_for_completion(&req->completion);
1381 
1382 	zfcp_fsf_req_free(req);
1383 
1384 	return retval;
1385 
1386 out_unlock:
1387 	spin_unlock_irq(&qdio->req_q_lock);
1388 	return retval;
1389 }
1390 
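/*
 * Completion handler for "open port": on success store the port handle,
 * mark the port open and evaluate the returned PLOGI payload; finally
 * drop the port reference taken by zfcp_fsf_open_port().
 */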
1391 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1392 {
1393 	struct zfcp_port *port = req->data;
1394 	struct fsf_qtcb_header *header = &req->qtcb->header;
1395 	struct fc_els_flogi *plogi;
1396 
1397 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1398 		goto out;
1399 
1400 	switch (header->fsf_status) {
1401 	case FSF_PORT_ALREADY_OPEN:
1402 		break;
1403 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1404 		dev_warn(&req->adapter->ccw_device->dev,
1405 			 "Not enough FCP adapter resources to open "
1406 			 "remote port 0x%016Lx\n",
1407 			 (unsigned long long)port->wwpn);
1408 		zfcp_erp_set_port_status(port,
1409 					 ZFCP_STATUS_COMMON_ERP_FAILED);
1410 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1411 		break;
1412 	case FSF_ADAPTER_STATUS_AVAILABLE:
1413 		switch (header->fsf_status_qual.word[0]) {
1414 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1415 			/* no zfcp_fc_test_link() with failed open port */
1416 			/* fall through */
1417 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1418 		case FSF_SQ_NO_RETRY_POSSIBLE:
1419 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1420 			break;
1421 		}
1422 		break;
1423 	case FSF_GOOD:
1424 		port->handle = header->port_handle;
1425 		atomic_or(ZFCP_STATUS_COMMON_OPEN |
1426 				ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1427 		atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_BOXED,
1428 		                  &port->status);
1429 		/* check whether D_ID has changed during open */
1430 		/*
1431 		 * FIXME: This check is not airtight, as the FCP channel does
1432 		 * not monitor closures of target port connections caused on
1433 		 * the remote side. Thus, it might miss out on invalidating
1434 		 * locally cached WWPNs (and other N_Port parameters) of gone
1435 		 * target ports. So, our heroic attempt to make things safe
1436 		 * could be undermined by 'open port' response data tagged with
1437 		 * obsolete WWPNs. Another reason to monitor potential
1438 		 * connection closures ourselves at least (by interpreting
1439 		 * incoming ELS' and unsolicited status). It just crosses my
1440 		 * mind that one should be able to cross-check by means of
1441 		 * another GID_PN straight after a port has been opened.
1442 		 * Alternatively, an ADISC/PDISC ELS should suffice as well.
1443 		 */
1444 		plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1445 		if (req->qtcb->bottom.support.els1_length >=
1446 		    FSF_PLOGI_MIN_LEN)
1447 				zfcp_fc_plogi_evaluate(port, plogi);
1448 		break;
1449 	case FSF_UNKNOWN_OP_SUBTYPE:
1450 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1451 		break;
1452 	}
1453 
1454 out:
1455 	put_device(&port->dev);
1456 }
1457 
1458 /**
1459  * zfcp_fsf_open_port - create and send open port request
1460  * @erp_action: pointer to struct zfcp_erp_action
1461  * Returns: 0 on success, error otherwise
1462  */
1463 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1464 {
1465 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1466 	struct zfcp_port *port = erp_action->port;
1467 	struct zfcp_fsf_req *req;
1468 	int retval = -EIO;
1469 
1470 	spin_lock_irq(&qdio->req_q_lock);
1471 	if (zfcp_qdio_sbal_get(qdio))
1472 		goto out;
1473 
1474 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1475 				  SBAL_SFLAGS0_TYPE_READ,
1476 				  qdio->adapter->pool.erp_req);
1477 
1478 	if (IS_ERR(req)) {
1479 		retval = PTR_ERR(req);
1480 		goto out;
1481 	}
1482 
1483 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1484 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1485 
1486 	req->handler = zfcp_fsf_open_port_handler;
1487 	hton24(req->qtcb->bottom.support.d_id, port->d_id);
1488 	req->data = port;
1489 	req->erp_action = erp_action;
1490 	erp_action->fsf_req_id = req->req_id;
1491 	get_device(&port->dev);
1492 
1493 	zfcp_fsf_start_erp_timer(req);
1494 	retval = zfcp_fsf_req_send(req);
1495 	if (retval) {
1496 		zfcp_fsf_req_free(req);
1497 		erp_action->fsf_req_id = 0;
1498 		put_device(&port->dev);
1499 	}
1500 out:
1501 	spin_unlock_irq(&qdio->req_q_lock);
1502 	return retval;
1503 }
1504 
1505 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1506 {
1507 	struct zfcp_port *port = req->data;
1508 
1509 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1510 		return;
1511 
1512 	switch (req->qtcb->header.fsf_status) {
1513 	case FSF_PORT_HANDLE_NOT_VALID:
1514 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1515 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1516 		break;
1517 	case FSF_ADAPTER_STATUS_AVAILABLE:
1518 		break;
1519 	case FSF_GOOD:
1520 		zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1521 		break;
1522 	}
1523 }
1524 
1525 /**
1526  * zfcp_fsf_close_port - create and send close port request
1527  * @erp_action: pointer to struct zfcp_erp_action
1528  * Returns: 0 on success, error otherwise
1529  */
1530 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1531 {
1532 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1533 	struct zfcp_fsf_req *req;
1534 	int retval = -EIO;
1535 
1536 	spin_lock_irq(&qdio->req_q_lock);
1537 	if (zfcp_qdio_sbal_get(qdio))
1538 		goto out;
1539 
1540 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1541 				  SBAL_SFLAGS0_TYPE_READ,
1542 				  qdio->adapter->pool.erp_req);
1543 
1544 	if (IS_ERR(req)) {
1545 		retval = PTR_ERR(req);
1546 		goto out;
1547 	}
1548 
1549 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1550 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1551 
1552 	req->handler = zfcp_fsf_close_port_handler;
1553 	req->data = erp_action->port;
1554 	req->erp_action = erp_action;
1555 	req->qtcb->header.port_handle = erp_action->port->handle;
1556 	erp_action->fsf_req_id = req->req_id;
1557 
1558 	zfcp_fsf_start_erp_timer(req);
1559 	retval = zfcp_fsf_req_send(req);
1560 	if (retval) {
1561 		zfcp_fsf_req_free(req);
1562 		erp_action->fsf_req_id = 0;
1563 	}
1564 out:
1565 	spin_unlock_irq(&qdio->req_q_lock);
1566 	return retval;
1567 }
1568 
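/*
 * Completion handler for "open WKA port": record the port handle and the
 * new WKA port state, then wake up the waiter.
 */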
1569 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1570 {
1571 	struct zfcp_fc_wka_port *wka_port = req->data;
1572 	struct fsf_qtcb_header *header = &req->qtcb->header;
1573 
1574 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1575 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1576 		goto out;
1577 	}
1578 
1579 	switch (header->fsf_status) {
1580 	case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1581 		dev_warn(&req->adapter->ccw_device->dev,
1582 			 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1583 		/* fall through */
1584 	case FSF_ADAPTER_STATUS_AVAILABLE:
1585 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1586 		wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1587 		break;
1588 	case FSF_GOOD:
1589 		wka_port->handle = header->port_handle;
1590 		/* fall through */
1591 	case FSF_PORT_ALREADY_OPEN:
1592 		wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1593 	}
1594 out:
1595 	wake_up(&wka_port->opened);
1596 }
1597 
1598 /**
1599  * zfcp_fsf_open_wka_port - create and send open wka-port request
1600  * @wka_port: pointer to struct zfcp_fc_wka_port
1601  * Returns: 0 on success, error otherwise
1602  */
1603 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1604 {
1605 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1606 	struct zfcp_fsf_req *req;
1607 	unsigned long req_id = 0;
1608 	int retval = -EIO;
1609 
1610 	spin_lock_irq(&qdio->req_q_lock);
1611 	if (zfcp_qdio_sbal_get(qdio))
1612 		goto out;
1613 
1614 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1615 				  SBAL_SFLAGS0_TYPE_READ,
1616 				  qdio->adapter->pool.erp_req);
1617 
1618 	if (IS_ERR(req)) {
1619 		retval = PTR_ERR(req);
1620 		goto out;
1621 	}
1622 
1623 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1624 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1625 
1626 	req->handler = zfcp_fsf_open_wka_port_handler;
1627 	hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1628 	req->data = wka_port;
1629 
1630 	req_id = req->req_id;
1631 
1632 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1633 	retval = zfcp_fsf_req_send(req);
1634 	if (retval)
1635 		zfcp_fsf_req_free(req);
1636 out:
1637 	spin_unlock_irq(&qdio->req_q_lock);
1638 	if (!retval)
1639 		zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
1640 	return retval;
1641 }
1642 
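/*
 * Completion handler for "close WKA port": mark the WKA port offline and
 * wake up the waiter; an invalid port handle additionally triggers
 * adapter recovery.
 */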
1643 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1644 {
1645 	struct zfcp_fc_wka_port *wka_port = req->data;
1646 
1647 	if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1648 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1649 		zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1650 	}
1651 
1652 	wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1653 	wake_up(&wka_port->closed);
1654 }
1655 
1656 /**
1657  * zfcp_fsf_close_wka_port - create and send close wka port request
1658  * @wka_port: WKA port to close
1659  * Returns: 0 on success, error otherwise
1660  */
1661 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1662 {
1663 	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1664 	struct zfcp_fsf_req *req;
1665 	unsigned long req_id = 0;
1666 	int retval = -EIO;
1667 
1668 	spin_lock_irq(&qdio->req_q_lock);
1669 	if (zfcp_qdio_sbal_get(qdio))
1670 		goto out;
1671 
1672 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1673 				  SBAL_SFLAGS0_TYPE_READ,
1674 				  qdio->adapter->pool.erp_req);
1675 
1676 	if (IS_ERR(req)) {
1677 		retval = PTR_ERR(req);
1678 		goto out;
1679 	}
1680 
1681 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1682 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1683 
1684 	req->handler = zfcp_fsf_close_wka_port_handler;
1685 	req->data = wka_port;
1686 	req->qtcb->header.port_handle = wka_port->handle;
1687 
1688 	req_id = req->req_id;
1689 
1690 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1691 	retval = zfcp_fsf_req_send(req);
1692 	if (retval)
1693 		zfcp_fsf_req_free(req);
1694 out:
1695 	spin_unlock_irq(&qdio->req_q_lock);
1696 	if (!retval)
1697 		zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
1698 	return retval;
1699 }
1700 
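/*
 * Completion handler for a "close physical port" request. For
 * FSF_PORT_BOXED and FSF_GOOD the physical-open bit of the port and the
 * open bit of every LUN behind it are cleared by hand, because the
 * generic status helpers would also clear ZFCP_STATUS_COMMON_OPEN on
 * the port itself.
 */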
1701 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1702 {
1703 	struct zfcp_port *port = req->data;
1704 	struct fsf_qtcb_header *header = &req->qtcb->header;
1705 	struct scsi_device *sdev;
1706 
1707 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1708 		return;
1709 
1710 	switch (header->fsf_status) {
1711 	case FSF_PORT_HANDLE_NOT_VALID:
1712 		zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1713 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1714 		break;
1715 	case FSF_PORT_BOXED:
1716 		/* can't use generic zfcp_erp_modify_port_status because
1717 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1718 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1719 		shost_for_each_device(sdev, port->adapter->scsi_host)
1720 			if (sdev_to_zfcp(sdev)->port == port)
1721 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1722 						  &sdev_to_zfcp(sdev)->status);
1723 		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1724 		zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1725 				     "fscpph2");
1726 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1727 		break;
1728 	case FSF_ADAPTER_STATUS_AVAILABLE:
1729 		switch (header->fsf_status_qual.word[0]) {
1730 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1731 			/* fall through */
1732 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1733 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1734 			break;
1735 		}
1736 		break;
1737 	case FSF_GOOD:
1738 		/* can't use generic zfcp_erp_modify_port_status because
1739 		 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1740 		 */
1741 		atomic_andnot(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1742 		shost_for_each_device(sdev, port->adapter->scsi_host)
1743 			if (sdev_to_zfcp(sdev)->port == port)
1744 				atomic_andnot(ZFCP_STATUS_COMMON_OPEN,
1745 						  &sdev_to_zfcp(sdev)->status);
1746 		break;
1747 	}
1748 }
1749 
1750 /**
1751  * zfcp_fsf_close_physical_port - close physical port
1752  * @erp_action: pointer to struct zfcp_erp_action
1753  * Returns: 0 on success, error otherwise
1754  */
1755 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1756 {
1757 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1758 	struct zfcp_fsf_req *req;
1759 	int retval = -EIO;
1760 
1761 	spin_lock_irq(&qdio->req_q_lock);
1762 	if (zfcp_qdio_sbal_get(qdio))
1763 		goto out;
1764 
1765 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1766 				  SBAL_SFLAGS0_TYPE_READ,
1767 				  qdio->adapter->pool.erp_req);
1768 
1769 	if (IS_ERR(req)) {
1770 		retval = PTR_ERR(req);
1771 		goto out;
1772 	}
1773 
1774 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1775 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1776 
1777 	req->data = erp_action->port;
1778 	req->qtcb->header.port_handle = erp_action->port->handle;
1779 	req->erp_action = erp_action;
1780 	req->handler = zfcp_fsf_close_physical_port_handler;
1781 	erp_action->fsf_req_id = req->req_id;
1782 
1783 	zfcp_fsf_start_erp_timer(req);
1784 	retval = zfcp_fsf_req_send(req);
1785 	if (retval) {
1786 		zfcp_fsf_req_free(req);
1787 		erp_action->fsf_req_id = 0;
1788 	}
1789 out:
1790 	spin_unlock_irq(&qdio->req_q_lock);
1791 	return retval;
1792 }
1793 
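/*
 * Completion handler for an "open LUN" request: evaluates the FSF
 * status, triggers port or adapter recovery where needed, and on
 * FSF_GOOD stores the LUN handle and marks the LUN open. A LUN sharing
 * violation or exhausted LUN handles marks the LUN as failed.
 */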
1794 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1795 {
1796 	struct zfcp_adapter *adapter = req->adapter;
1797 	struct scsi_device *sdev = req->data;
1798 	struct zfcp_scsi_dev *zfcp_sdev;
1799 	struct fsf_qtcb_header *header = &req->qtcb->header;
1800 	union fsf_status_qual *qual = &header->fsf_status_qual;
1801 
1802 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1803 		return;
1804 
1805 	zfcp_sdev = sdev_to_zfcp(sdev);
1806 
1807 	atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1808 			  ZFCP_STATUS_COMMON_ACCESS_BOXED,
1809 			  &zfcp_sdev->status);
1810 
1811 	switch (header->fsf_status) {
1812 
1813 	case FSF_PORT_HANDLE_NOT_VALID:
1814 		zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1815 		/* fall through */
1816 	case FSF_LUN_ALREADY_OPEN:
1817 		break;
1818 	case FSF_PORT_BOXED:
1819 		zfcp_erp_set_port_status(zfcp_sdev->port,
1820 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1821 		zfcp_erp_port_reopen(zfcp_sdev->port,
1822 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1823 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1824 		break;
1825 	case FSF_LUN_SHARING_VIOLATION:
1826 		if (qual->word[0])
1827 			dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
1828 				 "LUN 0x%Lx on port 0x%Lx is already in "
1829 				 "use by CSS%d, MIF Image ID %x\n",
1830 				 zfcp_scsi_dev_lun(sdev),
1831 				 (unsigned long long)zfcp_sdev->port->wwpn,
1832 				 qual->fsf_queue_designator.cssid,
1833 				 qual->fsf_queue_designator.hla);
1834 		zfcp_erp_set_lun_status(sdev,
1835 					ZFCP_STATUS_COMMON_ERP_FAILED |
1836 					ZFCP_STATUS_COMMON_ACCESS_DENIED);
1837 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1838 		break;
1839 	case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1840 		dev_warn(&adapter->ccw_device->dev,
1841 			 "No handle is available for LUN "
1842 			 "0x%016Lx on port 0x%016Lx\n",
1843 			 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1844 			 (unsigned long long)zfcp_sdev->port->wwpn);
1845 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1846 		/* fall through */
1847 	case FSF_INVALID_COMMAND_OPTION:
1848 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1849 		break;
1850 	case FSF_ADAPTER_STATUS_AVAILABLE:
1851 		switch (header->fsf_status_qual.word[0]) {
1852 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1853 			zfcp_fc_test_link(zfcp_sdev->port);
1854 			/* fall through */
1855 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1856 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1857 			break;
1858 		}
1859 		break;
1860 
1861 	case FSF_GOOD:
1862 		zfcp_sdev->lun_handle = header->lun_handle;
1863 		atomic_or(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1864 		break;
1865 	}
1866 }
1867 
1868 /**
1869  * zfcp_fsf_open_lun - open LUN
1870  * @erp_action: pointer to struct zfcp_erp_action
1871  * Returns: 0 on success, error otherwise
1872  */
1873 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1874 {
1875 	struct zfcp_adapter *adapter = erp_action->adapter;
1876 	struct zfcp_qdio *qdio = adapter->qdio;
1877 	struct zfcp_fsf_req *req;
1878 	int retval = -EIO;
1879 
1880 	spin_lock_irq(&qdio->req_q_lock);
1881 	if (zfcp_qdio_sbal_get(qdio))
1882 		goto out;
1883 
1884 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1885 				  SBAL_SFLAGS0_TYPE_READ,
1886 				  adapter->pool.erp_req);
1887 
1888 	if (IS_ERR(req)) {
1889 		retval = PTR_ERR(req);
1890 		goto out;
1891 	}
1892 
1893 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1894 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1895 
1896 	req->qtcb->header.port_handle = erp_action->port->handle;
1897 	req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1898 	req->handler = zfcp_fsf_open_lun_handler;
1899 	req->data = erp_action->sdev;
1900 	req->erp_action = erp_action;
1901 	erp_action->fsf_req_id = req->req_id;
1902 
1903 	if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1904 		req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1905 
1906 	zfcp_fsf_start_erp_timer(req);
1907 	retval = zfcp_fsf_req_send(req);
1908 	if (retval) {
1909 		zfcp_fsf_req_free(req);
1910 		erp_action->fsf_req_id = 0;
1911 	}
1912 out:
1913 	spin_unlock_irq(&qdio->req_q_lock);
1914 	return retval;
1915 }
1916 
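/*
 * Completion handler for a "close LUN" request: invalid handles trigger
 * adapter or port recovery, a boxed port is reopened, and on FSF_GOOD
 * the LUN's open status bit is cleared.
 */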
1917 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1918 {
1919 	struct scsi_device *sdev = req->data;
1920 	struct zfcp_scsi_dev *zfcp_sdev;
1921 
1922 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1923 		return;
1924 
1925 	zfcp_sdev = sdev_to_zfcp(sdev);
1926 
1927 	switch (req->qtcb->header.fsf_status) {
1928 	case FSF_PORT_HANDLE_NOT_VALID:
1929 		zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1930 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1931 		break;
1932 	case FSF_LUN_HANDLE_NOT_VALID:
1933 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1934 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1935 		break;
1936 	case FSF_PORT_BOXED:
1937 		zfcp_erp_set_port_status(zfcp_sdev->port,
1938 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1939 		zfcp_erp_port_reopen(zfcp_sdev->port,
1940 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1941 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1942 		break;
1943 	case FSF_ADAPTER_STATUS_AVAILABLE:
1944 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
1945 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1946 			zfcp_fc_test_link(zfcp_sdev->port);
1947 			/* fall through */
1948 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1949 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1950 			break;
1951 		}
1952 		break;
1953 	case FSF_GOOD:
1954 		atomic_andnot(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1955 		break;
1956 	}
1957 }
1958 
1959 /**
1960  * zfcp_fsf_close_lun - close LUN
1961  * @erp_action: pointer to erp_action triggering the "close LUN"
1962  * Returns: 0 on success, error otherwise
1963  */
1964 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1965 {
1966 	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1967 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1968 	struct zfcp_fsf_req *req;
1969 	int retval = -EIO;
1970 
1971 	spin_lock_irq(&qdio->req_q_lock);
1972 	if (zfcp_qdio_sbal_get(qdio))
1973 		goto out;
1974 
1975 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1976 				  SBAL_SFLAGS0_TYPE_READ,
1977 				  qdio->adapter->pool.erp_req);
1978 
1979 	if (IS_ERR(req)) {
1980 		retval = PTR_ERR(req);
1981 		goto out;
1982 	}
1983 
1984 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1985 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1986 
1987 	req->qtcb->header.port_handle = erp_action->port->handle;
1988 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1989 	req->handler = zfcp_fsf_close_lun_handler;
1990 	req->data = erp_action->sdev;
1991 	req->erp_action = erp_action;
1992 	erp_action->fsf_req_id = req->req_id;
1993 
1994 	zfcp_fsf_start_erp_timer(req);
1995 	retval = zfcp_fsf_req_send(req);
1996 	if (retval) {
1997 		zfcp_fsf_req_free(req);
1998 		erp_action->fsf_req_id = 0;
1999 	}
2000 out:
2001 	spin_unlock_irq(&qdio->req_q_lock);
2002 	return retval;
2003 }
2004 
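/* fold one latency sample into the running sum/min/max of a record */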
2005 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
2006 {
2007 	lat_rec->sum += lat;
2008 	lat_rec->min = min(lat_rec->min, lat);
2009 	lat_rec->max = max(lat_rec->max, lat);
2010 }
2011 
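/*
 * Attach zfcp driver data (and, if the channel provides measurement
 * data, the per-command channel and fabric latencies) to the block
 * layer trace of the completed SCSI command, and account the latencies
 * per SCSI device.
 */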
2012 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2013 {
2014 	struct fsf_qual_latency_info *lat_in;
2015 	struct latency_cont *lat = NULL;
2016 	struct zfcp_scsi_dev *zfcp_sdev;
2017 	struct zfcp_blk_drv_data blktrc;
2018 	int ticks = req->adapter->timer_ticks;
2019 
2020 	lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2021 
2022 	blktrc.flags = 0;
2023 	blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2024 	if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2025 		blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2026 	blktrc.inb_usage = 0;
2027 	blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
2028 
2029 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
2030 	    !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2031 		zfcp_sdev = sdev_to_zfcp(scsi->device);
2032 		blktrc.flags |= ZFCP_BLK_LAT_VALID;
2033 		blktrc.channel_lat = lat_in->channel_lat * ticks;
2034 		blktrc.fabric_lat = lat_in->fabric_lat * ticks;
2035 
2036 		switch (req->qtcb->bottom.io.data_direction) {
2037 		case FSF_DATADIR_DIF_READ_STRIP:
2038 		case FSF_DATADIR_DIF_READ_CONVERT:
2039 		case FSF_DATADIR_READ:
2040 			lat = &zfcp_sdev->latencies.read;
2041 			break;
2042 		case FSF_DATADIR_DIF_WRITE_INSERT:
2043 		case FSF_DATADIR_DIF_WRITE_CONVERT:
2044 		case FSF_DATADIR_WRITE:
2045 			lat = &zfcp_sdev->latencies.write;
2046 			break;
2047 		case FSF_DATADIR_CMND:
2048 			lat = &zfcp_sdev->latencies.cmd;
2049 			break;
2050 		}
2051 
2052 		if (lat) {
2053 			spin_lock(&zfcp_sdev->latencies.lock);
2054 			zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2055 			zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2056 			lat->counter++;
2057 			spin_unlock(&zfcp_sdev->latencies.lock);
2058 		}
2059 	}
2060 
2061 	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
2062 			    sizeof(blktrc));
2063 }
2064 
2065 /**
2066  * zfcp_fsf_fcp_handler_common() - FCP response handler common to I/O and TMF.
2067  * @req: Pointer to FSF request.
2068  * @sdev: Pointer to SCSI device as request context.
2069  */
2070 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req,
2071 					struct scsi_device *sdev)
2072 {
2073 	struct zfcp_scsi_dev *zfcp_sdev;
2074 	struct fsf_qtcb_header *header = &req->qtcb->header;
2075 
2076 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2077 		return;
2078 
2079 	zfcp_sdev = sdev_to_zfcp(sdev);
2080 
2081 	switch (header->fsf_status) {
2082 	case FSF_HANDLE_MISMATCH:
2083 	case FSF_PORT_HANDLE_NOT_VALID:
2084 		zfcp_erp_adapter_reopen(req->adapter, 0, "fssfch1");
2085 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2086 		break;
2087 	case FSF_FCPLUN_NOT_VALID:
2088 	case FSF_LUN_HANDLE_NOT_VALID:
2089 		zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2090 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2091 		break;
2092 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2093 		zfcp_fsf_class_not_supp(req);
2094 		break;
2095 	case FSF_DIRECTION_INDICATOR_NOT_VALID:
2096 		dev_err(&req->adapter->ccw_device->dev,
2097 			"Incorrect direction %d, LUN 0x%016Lx on port "
2098 			"0x%016Lx closed\n",
2099 			req->qtcb->bottom.io.data_direction,
2100 			(unsigned long long)zfcp_scsi_dev_lun(sdev),
2101 			(unsigned long long)zfcp_sdev->port->wwpn);
2102 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch3");
2103 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2104 		break;
2105 	case FSF_CMND_LENGTH_NOT_VALID:
2106 		dev_err(&req->adapter->ccw_device->dev,
2107 			"Incorrect FCP_CMND length %d, FCP device closed\n",
2108 			req->qtcb->bottom.io.fcp_cmnd_length);
2109 		zfcp_erp_adapter_shutdown(req->adapter, 0, "fssfch4");
2110 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2111 		break;
2112 	case FSF_PORT_BOXED:
2113 		zfcp_erp_set_port_status(zfcp_sdev->port,
2114 					 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2115 		zfcp_erp_port_reopen(zfcp_sdev->port,
2116 				     ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2117 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2118 		break;
2119 	case FSF_LUN_BOXED:
2120 		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2121 		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2122 				    "fssfch6");
2123 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2124 		break;
2125 	case FSF_ADAPTER_STATUS_AVAILABLE:
2126 		if (header->fsf_status_qual.word[0] ==
2127 		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2128 			zfcp_fc_test_link(zfcp_sdev->port);
2129 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2130 		break;
2131 	}
2132 }
2133 
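/*
 * Completion handler for an FCP command: maps FSF and DIF errors to
 * SCSI result codes, otherwise evaluates the FCP response IU, then
 * traces the command and calls scsi_done() while still holding
 * abort_lock (see the comment at the end of this function).
 */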
2134 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2135 {
2136 	struct scsi_cmnd *scpnt;
2137 	struct fcp_resp_with_ext *fcp_rsp;
2138 	unsigned long flags;
2139 
2140 	read_lock_irqsave(&req->adapter->abort_lock, flags);
2141 
2142 	scpnt = req->data;
2143 	if (unlikely(!scpnt)) {
2144 		read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2145 		return;
2146 	}
2147 
2148 	zfcp_fsf_fcp_handler_common(req, scpnt->device);
2149 
2150 	if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2151 		set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2152 		goto skip_fsfstatus;
2153 	}
2154 
2155 	switch (req->qtcb->header.fsf_status) {
2156 	case FSF_INCONSISTENT_PROT_DATA:
2157 	case FSF_INVALID_PROT_PARM:
2158 		set_host_byte(scpnt, DID_ERROR);
2159 		goto skip_fsfstatus;
2160 	case FSF_BLOCK_GUARD_CHECK_FAILURE:
2161 		zfcp_scsi_dif_sense_error(scpnt, 0x1);
2162 		goto skip_fsfstatus;
2163 	case FSF_APP_TAG_CHECK_FAILURE:
2164 		zfcp_scsi_dif_sense_error(scpnt, 0x2);
2165 		goto skip_fsfstatus;
2166 	case FSF_REF_TAG_CHECK_FAILURE:
2167 		zfcp_scsi_dif_sense_error(scpnt, 0x3);
2168 		goto skip_fsfstatus;
2169 	}
2170 	BUILD_BUG_ON(sizeof(struct fcp_resp_with_ext) > FSF_FCP_RSP_SIZE);
2171 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2172 	zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2173 
2174 skip_fsfstatus:
2175 	zfcp_fsf_req_trace(req, scpnt);
2176 	zfcp_dbf_scsi_result(scpnt, req);
2177 
2178 	scpnt->host_scribble = NULL;
2179 	(scpnt->scsi_done) (scpnt);
2180 	/*
2181 	 * We must hold this lock until scsi_done has been called.
2182 	 * Otherwise we may call scsi_done after abort regarding this
2183 	 * command has completed.
2184 	 * Note: scsi_done must not block!
2185 	 */
2186 	read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2187 }
2188 
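/*
 * Derive the FSF data direction from the SCSI data direction and the
 * protection (DIF/DIX) operation of the command; returns -EINVAL for
 * unsupported combinations such as bidirectional transfers.
 */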
2189 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2190 {
2191 	switch (scsi_get_prot_op(scsi_cmnd)) {
2192 	case SCSI_PROT_NORMAL:
2193 		switch (scsi_cmnd->sc_data_direction) {
2194 		case DMA_NONE:
2195 			*data_dir = FSF_DATADIR_CMND;
2196 			break;
2197 		case DMA_FROM_DEVICE:
2198 			*data_dir = FSF_DATADIR_READ;
2199 			break;
2200 		case DMA_TO_DEVICE:
2201 			*data_dir = FSF_DATADIR_WRITE;
2202 			break;
2203 		case DMA_BIDIRECTIONAL:
2204 			return -EINVAL;
2205 		}
2206 		break;
2207 
2208 	case SCSI_PROT_READ_STRIP:
2209 		*data_dir = FSF_DATADIR_DIF_READ_STRIP;
2210 		break;
2211 	case SCSI_PROT_WRITE_INSERT:
2212 		*data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2213 		break;
2214 	case SCSI_PROT_READ_PASS:
2215 		*data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2216 		break;
2217 	case SCSI_PROT_WRITE_PASS:
2218 		*data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2219 		break;
2220 	default:
2221 		return -EINVAL;
2222 	}
2223 
2224 	return 0;
2225 }
2226 
2227 /**
2228  * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2229  * @scsi_cmnd: scsi command to be sent
2230  */
2231 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2232 {
2233 	struct zfcp_fsf_req *req;
2234 	struct fcp_cmnd *fcp_cmnd;
2235 	u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2236 	int retval = -EIO;
2237 	struct scsi_device *sdev = scsi_cmnd->device;
2238 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2239 	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2240 	struct zfcp_qdio *qdio = adapter->qdio;
2241 	struct fsf_qtcb_bottom_io *io;
2242 	unsigned long flags;
2243 
2244 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2245 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2246 		return -EBUSY;
2247 
2248 	spin_lock_irqsave(&qdio->req_q_lock, flags);
2249 	if (atomic_read(&qdio->req_q_free) <= 0) {
2250 		atomic_inc(&qdio->req_q_full);
2251 		goto out;
2252 	}
2253 
2254 	if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2255 		sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2256 
2257 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2258 				  sbtype, adapter->pool.scsi_req);
2259 
2260 	if (IS_ERR(req)) {
2261 		retval = PTR_ERR(req);
2262 		goto out;
2263 	}
2264 
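	/*
	 * Remember the request ID in host_scribble so the SCSI abort
	 * handling can identify the pending request later.
	 */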
2265 	scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2266 
2267 	io = &req->qtcb->bottom.io;
2268 	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2269 	req->data = scsi_cmnd;
2270 	req->handler = zfcp_fsf_fcp_cmnd_handler;
2271 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2272 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2273 	io->service_class = FSF_CLASS_3;
2274 	io->fcp_cmnd_length = FCP_CMND_LEN;
2275 
2276 	if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2277 		io->data_block_length = scsi_cmnd->device->sector_size;
2278 		io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2279 	}
2280 
2281 	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
2282 		goto failed_scsi_cmnd;
2283 
2284 	BUILD_BUG_ON(sizeof(struct fcp_cmnd) > FSF_FCP_CMND_SIZE);
2285 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2286 	zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2287 
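	/*
	 * For protected (DIF/DIX) commands with a separate protection
	 * scatterlist, set up the data-division information and map the
	 * protection buffers before the data buffers.
	 */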
2288 	if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
2289 	    scsi_prot_sg_count(scsi_cmnd)) {
2290 		zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2291 				       scsi_prot_sg_count(scsi_cmnd));
2292 		retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2293 						 scsi_prot_sglist(scsi_cmnd));
2294 		if (retval)
2295 			goto failed_scsi_cmnd;
2296 		io->prot_data_length = zfcp_qdio_real_bytes(
2297 						scsi_prot_sglist(scsi_cmnd));
2298 	}
2299 
2300 	retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2301 					 scsi_sglist(scsi_cmnd));
2302 	if (unlikely(retval))
2303 		goto failed_scsi_cmnd;
2304 
2305 	zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2306 	if (zfcp_adapter_multi_buffer_active(adapter))
2307 		zfcp_qdio_set_scount(qdio, &req->qdio_req);
2308 
2309 	retval = zfcp_fsf_req_send(req);
2310 	if (unlikely(retval))
2311 		goto failed_scsi_cmnd;
2312 
2313 	goto out;
2314 
2315 failed_scsi_cmnd:
2316 	zfcp_fsf_req_free(req);
2317 	scsi_cmnd->host_scribble = NULL;
2318 out:
2319 	spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2320 	return retval;
2321 }
2322 
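/*
 * Completion handler for a task management function: the TMF counts as
 * failed unless the FCP response reports FCP_TMF_CMPL and no request
 * error is flagged.
 */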
2323 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2324 {
2325 	struct scsi_device *sdev = req->data;
2326 	struct fcp_resp_with_ext *fcp_rsp;
2327 	struct fcp_resp_rsp_info *rsp_info;
2328 
2329 	zfcp_fsf_fcp_handler_common(req, sdev);
2330 
2331 	fcp_rsp = &req->qtcb->bottom.io.fcp_rsp.iu;
2332 	rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2333 
2334 	if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2335 	     (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2336 		req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2337 }
2338 
2339 /**
2340  * zfcp_fsf_fcp_task_mgmt() - Send SCSI task management command (TMF).
2341  * @sdev: Pointer to SCSI device to send the task management command to.
2342  * @tm_flags: Unsigned byte for task management flags.
2343  *
2344  * Return: On success pointer to struct zfcp_fsf_req, %NULL otherwise.
2345  */
2346 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_device *sdev,
2347 					    u8 tm_flags)
2348 {
2349 	struct zfcp_fsf_req *req = NULL;
2350 	struct fcp_cmnd *fcp_cmnd;
2351 	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2352 	struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2353 
2354 	if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2355 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
2356 		return NULL;
2357 
2358 	spin_lock_irq(&qdio->req_q_lock);
2359 	if (zfcp_qdio_sbal_get(qdio))
2360 		goto out;
2361 
2362 	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2363 				  SBAL_SFLAGS0_TYPE_WRITE,
2364 				  qdio->adapter->pool.scsi_req);
2365 
2366 	if (IS_ERR(req)) {
2367 		req = NULL;
2368 		goto out;
2369 	}
2370 
2371 	req->data = sdev;
2372 
2373 	req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2374 	req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2375 	req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2376 	req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2377 	req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2378 	req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2379 
2380 	zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2381 
2382 	fcp_cmnd = &req->qtcb->bottom.io.fcp_cmnd.iu;
2383 	zfcp_fc_fcp_tm(fcp_cmnd, sdev, tm_flags);
2384 
2385 	zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2386 	if (!zfcp_fsf_req_send(req))
2387 		goto out;
2388 
2389 	zfcp_fsf_req_free(req);
2390 	req = NULL;
2391 out:
2392 	spin_unlock_irq(&qdio->req_q_lock);
2393 	return req;
2394 }
2395 
2396 /**
2397  * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2398  * @qdio: pointer to struct zfcp_qdio
2399  * @sbal_idx: response queue index of SBAL to be processed
2400  */
2401 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2402 {
2403 	struct zfcp_adapter *adapter = qdio->adapter;
2404 	struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2405 	struct qdio_buffer_element *sbale;
2406 	struct zfcp_fsf_req *fsf_req;
2407 	unsigned long req_id;
2408 	int idx;
2409 
2410 	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2411 
2412 		sbale = &sbal->element[idx];
2413 		req_id = (unsigned long) sbale->addr;
2414 		fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2415 
2416 		if (!fsf_req) {
2417 			/*
2418 			 * Unknown request means that we have potentially memory
2419 			 * corruption and must stop the machine immediately.
2420 			 */
2421 			zfcp_qdio_siosl(adapter);
2422 			panic("error: unknown req_id (%lx) on adapter %s.\n",
2423 			      req_id, dev_name(&adapter->ccw_device->dev));
2424 		}
2425 
2426 		zfcp_fsf_req_complete(fsf_req);
2427 
2428 		if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2429 			break;
2430 	}
2431 }
2432