drivers/s390/scsi/zfcp_fsf.c
1 /*
2 * zfcp device driver
3 *
4 * Implementation of FSF commands.
5 *
6 * Copyright IBM Corporation 2002, 2010
7 */
8
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
11
12 #include <linux/blktrace_api.h>
13 #include <linux/slab.h>
14 #include <scsi/fc/fc_els.h>
15 #include "zfcp_ext.h"
16 #include "zfcp_fc.h"
17 #include "zfcp_dbf.h"
18 #include "zfcp_qdio.h"
19 #include "zfcp_reqlist.h"
20
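/*
 * Timeout handler armed by zfcp_fsf_start_timer(): the request did not
 * complete in time, so issue a SIOSL for the adapter and reopen it
 * through error recovery.
 */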
21 static void zfcp_fsf_request_timeout_handler(unsigned long data)
22 {
23 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
24 zfcp_qdio_siosl(adapter);
25 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
26 "fsrth_1");
27 }
28
29 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
30 unsigned long timeout)
31 {
32 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
33 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
34 fsf_req->timer.expires = jiffies + timeout;
35 add_timer(&fsf_req->timer);
36 }
37
38 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
39 {
40 BUG_ON(!fsf_req->erp_action);
41 fsf_req->timer.function = zfcp_erp_timeout_handler;
42 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
43 fsf_req->timer.expires = jiffies + 30 * HZ;
44 add_timer(&fsf_req->timer);
45 }
46
47 /* association between FSF command and FSF QTCB type */
48 static u32 fsf_qtcb_type[] = {
49 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
50 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
51 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
52 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
53 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
54 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
55 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
56 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
57 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
58 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
59 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
60 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
61 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
62 };
63
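/*
 * The adapter rejected a request because of an unsupported FC service
 * class: log the condition, shut the adapter down and mark the request
 * as failed.
 */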
64 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
65 {
66 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
67 "operational because of an unsupported FC class\n");
68 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
69 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
70 }
71
72 /**
73 * zfcp_fsf_req_free - free memory used by fsf request
 74 * @req: pointer to struct zfcp_fsf_req
75 */
76 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
77 {
78 if (likely(req->pool)) {
79 if (likely(req->qtcb))
80 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
81 mempool_free(req, req->pool);
82 return;
83 }
84
85 if (likely(req->qtcb))
86 kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
87 kfree(req);
88 }
89
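/*
 * Unsolicited "port closed" status: look up the affected port by its
 * destination ID (D_ID) and reopen it through error recovery.
 */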
90 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
91 {
92 unsigned long flags;
93 struct fsf_status_read_buffer *sr_buf = req->data;
94 struct zfcp_adapter *adapter = req->adapter;
95 struct zfcp_port *port;
96 int d_id = ntoh24(sr_buf->d_id);
97
98 read_lock_irqsave(&adapter->port_list_lock, flags);
99 list_for_each_entry(port, &adapter->port_list, list)
100 if (port->d_id == d_id) {
101 zfcp_erp_port_reopen(port, 0, "fssrpc1");
102 break;
103 }
104 read_unlock_irqrestore(&adapter->port_list_lock, flags);
105 }
106
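/*
 * Evaluate link-down information: flag the adapter link as unplugged,
 * block the remote ports seen by the SCSI midlayer, warn about the
 * reported cause (if any) and mark the adapter as failed for recovery.
 */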
107 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req,
108 struct fsf_link_down_info *link_down)
109 {
110 struct zfcp_adapter *adapter = req->adapter;
111
112 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
113 return;
114
115 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
116
117 zfcp_scsi_schedule_rports_block(adapter);
118
119 if (!link_down)
120 goto out;
121
122 switch (link_down->error_code) {
123 case FSF_PSQ_LINK_NO_LIGHT:
124 dev_warn(&req->adapter->ccw_device->dev,
125 "There is no light signal from the local "
126 "fibre channel cable\n");
127 break;
128 case FSF_PSQ_LINK_WRAP_PLUG:
129 dev_warn(&req->adapter->ccw_device->dev,
130 "There is a wrap plug instead of a fibre "
131 "channel cable\n");
132 break;
133 case FSF_PSQ_LINK_NO_FCP:
134 dev_warn(&req->adapter->ccw_device->dev,
135 "The adjacent fibre channel node does not "
136 "support FCP\n");
137 break;
138 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
139 dev_warn(&req->adapter->ccw_device->dev,
140 "The FCP device is suspended because of a "
141 "firmware update\n");
142 break;
143 case FSF_PSQ_LINK_INVALID_WWPN:
144 dev_warn(&req->adapter->ccw_device->dev,
145 "The FCP device detected a WWPN that is "
146 "duplicate or not valid\n");
147 break;
148 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
149 dev_warn(&req->adapter->ccw_device->dev,
150 "The fibre channel fabric does not support NPIV\n");
151 break;
152 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
153 dev_warn(&req->adapter->ccw_device->dev,
154 "The FCP adapter cannot support more NPIV ports\n");
155 break;
156 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
157 dev_warn(&req->adapter->ccw_device->dev,
158 "The adjacent switch cannot support "
159 "more NPIV ports\n");
160 break;
161 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
162 dev_warn(&req->adapter->ccw_device->dev,
163 "The FCP adapter could not log in to the "
164 "fibre channel fabric\n");
165 break;
166 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
167 dev_warn(&req->adapter->ccw_device->dev,
168 "The WWPN assignment file on the FCP adapter "
169 "has been damaged\n");
170 break;
171 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
172 dev_warn(&req->adapter->ccw_device->dev,
173 "The mode table on the FCP adapter "
174 "has been damaged\n");
175 break;
176 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
177 dev_warn(&req->adapter->ccw_device->dev,
178 "All NPIV ports on the FCP adapter have "
179 "been assigned\n");
180 break;
181 default:
182 dev_warn(&req->adapter->ccw_device->dev,
183 "The link between the FCP adapter and "
184 "the FC fabric is down\n");
185 }
186 out:
187 zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
188 }
189
190 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
191 {
192 struct fsf_status_read_buffer *sr_buf = req->data;
193 struct fsf_link_down_info *ldi =
194 (struct fsf_link_down_info *) &sr_buf->payload;
195
196 switch (sr_buf->status_subtype) {
197 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
198 zfcp_fsf_link_down_info_eval(req, ldi);
199 break;
200 case FSF_STATUS_READ_SUB_FDISC_FAILED:
201 zfcp_fsf_link_down_info_eval(req, ldi);
202 break;
203 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
204 zfcp_fsf_link_down_info_eval(req, NULL);
 205 	}
206 }
207
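/*
 * Handle an unsolicited status buffer: dispatch on the status type
 * (port closed, incoming ELS, bit error, link down/up, ...), then free
 * the buffer and the request and note the consumed buffer (stat_miss)
 * so the posted status reads can be replenished from the work queue.
 */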
208 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
209 {
210 struct zfcp_adapter *adapter = req->adapter;
211 struct fsf_status_read_buffer *sr_buf = req->data;
212
213 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
214 zfcp_dbf_hba_fsf_uss("fssrh_1", req);
215 mempool_free(sr_buf, adapter->pool.status_read_data);
216 zfcp_fsf_req_free(req);
217 return;
218 }
219
220 zfcp_dbf_hba_fsf_uss("fssrh_2", req);
221
222 switch (sr_buf->status_type) {
223 case FSF_STATUS_READ_PORT_CLOSED:
224 zfcp_fsf_status_read_port_closed(req);
225 break;
226 case FSF_STATUS_READ_INCOMING_ELS:
227 zfcp_fc_incoming_els(req);
228 break;
229 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
230 break;
231 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
232 dev_warn(&adapter->ccw_device->dev,
233 "The error threshold for checksum statistics "
234 "has been exceeded\n");
235 zfcp_dbf_hba_bit_err("fssrh_3", req);
236 break;
237 case FSF_STATUS_READ_LINK_DOWN:
238 zfcp_fsf_status_read_link_down(req);
239 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKDOWN, 0);
240 break;
241 case FSF_STATUS_READ_LINK_UP:
242 dev_info(&adapter->ccw_device->dev,
243 "The local link has been restored\n");
244 /* All ports should be marked as ready to run again */
245 zfcp_erp_set_adapter_status(adapter,
246 ZFCP_STATUS_COMMON_RUNNING);
247 zfcp_erp_adapter_reopen(adapter,
248 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
249 ZFCP_STATUS_COMMON_ERP_FAILED,
250 "fssrh_2");
251 zfcp_fc_enqueue_event(adapter, FCH_EVT_LINKUP, 0);
252
253 break;
254 case FSF_STATUS_READ_NOTIFICATION_LOST:
255 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
256 zfcp_cfdc_adapter_access_changed(adapter);
257 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
258 queue_work(adapter->work_queue, &adapter->scan_work);
259 break;
260 case FSF_STATUS_READ_CFDC_UPDATED:
261 zfcp_cfdc_adapter_access_changed(adapter);
262 break;
263 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
264 adapter->adapter_features = sr_buf->payload.word[0];
265 break;
266 }
267
268 mempool_free(sr_buf, adapter->pool.status_read_data);
269 zfcp_fsf_req_free(req);
270
271 atomic_inc(&adapter->stat_miss);
272 queue_work(adapter->work_queue, &adapter->stat_work);
273 }
274
275 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
276 {
277 switch (req->qtcb->header.fsf_status_qual.word[0]) {
278 case FSF_SQ_FCP_RSP_AVAILABLE:
279 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
280 case FSF_SQ_NO_RETRY_POSSIBLE:
281 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
282 return;
283 case FSF_SQ_COMMAND_ABORTED:
284 break;
285 case FSF_SQ_NO_RECOM:
286 dev_err(&req->adapter->ccw_device->dev,
287 "The FCP adapter reported a problem "
288 "that cannot be recovered\n");
289 zfcp_qdio_siosl(req->adapter);
290 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1");
291 break;
292 }
 293 	/* all non-return status qualifier cases set FSFREQ_ERROR */
294 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
295 }
296
297 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
298 {
299 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
300 return;
301
302 switch (req->qtcb->header.fsf_status) {
303 case FSF_UNKNOWN_COMMAND:
304 dev_err(&req->adapter->ccw_device->dev,
305 "The FCP adapter does not recognize the command 0x%x\n",
306 req->qtcb->header.fsf_command);
307 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1");
308 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
309 break;
310 case FSF_ADAPTER_STATUS_AVAILABLE:
311 zfcp_fsf_fsfstatus_qual_eval(req);
312 break;
313 }
314 }
315
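/*
 * Evaluate the protocol status in the QTCB prefix: anything other than
 * "good" or "FSF status presented" marks the request as failed and, in
 * most cases, triggers adapter recovery or shutdown.
 */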
316 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
317 {
318 struct zfcp_adapter *adapter = req->adapter;
319 struct fsf_qtcb *qtcb = req->qtcb;
320 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
321
322 zfcp_dbf_hba_fsf_response(req);
323
324 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
325 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
326 return;
327 }
328
329 switch (qtcb->prefix.prot_status) {
330 case FSF_PROT_GOOD:
331 case FSF_PROT_FSF_STATUS_PRESENTED:
332 return;
333 case FSF_PROT_QTCB_VERSION_ERROR:
334 dev_err(&adapter->ccw_device->dev,
335 "QTCB version 0x%x not supported by FCP adapter "
336 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
337 psq->word[0], psq->word[1]);
338 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1");
339 break;
340 case FSF_PROT_ERROR_STATE:
341 case FSF_PROT_SEQ_NUMB_ERROR:
342 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2");
343 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
344 break;
345 case FSF_PROT_UNSUPP_QTCB_TYPE:
346 dev_err(&adapter->ccw_device->dev,
347 "The QTCB type is not supported by the FCP adapter\n");
348 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3");
349 break;
350 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
351 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
352 &adapter->status);
353 break;
354 case FSF_PROT_DUPLICATE_REQUEST_ID:
355 dev_err(&adapter->ccw_device->dev,
356 "0x%Lx is an ambiguous request identifier\n",
357 (unsigned long long)qtcb->bottom.support.req_handle);
358 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4");
359 break;
360 case FSF_PROT_LINK_DOWN:
361 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info);
362 /* go through reopen to flush pending requests */
363 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6");
364 break;
365 case FSF_PROT_REEST_QUEUE:
366 /* All ports should be marked as ready to run again */
367 zfcp_erp_set_adapter_status(adapter,
368 ZFCP_STATUS_COMMON_RUNNING);
369 zfcp_erp_adapter_reopen(adapter,
370 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
371 ZFCP_STATUS_COMMON_ERP_FAILED,
372 "fspse_8");
373 break;
374 default:
375 dev_err(&adapter->ccw_device->dev,
376 "0x%x is not a valid transfer protocol status\n",
377 qtcb->prefix.prot_status);
378 zfcp_qdio_siosl(adapter);
379 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9");
380 }
381 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
382 }
383
384 /**
385 * zfcp_fsf_req_complete - process completion of a FSF request
 386 * @req: The FSF request that has been completed.
387 *
 388 * When a request has been completed, either by the FCP adapter or
 389 * because it was dismissed due to a queue shutdown, this function
390 * is called to process the completion status and trigger further
391 * events related to the FSF request.
392 */
393 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
394 {
395 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
396 zfcp_fsf_status_read_handler(req);
397 return;
398 }
399
400 del_timer(&req->timer);
401 zfcp_fsf_protstatus_eval(req);
402 zfcp_fsf_fsfstatus_eval(req);
403 req->handler(req);
404
405 if (req->erp_action)
406 zfcp_erp_notify(req->erp_action, 0);
407
408 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
409 zfcp_fsf_req_free(req);
410 else
411 complete(&req->completion);
412 }
413
414 /**
415 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
416 * @adapter: pointer to struct zfcp_adapter
417 *
418 * Never ever call this without shutting down the adapter first.
419 * Otherwise the adapter would continue using and corrupting s390 storage.
 420 * A BUG_ON() call is included to ensure this is done.
421 * ERP is supposed to be the only user of this function.
422 */
423 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
424 {
425 struct zfcp_fsf_req *req, *tmp;
426 LIST_HEAD(remove_queue);
427
428 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
429 zfcp_reqlist_move(adapter->req_list, &remove_queue);
430
431 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
432 list_del(&req->list);
433 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
434 zfcp_fsf_req_complete(req);
435 }
436 }
437
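/*
 * Copy the exchange-config result into the adapter and fc_host
 * attributes (WWPN/WWNN, port ID, speed, topology) and reject
 * unsupported topologies by shutting the adapter down.
 */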
438 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
439 {
440 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
441 struct zfcp_adapter *adapter = req->adapter;
442 struct Scsi_Host *shost = adapter->scsi_host;
443 struct fc_els_flogi *nsp, *plogi;
444
445 /* adjust pointers for missing command code */
446 nsp = (struct fc_els_flogi *) ((u8 *)&bottom->nport_serv_param
447 - sizeof(u32));
448 plogi = (struct fc_els_flogi *) ((u8 *)&bottom->plogi_payload
449 - sizeof(u32));
450
451 if (req->data)
452 memcpy(req->data, bottom, sizeof(*bottom));
453
454 fc_host_port_name(shost) = nsp->fl_wwpn;
455 fc_host_node_name(shost) = nsp->fl_wwnn;
456 fc_host_port_id(shost) = ntoh24(bottom->s_id);
457 fc_host_speed(shost) = bottom->fc_link_speed;
458 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
459
460 adapter->hydra_version = bottom->adapter_type;
461 adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK;
462 adapter->stat_read_buf_num = max(bottom->status_read_buf_num,
463 (u16)FSF_STATUS_READS_RECOM);
464
465 if (fc_host_permanent_port_name(shost) == -1)
466 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
467
468 switch (bottom->fc_topology) {
469 case FSF_TOPO_P2P:
470 adapter->peer_d_id = ntoh24(bottom->peer_d_id);
471 adapter->peer_wwpn = plogi->fl_wwpn;
472 adapter->peer_wwnn = plogi->fl_wwnn;
473 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
474 break;
475 case FSF_TOPO_FABRIC:
476 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
477 break;
478 case FSF_TOPO_AL:
479 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
480 /* fall through */
481 default:
482 dev_err(&adapter->ccw_device->dev,
483 "Unknown or unsupported arbitrated loop "
484 "fibre channel topology detected\n");
485 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1");
486 return -EIO;
487 }
488
489 zfcp_scsi_set_prot(adapter);
490
491 return 0;
492 }
493
494 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
495 {
496 struct zfcp_adapter *adapter = req->adapter;
497 struct fsf_qtcb *qtcb = req->qtcb;
498 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
499 struct Scsi_Host *shost = adapter->scsi_host;
500
501 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
502 return;
503
504 adapter->fsf_lic_version = bottom->lic_version;
505 adapter->adapter_features = bottom->adapter_features;
506 adapter->connection_features = bottom->connection_features;
507 adapter->peer_wwpn = 0;
508 adapter->peer_wwnn = 0;
509 adapter->peer_d_id = 0;
510
511 switch (qtcb->header.fsf_status) {
512 case FSF_GOOD:
513 if (zfcp_fsf_exchange_config_evaluate(req))
514 return;
515
516 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
517 dev_err(&adapter->ccw_device->dev,
518 "FCP adapter maximum QTCB size (%d bytes) "
519 "is too small\n",
520 bottom->max_qtcb_size);
521 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1");
522 return;
523 }
524 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
525 &adapter->status);
526 break;
527 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
528 fc_host_node_name(shost) = 0;
529 fc_host_port_name(shost) = 0;
530 fc_host_port_id(shost) = 0;
531 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
532 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
533 adapter->hydra_version = 0;
534
535 zfcp_fsf_link_down_info_eval(req,
536 &qtcb->header.fsf_status_qual.link_down_info);
537 break;
538 default:
539 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3");
540 return;
541 }
542
543 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
544 adapter->hardware_version = bottom->hardware_version;
545 memcpy(fc_host_serial_number(shost), bottom->serial_number,
546 min(FC_SERIAL_NUMBER_SIZE, 17));
547 EBCASC(fc_host_serial_number(shost),
548 min(FC_SERIAL_NUMBER_SIZE, 17));
549 }
550
551 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
552 dev_err(&adapter->ccw_device->dev,
553 "The FCP adapter only supports newer "
554 "control block versions\n");
555 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4");
556 return;
557 }
558 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
559 dev_err(&adapter->ccw_device->dev,
560 "The FCP adapter only supports older "
561 "control block versions\n");
562 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5");
563 }
564 }
565
566 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
567 {
568 struct zfcp_adapter *adapter = req->adapter;
569 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
570 struct Scsi_Host *shost = adapter->scsi_host;
571
572 if (req->data)
573 memcpy(req->data, bottom, sizeof(*bottom));
574
575 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
576 fc_host_permanent_port_name(shost) = bottom->wwpn;
577 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
578 } else
579 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
580 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
581 fc_host_supported_speeds(shost) = bottom->supported_speed;
582 memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
583 FC_FC4_LIST_SIZE);
584 memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
585 FC_FC4_LIST_SIZE);
586 }
587
588 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
589 {
590 struct fsf_qtcb *qtcb = req->qtcb;
591
592 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
593 return;
594
595 switch (qtcb->header.fsf_status) {
596 case FSF_GOOD:
597 zfcp_fsf_exchange_port_evaluate(req);
598 break;
599 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
600 zfcp_fsf_exchange_port_evaluate(req);
601 zfcp_fsf_link_down_info_eval(req,
602 &qtcb->header.fsf_status_qual.link_down_info);
603 break;
604 }
605 }
606
607 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
608 {
609 struct zfcp_fsf_req *req;
610
611 if (likely(pool))
612 req = mempool_alloc(pool, GFP_ATOMIC);
613 else
614 req = kmalloc(sizeof(*req), GFP_ATOMIC);
615
616 if (unlikely(!req))
617 return NULL;
618
619 memset(req, 0, sizeof(*req));
620 req->pool = pool;
621 return req;
622 }
623
624 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
625 {
626 struct fsf_qtcb *qtcb;
627
628 if (likely(pool))
629 qtcb = mempool_alloc(pool, GFP_ATOMIC);
630 else
631 qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
632
633 if (unlikely(!qtcb))
634 return NULL;
635
636 memset(qtcb, 0, sizeof(*qtcb));
637 return qtcb;
638 }
639
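/*
 * Allocate and initialize an FSF request: assign the adapter-wide
 * request id, set up its timer and completion, allocate and prefill a
 * QTCB for everything except unsolicited status, and initialize the
 * associated QDIO request.
 */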
640 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
641 u32 fsf_cmd, u32 sbtype,
642 mempool_t *pool)
643 {
644 struct zfcp_adapter *adapter = qdio->adapter;
645 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
646
647 if (unlikely(!req))
648 return ERR_PTR(-ENOMEM);
649
650 if (adapter->req_no == 0)
651 adapter->req_no++;
652
653 INIT_LIST_HEAD(&req->list);
654 init_timer(&req->timer);
655 init_completion(&req->completion);
656
657 req->adapter = adapter;
658 req->fsf_command = fsf_cmd;
659 req->req_id = adapter->req_no;
660
661 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
662 if (likely(pool))
663 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
664 else
665 req->qtcb = zfcp_qtcb_alloc(NULL);
666
667 if (unlikely(!req->qtcb)) {
668 zfcp_fsf_req_free(req);
669 return ERR_PTR(-ENOMEM);
670 }
671
672 req->seq_no = adapter->fsf_req_seq_no;
673 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
674 req->qtcb->prefix.req_id = req->req_id;
675 req->qtcb->prefix.ulp_info = 26;
676 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
677 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
678 req->qtcb->header.req_handle = req->req_id;
679 req->qtcb->header.fsf_command = req->fsf_command;
680 }
681
682 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype,
683 req->qtcb, sizeof(struct fsf_qtcb));
684
685 return req;
686 }
687
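/*
 * Register the request in the adapter's request list and hand it to
 * QDIO. The typical caller pattern throughout this file is:
 *
 *	req = zfcp_fsf_req_create(qdio, cmd, sbtype, pool);
 *	... fill QTCB and SBALs, set req->handler ...
 *	zfcp_fsf_start_timer(req, timeout);	(or zfcp_fsf_start_erp_timer)
 *	retval = zfcp_fsf_req_send(req);
 *	if (retval)
 *		zfcp_fsf_req_free(req);
 *
 * On send failure the request is removed from the list again and the
 * adapter is reopened. The FSF sequence number is only advanced for
 * requests that carry a QTCB, i.e. not for unsolicited status reads.
 */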
688 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
689 {
690 struct zfcp_adapter *adapter = req->adapter;
691 struct zfcp_qdio *qdio = adapter->qdio;
692 int with_qtcb = (req->qtcb != NULL);
693 int req_id = req->req_id;
694
695 zfcp_reqlist_add(adapter->req_list, req);
696
697 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
698 req->issued = get_clock();
699 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
700 del_timer(&req->timer);
701 /* lookup request again, list might have changed */
702 zfcp_reqlist_find_rm(adapter->req_list, req_id);
703 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
704 return -EIO;
705 }
706
707 /* Don't increase for unsolicited status */
708 if (with_qtcb)
709 adapter->fsf_req_seq_no++;
710 adapter->req_no++;
711
712 return 0;
713 }
714
715 /**
716 * zfcp_fsf_status_read - send status read request
 717 * @qdio: pointer to struct zfcp_qdio
719 * Returns: 0 on success, ERROR otherwise
720 */
721 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
722 {
723 struct zfcp_adapter *adapter = qdio->adapter;
724 struct zfcp_fsf_req *req;
725 struct fsf_status_read_buffer *sr_buf;
726 int retval = -EIO;
727
728 spin_lock_irq(&qdio->req_q_lock);
729 if (zfcp_qdio_sbal_get(qdio))
730 goto out;
731
732 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
733 adapter->pool.status_read_req);
734 if (IS_ERR(req)) {
735 retval = PTR_ERR(req);
736 goto out;
737 }
738
739 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
740 if (!sr_buf) {
741 retval = -ENOMEM;
742 goto failed_buf;
743 }
744 memset(sr_buf, 0, sizeof(*sr_buf));
745 req->data = sr_buf;
746
747 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf));
748 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
749
750 retval = zfcp_fsf_req_send(req);
751 if (retval)
752 goto failed_req_send;
753
754 goto out;
755
756 failed_req_send:
757 req->data = NULL;
758 mempool_free(sr_buf, adapter->pool.status_read_data);
759 failed_buf:
760 zfcp_dbf_hba_fsf_uss("fssr__1", req);
761 zfcp_fsf_req_free(req);
762 out:
763 spin_unlock_irq(&qdio->req_q_lock);
764 return retval;
765 }
766
767 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
768 {
769 struct scsi_device *sdev = req->data;
770 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
771 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
772
773 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
774 return;
775
776 switch (req->qtcb->header.fsf_status) {
777 case FSF_PORT_HANDLE_NOT_VALID:
778 if (fsq->word[0] == fsq->word[1]) {
779 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0,
780 "fsafch1");
781 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
782 }
783 break;
784 case FSF_LUN_HANDLE_NOT_VALID:
785 if (fsq->word[0] == fsq->word[1]) {
786 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fsafch2");
787 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
788 }
789 break;
790 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
791 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
792 break;
793 case FSF_PORT_BOXED:
794 zfcp_erp_set_port_status(zfcp_sdev->port,
795 ZFCP_STATUS_COMMON_ACCESS_BOXED);
796 zfcp_erp_port_reopen(zfcp_sdev->port,
797 ZFCP_STATUS_COMMON_ERP_FAILED, "fsafch3");
798 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
799 break;
800 case FSF_LUN_BOXED:
801 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
802 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
803 "fsafch4");
804 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
805 break;
806 case FSF_ADAPTER_STATUS_AVAILABLE:
807 switch (fsq->word[0]) {
808 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
809 zfcp_fc_test_link(zfcp_sdev->port);
810 /* fall through */
811 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
812 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
813 break;
814 }
815 break;
816 case FSF_GOOD:
817 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
818 break;
819 }
820 }
821
822 /**
823 * zfcp_fsf_abort_fcp_cmnd - abort running SCSI command
824 * @scmnd: The SCSI command to abort
 825 * Returns: pointer to struct zfcp_fsf_req, or NULL on failure
826 */
827
828 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
829 {
830 struct zfcp_fsf_req *req = NULL;
831 struct scsi_device *sdev = scmnd->device;
832 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
833 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
834 unsigned long old_req_id = (unsigned long) scmnd->host_scribble;
835
836 spin_lock_irq(&qdio->req_q_lock);
837 if (zfcp_qdio_sbal_get(qdio))
838 goto out;
839 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
840 SBAL_FLAGS0_TYPE_READ,
841 qdio->adapter->pool.scsi_abort);
842 if (IS_ERR(req)) {
843 req = NULL;
844 goto out;
845 }
846
847 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
848 ZFCP_STATUS_COMMON_UNBLOCKED)))
849 goto out_error_free;
850
851 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
852
853 req->data = sdev;
854 req->handler = zfcp_fsf_abort_fcp_command_handler;
855 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
856 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
857 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
858
859 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
860 if (!zfcp_fsf_req_send(req))
861 goto out;
862
863 out_error_free:
864 zfcp_fsf_req_free(req);
865 req = NULL;
866 out:
867 spin_unlock_irq(&qdio->req_q_lock);
868 return req;
869 }
870
871 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
872 {
873 struct zfcp_adapter *adapter = req->adapter;
874 struct zfcp_fsf_ct_els *ct = req->data;
875 struct fsf_qtcb_header *header = &req->qtcb->header;
876
877 ct->status = -EINVAL;
878
879 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
880 goto skip_fsfstatus;
881
882 switch (header->fsf_status) {
883 case FSF_GOOD:
884 zfcp_dbf_san_res("fsscth1", req);
885 ct->status = 0;
886 break;
887 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
888 zfcp_fsf_class_not_supp(req);
889 break;
890 case FSF_ADAPTER_STATUS_AVAILABLE:
891 switch (header->fsf_status_qual.word[0]){
892 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
893 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
894 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
895 break;
896 }
897 break;
898 case FSF_ACCESS_DENIED:
899 break;
900 case FSF_PORT_BOXED:
901 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
902 break;
903 case FSF_PORT_HANDLE_NOT_VALID:
904 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
905 /* fall through */
906 case FSF_GENERIC_COMMAND_REJECTED:
907 case FSF_PAYLOAD_SIZE_MISMATCH:
908 case FSF_REQUEST_SIZE_TOO_LARGE:
909 case FSF_RESPONSE_SIZE_TOO_LARGE:
910 case FSF_SBAL_MISMATCH:
911 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
912 break;
913 }
914
915 skip_fsfstatus:
916 if (ct->handler)
917 ct->handler(ct->handler_data);
918 }
919
920 static void zfcp_fsf_setup_ct_els_unchained(struct zfcp_qdio *qdio,
921 struct zfcp_qdio_req *q_req,
922 struct scatterlist *sg_req,
923 struct scatterlist *sg_resp)
924 {
925 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_req), sg_req->length);
926 zfcp_qdio_fill_next(qdio, q_req, sg_virt(sg_resp), sg_resp->length);
927 zfcp_qdio_set_sbale_last(qdio, q_req);
928 }
929
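/*
 * Map the CT/ELS request and response scatterlists onto SBALs: simple
 * scatterlists go into a single, unchained SBAL; larger ones require
 * chained SBALs and therefore the FSF_FEATURE_ELS_CT_CHAINED_SBALS
 * adapter feature.
 */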
930 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
931 struct scatterlist *sg_req,
932 struct scatterlist *sg_resp)
933 {
934 struct zfcp_adapter *adapter = req->adapter;
935 u32 feat = adapter->adapter_features;
936 int bytes;
937
938 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
939 if (!zfcp_qdio_sg_one_sbale(sg_req) ||
940 !zfcp_qdio_sg_one_sbale(sg_resp))
941 return -EOPNOTSUPP;
942
943 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
944 sg_req, sg_resp);
945 return 0;
946 }
947
948 /* use single, unchained SBAL if it can hold the request */
949 if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) {
950 zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req,
951 sg_req, sg_resp);
952 return 0;
953 }
954
955 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req);
956 if (bytes <= 0)
957 return -EIO;
958 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
959 req->qtcb->bottom.support.req_buf_length = bytes;
960 zfcp_qdio_skip_to_last_sbale(&req->qdio_req);
961
962 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req,
963 sg_resp);
964 req->qtcb->bottom.support.resp_buf_length = bytes;
965 if (bytes <= 0)
966 return -EIO;
967 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
968
969 return 0;
970 }
971
972 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
973 struct scatterlist *sg_req,
974 struct scatterlist *sg_resp,
975 unsigned int timeout)
976 {
977 int ret;
978
979 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp);
980 if (ret)
981 return ret;
982
983 /* common settings for ct/gs and els requests */
984 if (timeout > 255)
985 timeout = 255; /* max value accepted by hardware */
986 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
987 req->qtcb->bottom.support.timeout = timeout;
988 zfcp_fsf_start_timer(req, (timeout + 10) * HZ);
989
990 return 0;
991 }
992
993 /**
994 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
     * @wka_port: pointer to the zfcp WKA port the request is sent to
 995 * @ct: pointer to struct zfcp_fsf_ct_els with data for the request
 996 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
     * @timeout: timeout value (in seconds) handed to the hardware and used for the request timer
997 */
998 int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
999 struct zfcp_fsf_ct_els *ct, mempool_t *pool,
1000 unsigned int timeout)
1001 {
1002 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1003 struct zfcp_fsf_req *req;
1004 int ret = -EIO;
1005
1006 spin_lock_irq(&qdio->req_q_lock);
1007 if (zfcp_qdio_sbal_get(qdio))
1008 goto out;
1009
1010 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1011 SBAL_FLAGS0_TYPE_WRITE_READ, pool);
1012
1013 if (IS_ERR(req)) {
1014 ret = PTR_ERR(req);
1015 goto out;
1016 }
1017
1018 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1019 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout);
1020 if (ret)
1021 goto failed_send;
1022
1023 req->handler = zfcp_fsf_send_ct_handler;
1024 req->qtcb->header.port_handle = wka_port->handle;
1025 req->data = ct;
1026
1027 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id);
1028
1029 ret = zfcp_fsf_req_send(req);
1030 if (ret)
1031 goto failed_send;
1032
1033 goto out;
1034
1035 failed_send:
1036 zfcp_fsf_req_free(req);
1037 out:
1038 spin_unlock_irq(&qdio->req_q_lock);
1039 return ret;
1040 }
1041
1042 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1043 {
1044 struct zfcp_fsf_ct_els *send_els = req->data;
1045 struct zfcp_port *port = send_els->port;
1046 struct fsf_qtcb_header *header = &req->qtcb->header;
1047
1048 send_els->status = -EINVAL;
1049
1050 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1051 goto skip_fsfstatus;
1052
1053 switch (header->fsf_status) {
1054 case FSF_GOOD:
1055 zfcp_dbf_san_res("fsselh1", req);
1056 send_els->status = 0;
1057 break;
1058 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1059 zfcp_fsf_class_not_supp(req);
1060 break;
1061 case FSF_ADAPTER_STATUS_AVAILABLE:
1062 switch (header->fsf_status_qual.word[0]){
1063 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1064 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1065 case FSF_SQ_RETRY_IF_POSSIBLE:
1066 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1067 break;
1068 }
1069 break;
1070 case FSF_ELS_COMMAND_REJECTED:
1071 case FSF_PAYLOAD_SIZE_MISMATCH:
1072 case FSF_REQUEST_SIZE_TOO_LARGE:
1073 case FSF_RESPONSE_SIZE_TOO_LARGE:
1074 break;
1075 case FSF_ACCESS_DENIED:
1076 if (port) {
1077 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1078 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1079 }
1080 break;
1081 case FSF_SBAL_MISMATCH:
1082 		/* should never occur, avoided in zfcp_fsf_send_els */
1083 /* fall through */
1084 default:
1085 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1086 break;
1087 }
1088 skip_fsfstatus:
1089 if (send_els->handler)
1090 send_els->handler(send_els->handler_data);
1091 }
1092
1093 /**
1094 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
     * @adapter: pointer to the zfcp adapter the ELS is sent through
     * @d_id: N_Port_ID of the destination of the ELS
1095 * @els: pointer to struct zfcp_fsf_ct_els with data for the command
     * @timeout: timeout value (in seconds) handed to the hardware and used for the request timer
1096 */
1097 int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1098 struct zfcp_fsf_ct_els *els, unsigned int timeout)
1099 {
1100 struct zfcp_fsf_req *req;
1101 struct zfcp_qdio *qdio = adapter->qdio;
1102 int ret = -EIO;
1103
1104 spin_lock_irq(&qdio->req_q_lock);
1105 if (zfcp_qdio_sbal_get(qdio))
1106 goto out;
1107
1108 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1109 SBAL_FLAGS0_TYPE_WRITE_READ, NULL);
1110
1111 if (IS_ERR(req)) {
1112 ret = PTR_ERR(req);
1113 goto out;
1114 }
1115
1116 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1117
1118 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2);
1119
1120 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout);
1121
1122 if (ret)
1123 goto failed_send;
1124
1125 hton24(req->qtcb->bottom.support.d_id, d_id);
1126 req->handler = zfcp_fsf_send_els_handler;
1127 req->data = els;
1128
1129 zfcp_dbf_san_req("fssels1", req, d_id);
1130
1131 ret = zfcp_fsf_req_send(req);
1132 if (ret)
1133 goto failed_send;
1134
1135 goto out;
1136
1137 failed_send:
1138 zfcp_fsf_req_free(req);
1139 out:
1140 spin_unlock_irq(&qdio->req_q_lock);
1141 return ret;
1142 }
1143
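/**
 * zfcp_fsf_exchange_config_data - request configuration data of the adapter
 * @erp_action: ERP action for the adapter whose configuration is requested
 * Returns: 0 on success, error otherwise
 */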
1144 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1145 {
1146 struct zfcp_fsf_req *req;
1147 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1148 int retval = -EIO;
1149
1150 spin_lock_irq(&qdio->req_q_lock);
1151 if (zfcp_qdio_sbal_get(qdio))
1152 goto out;
1153
1154 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1155 SBAL_FLAGS0_TYPE_READ,
1156 qdio->adapter->pool.erp_req);
1157
1158 if (IS_ERR(req)) {
1159 retval = PTR_ERR(req);
1160 goto out;
1161 }
1162
1163 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1164 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1165
1166 req->qtcb->bottom.config.feature_selection =
1167 FSF_FEATURE_CFDC |
1168 FSF_FEATURE_LUN_SHARING |
1169 FSF_FEATURE_NOTIFICATION_LOST |
1170 FSF_FEATURE_UPDATE_ALERT;
1171 req->erp_action = erp_action;
1172 req->handler = zfcp_fsf_exchange_config_data_handler;
1173 erp_action->fsf_req_id = req->req_id;
1174
1175 zfcp_fsf_start_erp_timer(req);
1176 retval = zfcp_fsf_req_send(req);
1177 if (retval) {
1178 zfcp_fsf_req_free(req);
1179 erp_action->fsf_req_id = 0;
1180 }
1181 out:
1182 spin_unlock_irq(&qdio->req_q_lock);
1183 return retval;
1184 }
1185
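/**
 * zfcp_fsf_exchange_config_data_sync - get adapter configuration and wait
 * @qdio: pointer to struct zfcp_qdio
 * @data: if non-NULL, the configuration QTCB bottom is copied here
 * Returns: 0 on success, error otherwise
 */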
1186 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1187 struct fsf_qtcb_bottom_config *data)
1188 {
1189 struct zfcp_fsf_req *req = NULL;
1190 int retval = -EIO;
1191
1192 spin_lock_irq(&qdio->req_q_lock);
1193 if (zfcp_qdio_sbal_get(qdio))
1194 goto out_unlock;
1195
1196 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1197 SBAL_FLAGS0_TYPE_READ, NULL);
1198
1199 if (IS_ERR(req)) {
1200 retval = PTR_ERR(req);
1201 goto out_unlock;
1202 }
1203
1204 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1205 req->handler = zfcp_fsf_exchange_config_data_handler;
1206
1207 req->qtcb->bottom.config.feature_selection =
1208 FSF_FEATURE_CFDC |
1209 FSF_FEATURE_LUN_SHARING |
1210 FSF_FEATURE_NOTIFICATION_LOST |
1211 FSF_FEATURE_UPDATE_ALERT;
1212
1213 if (data)
1214 req->data = data;
1215
1216 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1217 retval = zfcp_fsf_req_send(req);
1218 spin_unlock_irq(&qdio->req_q_lock);
1219 if (!retval)
1220 wait_for_completion(&req->completion);
1221
1222 zfcp_fsf_req_free(req);
1223 return retval;
1224
1225 out_unlock:
1226 spin_unlock_irq(&qdio->req_q_lock);
1227 return retval;
1228 }
1229
1230 /**
1231 * zfcp_fsf_exchange_port_data - request information about local port
1232 * @erp_action: ERP action for the adapter for which port data is requested
1233 * Returns: 0 on success, error otherwise
1234 */
1235 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1236 {
1237 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1238 struct zfcp_fsf_req *req;
1239 int retval = -EIO;
1240
1241 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1242 return -EOPNOTSUPP;
1243
1244 spin_lock_irq(&qdio->req_q_lock);
1245 if (zfcp_qdio_sbal_get(qdio))
1246 goto out;
1247
1248 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1249 SBAL_FLAGS0_TYPE_READ,
1250 qdio->adapter->pool.erp_req);
1251
1252 if (IS_ERR(req)) {
1253 retval = PTR_ERR(req);
1254 goto out;
1255 }
1256
1257 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1258 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1259
1260 req->handler = zfcp_fsf_exchange_port_data_handler;
1261 req->erp_action = erp_action;
1262 erp_action->fsf_req_id = req->req_id;
1263
1264 zfcp_fsf_start_erp_timer(req);
1265 retval = zfcp_fsf_req_send(req);
1266 if (retval) {
1267 zfcp_fsf_req_free(req);
1268 erp_action->fsf_req_id = 0;
1269 }
1270 out:
1271 spin_unlock_irq(&qdio->req_q_lock);
1272 return retval;
1273 }
1274
1275 /**
1276 * zfcp_fsf_exchange_port_data_sync - request information about local port
1277 * @qdio: pointer to struct zfcp_qdio
1278 * @data: pointer to struct fsf_qtcb_bottom_port
1279 * Returns: 0 on success, error otherwise
1280 */
1281 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1282 struct fsf_qtcb_bottom_port *data)
1283 {
1284 struct zfcp_fsf_req *req = NULL;
1285 int retval = -EIO;
1286
1287 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1288 return -EOPNOTSUPP;
1289
1290 spin_lock_irq(&qdio->req_q_lock);
1291 if (zfcp_qdio_sbal_get(qdio))
1292 goto out_unlock;
1293
1294 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1295 SBAL_FLAGS0_TYPE_READ, NULL);
1296
1297 if (IS_ERR(req)) {
1298 retval = PTR_ERR(req);
1299 goto out_unlock;
1300 }
1301
1302 if (data)
1303 req->data = data;
1304
1305 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1306
1307 req->handler = zfcp_fsf_exchange_port_data_handler;
1308 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1309 retval = zfcp_fsf_req_send(req);
1310 spin_unlock_irq(&qdio->req_q_lock);
1311
1312 if (!retval)
1313 wait_for_completion(&req->completion);
1314
1315 zfcp_fsf_req_free(req);
1316
1317 return retval;
1318
1319 out_unlock:
1320 spin_unlock_irq(&qdio->req_q_lock);
1321 return retval;
1322 }
1323
1324 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1325 {
1326 struct zfcp_port *port = req->data;
1327 struct fsf_qtcb_header *header = &req->qtcb->header;
1328 struct fc_els_flogi *plogi;
1329
1330 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1331 goto out;
1332
1333 switch (header->fsf_status) {
1334 case FSF_PORT_ALREADY_OPEN:
1335 break;
1336 case FSF_ACCESS_DENIED:
1337 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1338 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1339 break;
1340 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1341 dev_warn(&req->adapter->ccw_device->dev,
1342 "Not enough FCP adapter resources to open "
1343 "remote port 0x%016Lx\n",
1344 (unsigned long long)port->wwpn);
1345 zfcp_erp_set_port_status(port,
1346 ZFCP_STATUS_COMMON_ERP_FAILED);
1347 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1348 break;
1349 case FSF_ADAPTER_STATUS_AVAILABLE:
1350 switch (header->fsf_status_qual.word[0]) {
1351 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1352 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1353 case FSF_SQ_NO_RETRY_POSSIBLE:
1354 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1355 break;
1356 }
1357 break;
1358 case FSF_GOOD:
1359 port->handle = header->port_handle;
1360 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1361 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1362 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1363 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1364 &port->status);
1365 /* check whether D_ID has changed during open */
1366 /*
1367 * FIXME: This check is not airtight, as the FCP channel does
1368 * not monitor closures of target port connections caused on
1369 		 * the remote side. Thus, it might miss out on invalidating
1370 * locally cached WWPNs (and other N_Port parameters) of gone
1371 * target ports. So, our heroic attempt to make things safe
1372 * could be undermined by 'open port' response data tagged with
1373 * obsolete WWPNs. Another reason to monitor potential
1374 		 * connection closures ourselves, at least (by interpreting
1375 		 * incoming ELS commands and unsolicited status). It just crosses my
1376 * mind that one should be able to cross-check by means of
1377 * another GID_PN straight after a port has been opened.
1378 		 * Alternatively, an ADISC/PDISC ELS should suffice as well.
1379 */
1380 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els;
1381 if (req->qtcb->bottom.support.els1_length >=
1382 FSF_PLOGI_MIN_LEN)
1383 zfcp_fc_plogi_evaluate(port, plogi);
1384 break;
1385 case FSF_UNKNOWN_OP_SUBTYPE:
1386 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1387 break;
1388 }
1389
1390 out:
1391 put_device(&port->dev);
1392 }
1393
1394 /**
1395 * zfcp_fsf_open_port - create and send open port request
1396 * @erp_action: pointer to struct zfcp_erp_action
1397 * Returns: 0 on success, error otherwise
1398 */
1399 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1400 {
1401 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1402 struct zfcp_port *port = erp_action->port;
1403 struct zfcp_fsf_req *req;
1404 int retval = -EIO;
1405
1406 spin_lock_irq(&qdio->req_q_lock);
1407 if (zfcp_qdio_sbal_get(qdio))
1408 goto out;
1409
1410 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1411 SBAL_FLAGS0_TYPE_READ,
1412 qdio->adapter->pool.erp_req);
1413
1414 if (IS_ERR(req)) {
1415 retval = PTR_ERR(req);
1416 goto out;
1417 }
1418
1419 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1420 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1421
1422 req->handler = zfcp_fsf_open_port_handler;
1423 hton24(req->qtcb->bottom.support.d_id, port->d_id);
1424 req->data = port;
1425 req->erp_action = erp_action;
1426 erp_action->fsf_req_id = req->req_id;
1427 get_device(&port->dev);
1428
1429 zfcp_fsf_start_erp_timer(req);
1430 retval = zfcp_fsf_req_send(req);
1431 if (retval) {
1432 zfcp_fsf_req_free(req);
1433 erp_action->fsf_req_id = 0;
1434 put_device(&port->dev);
1435 }
1436 out:
1437 spin_unlock_irq(&qdio->req_q_lock);
1438 return retval;
1439 }
1440
1441 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1442 {
1443 struct zfcp_port *port = req->data;
1444
1445 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1446 return;
1447
1448 switch (req->qtcb->header.fsf_status) {
1449 case FSF_PORT_HANDLE_NOT_VALID:
1450 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1");
1451 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1452 break;
1453 case FSF_ADAPTER_STATUS_AVAILABLE:
1454 break;
1455 case FSF_GOOD:
1456 zfcp_erp_clear_port_status(port, ZFCP_STATUS_COMMON_OPEN);
1457 break;
1458 }
1459 }
1460
1461 /**
1462 * zfcp_fsf_close_port - create and send close port request
1463 * @erp_action: pointer to struct zfcp_erp_action
1464 * Returns: 0 on success, error otherwise
1465 */
1466 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1467 {
1468 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1469 struct zfcp_fsf_req *req;
1470 int retval = -EIO;
1471
1472 spin_lock_irq(&qdio->req_q_lock);
1473 if (zfcp_qdio_sbal_get(qdio))
1474 goto out;
1475
1476 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1477 SBAL_FLAGS0_TYPE_READ,
1478 qdio->adapter->pool.erp_req);
1479
1480 if (IS_ERR(req)) {
1481 retval = PTR_ERR(req);
1482 goto out;
1483 }
1484
1485 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1486 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1487
1488 req->handler = zfcp_fsf_close_port_handler;
1489 req->data = erp_action->port;
1490 req->erp_action = erp_action;
1491 req->qtcb->header.port_handle = erp_action->port->handle;
1492 erp_action->fsf_req_id = req->req_id;
1493
1494 zfcp_fsf_start_erp_timer(req);
1495 retval = zfcp_fsf_req_send(req);
1496 if (retval) {
1497 zfcp_fsf_req_free(req);
1498 erp_action->fsf_req_id = 0;
1499 }
1500 out:
1501 spin_unlock_irq(&qdio->req_q_lock);
1502 return retval;
1503 }
1504
1505 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1506 {
1507 struct zfcp_fc_wka_port *wka_port = req->data;
1508 struct fsf_qtcb_header *header = &req->qtcb->header;
1509
1510 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1511 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1512 goto out;
1513 }
1514
1515 switch (header->fsf_status) {
1516 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1517 dev_warn(&req->adapter->ccw_device->dev,
1518 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1519 /* fall through */
1520 case FSF_ADAPTER_STATUS_AVAILABLE:
1521 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1522 /* fall through */
1523 case FSF_ACCESS_DENIED:
1524 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1525 break;
1526 case FSF_GOOD:
1527 wka_port->handle = header->port_handle;
1528 /* fall through */
1529 case FSF_PORT_ALREADY_OPEN:
1530 wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
1531 }
1532 out:
1533 wake_up(&wka_port->completion_wq);
1534 }
1535
1536 /**
1537 * zfcp_fsf_open_wka_port - create and send open wka-port request
1538 * @wka_port: pointer to struct zfcp_fc_wka_port
1539 * Returns: 0 on success, error otherwise
1540 */
1541 int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1542 {
1543 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1544 struct zfcp_fsf_req *req;
1545 int retval = -EIO;
1546
1547 spin_lock_irq(&qdio->req_q_lock);
1548 if (zfcp_qdio_sbal_get(qdio))
1549 goto out;
1550
1551 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1552 SBAL_FLAGS0_TYPE_READ,
1553 qdio->adapter->pool.erp_req);
1554
1555 if (unlikely(IS_ERR(req))) {
1556 retval = PTR_ERR(req);
1557 goto out;
1558 }
1559
1560 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1561 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1562
1563 req->handler = zfcp_fsf_open_wka_port_handler;
1564 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
1565 req->data = wka_port;
1566
1567 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1568 retval = zfcp_fsf_req_send(req);
1569 if (retval)
1570 zfcp_fsf_req_free(req);
1571 out:
1572 spin_unlock_irq(&qdio->req_q_lock);
1573 return retval;
1574 }
1575
1576 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1577 {
1578 struct zfcp_fc_wka_port *wka_port = req->data;
1579
1580 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1581 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1582 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1");
1583 }
1584
1585 wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
1586 wake_up(&wka_port->completion_wq);
1587 }
1588
1589 /**
1590 * zfcp_fsf_close_wka_port - create and send close wka port request
1591 * @wka_port: WKA port to close
1592 * Returns: 0 on success, error otherwise
1593 */
1594 int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1595 {
1596 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1597 struct zfcp_fsf_req *req;
1598 int retval = -EIO;
1599
1600 spin_lock_irq(&qdio->req_q_lock);
1601 if (zfcp_qdio_sbal_get(qdio))
1602 goto out;
1603
1604 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1605 SBAL_FLAGS0_TYPE_READ,
1606 qdio->adapter->pool.erp_req);
1607
1608 if (unlikely(IS_ERR(req))) {
1609 retval = PTR_ERR(req);
1610 goto out;
1611 }
1612
1613 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1614 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1615
1616 req->handler = zfcp_fsf_close_wka_port_handler;
1617 req->data = wka_port;
1618 req->qtcb->header.port_handle = wka_port->handle;
1619
1620 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1621 retval = zfcp_fsf_req_send(req);
1622 if (retval)
1623 zfcp_fsf_req_free(req);
1624 out:
1625 spin_unlock_irq(&qdio->req_q_lock);
1626 return retval;
1627 }
1628
1629 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1630 {
1631 struct zfcp_port *port = req->data;
1632 struct fsf_qtcb_header *header = &req->qtcb->header;
1633 struct scsi_device *sdev;
1634
1635 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1636 return;
1637
1638 switch (header->fsf_status) {
1639 case FSF_PORT_HANDLE_NOT_VALID:
1640 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1");
1641 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1642 break;
1643 case FSF_ACCESS_DENIED:
1644 zfcp_cfdc_port_denied(port, &header->fsf_status_qual);
1645 break;
1646 case FSF_PORT_BOXED:
1647 /* can't use generic zfcp_erp_modify_port_status because
1648 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1649 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1650 shost_for_each_device(sdev, port->adapter->scsi_host)
1651 if (sdev_to_zfcp(sdev)->port == port)
1652 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1653 &sdev_to_zfcp(sdev)->status);
1654 zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ACCESS_BOXED);
1655 zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED,
1656 "fscpph2");
1657 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1658 break;
1659 case FSF_ADAPTER_STATUS_AVAILABLE:
1660 switch (header->fsf_status_qual.word[0]) {
1661 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1662 /* fall through */
1663 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1664 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1665 break;
1666 }
1667 break;
1668 case FSF_GOOD:
1669 /* can't use generic zfcp_erp_modify_port_status because
1670 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1671 */
1672 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1673 shost_for_each_device(sdev, port->adapter->scsi_host)
1674 if (sdev_to_zfcp(sdev)->port == port)
1675 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1676 &sdev_to_zfcp(sdev)->status);
1677 break;
1678 }
1679 }
1680
1681 /**
1682 * zfcp_fsf_close_physical_port - close physical port
1683 * @erp_action: pointer to struct zfcp_erp_action
1684 * Returns: 0 on success
1685 */
1686 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1687 {
1688 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1689 struct zfcp_fsf_req *req;
1690 int retval = -EIO;
1691
1692 spin_lock_irq(&qdio->req_q_lock);
1693 if (zfcp_qdio_sbal_get(qdio))
1694 goto out;
1695
1696 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1697 SBAL_FLAGS0_TYPE_READ,
1698 qdio->adapter->pool.erp_req);
1699
1700 if (IS_ERR(req)) {
1701 retval = PTR_ERR(req);
1702 goto out;
1703 }
1704
1705 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1706 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1707
1708 req->data = erp_action->port;
1709 req->qtcb->header.port_handle = erp_action->port->handle;
1710 req->erp_action = erp_action;
1711 req->handler = zfcp_fsf_close_physical_port_handler;
1712 erp_action->fsf_req_id = req->req_id;
1713
1714 zfcp_fsf_start_erp_timer(req);
1715 retval = zfcp_fsf_req_send(req);
1716 if (retval) {
1717 zfcp_fsf_req_free(req);
1718 erp_action->fsf_req_id = 0;
1719 }
1720 out:
1721 spin_unlock_irq(&qdio->req_q_lock);
1722 return retval;
1723 }
1724
1725 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
1726 {
1727 struct zfcp_adapter *adapter = req->adapter;
1728 struct scsi_device *sdev = req->data;
1729 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1730 struct fsf_qtcb_header *header = &req->qtcb->header;
1731 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1732
1733 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1734 return;
1735
1736 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1737 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1738 ZFCP_STATUS_LUN_SHARED |
1739 ZFCP_STATUS_LUN_READONLY,
1740 &zfcp_sdev->status);
1741
1742 switch (header->fsf_status) {
1743
1744 case FSF_PORT_HANDLE_NOT_VALID:
1745 zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
1746 /* fall through */
1747 case FSF_LUN_ALREADY_OPEN:
1748 break;
1749 case FSF_ACCESS_DENIED:
1750 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
1751 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1752 break;
1753 case FSF_PORT_BOXED:
1754 zfcp_erp_set_port_status(zfcp_sdev->port,
1755 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1756 zfcp_erp_port_reopen(zfcp_sdev->port,
1757 ZFCP_STATUS_COMMON_ERP_FAILED, "fsouh_2");
1758 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1759 break;
1760 case FSF_LUN_SHARING_VIOLATION:
1761 zfcp_cfdc_lun_shrng_vltn(sdev, &header->fsf_status_qual);
1762 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1763 break;
1764 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1765 dev_warn(&adapter->ccw_device->dev,
1766 "No handle is available for LUN "
1767 "0x%016Lx on port 0x%016Lx\n",
1768 (unsigned long long)zfcp_scsi_dev_lun(sdev),
1769 (unsigned long long)zfcp_sdev->port->wwpn);
1770 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
1771 /* fall through */
1772 case FSF_INVALID_COMMAND_OPTION:
1773 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1774 break;
1775 case FSF_ADAPTER_STATUS_AVAILABLE:
1776 switch (header->fsf_status_qual.word[0]) {
1777 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1778 zfcp_fc_test_link(zfcp_sdev->port);
1779 /* fall through */
1780 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1781 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1782 break;
1783 }
1784 break;
1785
1786 case FSF_GOOD:
1787 zfcp_sdev->lun_handle = header->lun_handle;
1788 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1789 zfcp_cfdc_open_lun_eval(sdev, bottom);
1790 break;
1791 }
1792 }
1793
1794 /**
1795 * zfcp_fsf_open_lun - open LUN
1796 * @erp_action: pointer to struct zfcp_erp_action
1797 * Returns: 0 on success, error otherwise
1798 */
1799 int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1800 {
1801 struct zfcp_adapter *adapter = erp_action->adapter;
1802 struct zfcp_qdio *qdio = adapter->qdio;
1803 struct zfcp_fsf_req *req;
1804 int retval = -EIO;
1805
1806 spin_lock_irq(&qdio->req_q_lock);
1807 if (zfcp_qdio_sbal_get(qdio))
1808 goto out;
1809
1810 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1811 SBAL_FLAGS0_TYPE_READ,
1812 adapter->pool.erp_req);
1813
1814 if (IS_ERR(req)) {
1815 retval = PTR_ERR(req);
1816 goto out;
1817 }
1818
1819 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1820 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1821
1822 req->qtcb->header.port_handle = erp_action->port->handle;
1823 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev);
1824 req->handler = zfcp_fsf_open_lun_handler;
1825 req->data = erp_action->sdev;
1826 req->erp_action = erp_action;
1827 erp_action->fsf_req_id = req->req_id;
1828
1829 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
1830 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
1831
1832 zfcp_fsf_start_erp_timer(req);
1833 retval = zfcp_fsf_req_send(req);
1834 if (retval) {
1835 zfcp_fsf_req_free(req);
1836 erp_action->fsf_req_id = 0;
1837 }
1838 out:
1839 spin_unlock_irq(&qdio->req_q_lock);
1840 return retval;
1841 }
1842
1843 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
1844 {
1845 struct scsi_device *sdev = req->data;
1846 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1847
1848 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1849 return;
1850
1851 switch (req->qtcb->header.fsf_status) {
1852 case FSF_PORT_HANDLE_NOT_VALID:
1853 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
1854 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1855 break;
1856 case FSF_LUN_HANDLE_NOT_VALID:
1857 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fscuh_2");
1858 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1859 break;
1860 case FSF_PORT_BOXED:
1861 zfcp_erp_set_port_status(zfcp_sdev->port,
1862 ZFCP_STATUS_COMMON_ACCESS_BOXED);
1863 zfcp_erp_port_reopen(zfcp_sdev->port,
1864 ZFCP_STATUS_COMMON_ERP_FAILED, "fscuh_3");
1865 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1866 break;
1867 case FSF_ADAPTER_STATUS_AVAILABLE:
1868 switch (req->qtcb->header.fsf_status_qual.word[0]) {
1869 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1870 zfcp_fc_test_link(zfcp_sdev->port);
1871 /* fall through */
1872 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1873 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1874 break;
1875 }
1876 break;
1877 case FSF_GOOD:
1878 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &zfcp_sdev->status);
1879 break;
1880 }
1881 }
1882
1883 /**
1884  * zfcp_fsf_close_lun - close LUN
1885 * @erp_action: pointer to erp_action triggering the "close LUN"
1886 * Returns: 0 on success, error otherwise
1887 */
1888 int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1889 {
1890 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1891 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
1892 struct zfcp_fsf_req *req;
1893 int retval = -EIO;
1894
1895 spin_lock_irq(&qdio->req_q_lock);
1896 if (zfcp_qdio_sbal_get(qdio))
1897 goto out;
1898
1899 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1900 SBAL_FLAGS0_TYPE_READ,
1901 qdio->adapter->pool.erp_req);
1902
1903 if (IS_ERR(req)) {
1904 retval = PTR_ERR(req);
1905 goto out;
1906 }
1907
1908 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1909 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
1910
1911 req->qtcb->header.port_handle = erp_action->port->handle;
1912 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
1913 req->handler = zfcp_fsf_close_lun_handler;
1914 req->data = erp_action->sdev;
1915 req->erp_action = erp_action;
1916 erp_action->fsf_req_id = req->req_id;
1917
1918 zfcp_fsf_start_erp_timer(req);
1919 retval = zfcp_fsf_req_send(req);
1920 if (retval) {
1921 zfcp_fsf_req_free(req);
1922 erp_action->fsf_req_id = 0;
1923 }
1924 out:
1925 spin_unlock_irq(&qdio->req_q_lock);
1926 return retval;
1927 }
1928
1929 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
1930 {
1931 lat_rec->sum += lat;
1932 lat_rec->min = min(lat_rec->min, lat);
1933 lat_rec->max = max(lat_rec->max, lat);
1934 }
1935
1936 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
1937 {
1938 struct fsf_qual_latency_info *lat_in;
1939 struct latency_cont *lat = NULL;
1940 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
1941 struct zfcp_blk_drv_data blktrc;
1942 int ticks = req->adapter->timer_ticks;
1943
1944 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
1945
1946 blktrc.flags = 0;
1947 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
1948 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1949 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
1950 blktrc.inb_usage = 0;
1951 blktrc.outb_usage = req->qdio_req.qdio_outb_usage;
1952
1953 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
1954 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
1955 blktrc.flags |= ZFCP_BLK_LAT_VALID;
1956 blktrc.channel_lat = lat_in->channel_lat * ticks;
1957 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
1958
1959 switch (req->qtcb->bottom.io.data_direction) {
1960 case FSF_DATADIR_DIF_READ_STRIP:
1961 case FSF_DATADIR_DIF_READ_CONVERT:
1962 case FSF_DATADIR_READ:
1963 lat = &zfcp_sdev->latencies.read;
1964 break;
1965 case FSF_DATADIR_DIF_WRITE_INSERT:
1966 case FSF_DATADIR_DIF_WRITE_CONVERT:
1967 case FSF_DATADIR_WRITE:
1968 lat = &zfcp_sdev->latencies.write;
1969 break;
1970 case FSF_DATADIR_CMND:
1971 lat = &zfcp_sdev->latencies.cmd;
1972 break;
1973 }
1974
1975 if (lat) {
1976 spin_lock(&zfcp_sdev->latencies.lock);
1977 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
1978 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
1979 lat->counter++;
1980 spin_unlock(&zfcp_sdev->latencies.lock);
1981 }
1982 }
1983
1984 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
1985 sizeof(blktrc));
1986 }
1987
1988 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
1989 {
1990 struct scsi_cmnd *scmnd = req->data;
1991 struct scsi_device *sdev = scmnd->device;
1992 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
1993 struct fsf_qtcb_header *header = &req->qtcb->header;
1994
1995 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
1996 return;
1997
1998 switch (header->fsf_status) {
1999 case FSF_HANDLE_MISMATCH:
2000 case FSF_PORT_HANDLE_NOT_VALID:
2001 zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fssfch1");
2002 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2003 break;
2004 case FSF_FCPLUN_NOT_VALID:
2005 case FSF_LUN_HANDLE_NOT_VALID:
2006 zfcp_erp_port_reopen(zfcp_sdev->port, 0, "fssfch2");
2007 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2008 break;
2009 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2010 zfcp_fsf_class_not_supp(req);
2011 break;
2012 case FSF_ACCESS_DENIED:
2013 zfcp_cfdc_lun_denied(sdev, &header->fsf_status_qual);
2014 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2015 break;
2016 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2017 dev_err(&req->adapter->ccw_device->dev,
2018 "Incorrect direction %d, LUN 0x%016Lx on port "
2019 "0x%016Lx closed\n",
2020 req->qtcb->bottom.io.data_direction,
2021 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2022 (unsigned long long)zfcp_sdev->port->wwpn);
2023 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2024 "fssfch3");
2025 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2026 break;
2027 case FSF_CMND_LENGTH_NOT_VALID:
2028 dev_err(&req->adapter->ccw_device->dev,
2029 "Incorrect CDB length %d, LUN 0x%016Lx on "
2030 "port 0x%016Lx closed\n",
2031 req->qtcb->bottom.io.fcp_cmnd_length,
2032 (unsigned long long)zfcp_scsi_dev_lun(sdev),
2033 (unsigned long long)zfcp_sdev->port->wwpn);
2034 zfcp_erp_adapter_shutdown(zfcp_sdev->port->adapter, 0,
2035 "fssfch4");
2036 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2037 break;
2038 case FSF_PORT_BOXED:
2039 zfcp_erp_set_port_status(zfcp_sdev->port,
2040 ZFCP_STATUS_COMMON_ACCESS_BOXED);
2041 zfcp_erp_port_reopen(zfcp_sdev->port,
2042 ZFCP_STATUS_COMMON_ERP_FAILED, "fssfch5");
2043 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2044 break;
2045 case FSF_LUN_BOXED:
2046 zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ACCESS_BOXED);
2047 zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
2048 "fssfch6");
2049 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2050 break;
2051 case FSF_ADAPTER_STATUS_AVAILABLE:
2052 if (header->fsf_status_qual.word[0] ==
2053 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2054 zfcp_fc_test_link(zfcp_sdev->port);
2055 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2056 break;
2057 }
2058 }
2059
2060 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req)
2061 {
2062 struct scsi_cmnd *scpnt;
2063 struct fcp_resp_with_ext *fcp_rsp;
2064 unsigned long flags;
2065
2066 read_lock_irqsave(&req->adapter->abort_lock, flags);
2067
2068 scpnt = req->data;
2069 if (unlikely(!scpnt)) {
2070 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2071 return;
2072 }
2073
2074 zfcp_fsf_fcp_handler_common(req);
2075
2076 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2077 set_host_byte(scpnt, DID_TRANSPORT_DISRUPTED);
2078 goto skip_fsfstatus;
2079 }
2080
2081 switch (req->qtcb->header.fsf_status) {
2082 case FSF_INCONSISTENT_PROT_DATA:
2083 case FSF_INVALID_PROT_PARM:
2084 set_host_byte(scpnt, DID_ERROR);
2085 goto skip_fsfstatus;
2086 case FSF_BLOCK_GUARD_CHECK_FAILURE:
2087 zfcp_scsi_dif_sense_error(scpnt, 0x1);
2088 goto skip_fsfstatus;
2089 case FSF_APP_TAG_CHECK_FAILURE:
2090 zfcp_scsi_dif_sense_error(scpnt, 0x2);
2091 goto skip_fsfstatus;
2092 case FSF_REF_TAG_CHECK_FAILURE:
2093 zfcp_scsi_dif_sense_error(scpnt, 0x3);
2094 goto skip_fsfstatus;
2095 }
2096 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2097 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2098
2099 skip_fsfstatus:
2100 zfcp_fsf_req_trace(req, scpnt);
2101 zfcp_dbf_scsi_result(scpnt, req);
2102
2103 scpnt->host_scribble = NULL;
2104 (scpnt->scsi_done) (scpnt);
2105 /*
2106 * We must hold this lock until scsi_done has been called.
2107 	 * Otherwise we may call scsi_done after an abort for this
2108 	 * command has already completed.
2109 * Note: scsi_done must not block!
2110 */
2111 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2112 }
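
/*
 * Illustrative sketch only, not part of the driver: an abort path is
 * expected to take adapter->abort_lock for writing before it evaluates
 * scpnt->host_scribble, which serializes it against the completion path
 * in zfcp_fsf_fcp_cmnd_handler() above (see the comment there).  The
 * function name below is hypothetical.
 */
static struct zfcp_fsf_req *
zfcp_fsf_example_find_req_for_abort(struct zfcp_adapter *adapter,
				    struct scsi_cmnd *scpnt)
{
	struct zfcp_fsf_req *req;
	unsigned long flags;
	unsigned long req_id;

	write_lock_irqsave(&adapter->abort_lock, flags);
	req_id = (unsigned long) scpnt->host_scribble;
	req = zfcp_reqlist_find(adapter->req_list, req_id);
	write_unlock_irqrestore(&adapter->abort_lock, flags);

	return req;
}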
2113
2114 static int zfcp_fsf_set_data_dir(struct scsi_cmnd *scsi_cmnd, u32 *data_dir)
2115 {
2116 switch (scsi_get_prot_op(scsi_cmnd)) {
2117 case SCSI_PROT_NORMAL:
2118 switch (scsi_cmnd->sc_data_direction) {
2119 case DMA_NONE:
2120 *data_dir = FSF_DATADIR_CMND;
2121 break;
2122 case DMA_FROM_DEVICE:
2123 *data_dir = FSF_DATADIR_READ;
2124 break;
2125 case DMA_TO_DEVICE:
2126 *data_dir = FSF_DATADIR_WRITE;
2127 break;
2128 case DMA_BIDIRECTIONAL:
2129 return -EINVAL;
2130 }
2131 break;
2132
2133 case SCSI_PROT_READ_STRIP:
2134 *data_dir = FSF_DATADIR_DIF_READ_STRIP;
2135 break;
2136 case SCSI_PROT_WRITE_INSERT:
2137 *data_dir = FSF_DATADIR_DIF_WRITE_INSERT;
2138 break;
2139 case SCSI_PROT_READ_PASS:
2140 *data_dir = FSF_DATADIR_DIF_READ_CONVERT;
2141 break;
2142 case SCSI_PROT_WRITE_PASS:
2143 *data_dir = FSF_DATADIR_DIF_WRITE_CONVERT;
2144 break;
2145 default:
2146 return -EINVAL;
2147 }
2148
2149 return 0;
2150 }
2151
2152 /**
2153 * zfcp_fsf_fcp_cmnd - initiate an FCP command (for a SCSI command)
2154 * @scsi_cmnd: scsi command to be sent
2155 */
2156 int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2157 {
2158 struct zfcp_fsf_req *req;
2159 struct fcp_cmnd *fcp_cmnd;
2160 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2161 int real_bytes, retval = -EIO, dix_bytes = 0;
2162 struct scsi_device *sdev = scsi_cmnd->device;
2163 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
2164 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
2165 struct zfcp_qdio *qdio = adapter->qdio;
2166 struct fsf_qtcb_bottom_io *io;
2167 unsigned long flags;
2168
2169 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2170 ZFCP_STATUS_COMMON_UNBLOCKED)))
2171 return -EBUSY;
2172
2173 spin_lock_irqsave(&qdio->req_q_lock, flags);
2174 if (atomic_read(&qdio->req_q_free) <= 0) {
2175 atomic_inc(&qdio->req_q_full);
2176 goto out;
2177 }
2178
2179 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2180 sbtype = SBAL_FLAGS0_TYPE_WRITE;
2181
2182 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2183 sbtype, adapter->pool.scsi_req);
2184
2185 if (IS_ERR(req)) {
2186 retval = PTR_ERR(req);
2187 goto out;
2188 }
2189
2190 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2191
2192 io = &req->qtcb->bottom.io;
2193 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2194 req->data = scsi_cmnd;
2195 req->handler = zfcp_fsf_fcp_cmnd_handler;
2196 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2197 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2198 io->service_class = FSF_CLASS_3;
2199 io->fcp_cmnd_length = FCP_CMND_LEN;
2200
2201 if (scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) {
2202 io->data_block_length = scsi_cmnd->device->sector_size;
2203 io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF;
2204 }
2205
2206 	if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction))
		goto failed_scsi_cmnd;
2207
2208 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2209 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
2210
2211 if (scsi_prot_sg_count(scsi_cmnd)) {
2212 zfcp_qdio_set_data_div(qdio, &req->qdio_req,
2213 scsi_prot_sg_count(scsi_cmnd));
2214 dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2215 scsi_prot_sglist(scsi_cmnd));
2216 io->prot_data_length = dix_bytes;
2217 }
2218
2219 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
2220 scsi_sglist(scsi_cmnd));
2221
2222 if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0))
2223 goto failed_scsi_cmnd;
2224
2225 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2226
2227 retval = zfcp_fsf_req_send(req);
2228 if (unlikely(retval))
2229 goto failed_scsi_cmnd;
2230
2231 goto out;
2232
2233 failed_scsi_cmnd:
2234 zfcp_fsf_req_free(req);
2235 scsi_cmnd->host_scribble = NULL;
2236 out:
2237 spin_unlock_irqrestore(&qdio->req_q_lock, flags);
2238 return retval;
2239 }
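
/*
 * Illustrative sketch only, not part of the driver: a queuecommand-style
 * caller is assumed to map the return codes of zfcp_fsf_fcp_cmnd() to the
 * usual SCSI midlayer conventions, -EBUSY for a temporarily blocked LUN
 * and any other error for a busy host.  The function name is hypothetical.
 */
static int zfcp_fsf_example_queuecommand(struct scsi_cmnd *scpnt)
{
	int ret = zfcp_fsf_fcp_cmnd(scpnt);

	if (ret == -EBUSY)
		return SCSI_MLQUEUE_DEVICE_BUSY; /* LUN not yet unblocked */
	if (ret < 0)
		return SCSI_MLQUEUE_HOST_BUSY;	/* e.g. no free SBAL */

	return 0;	/* request sent, scsi_done() will be called later */
}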
2240
2241 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req)
2242 {
2243 struct fcp_resp_with_ext *fcp_rsp;
2244 struct fcp_resp_rsp_info *rsp_info;
2245
2246 zfcp_fsf_fcp_handler_common(req);
2247
2248 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2249 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2250
2251 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2252 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2253 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
2254 }
2255
2256 /**
2257 * zfcp_fsf_fcp_task_mgmt - send SCSI task management command
2258 * @scmnd: SCSI command to send the task management command for
2259 * @tm_flags: unsigned byte for task management flags
2260  * Returns: on success pointer to struct zfcp_fsf_req, NULL otherwise
2261 */
2262 struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2263 u8 tm_flags)
2264 {
2265 struct zfcp_fsf_req *req = NULL;
2266 struct fcp_cmnd *fcp_cmnd;
2267 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
2268 struct zfcp_qdio *qdio = zfcp_sdev->port->adapter->qdio;
2269
2270 if (unlikely(!(atomic_read(&zfcp_sdev->status) &
2271 ZFCP_STATUS_COMMON_UNBLOCKED)))
2272 return NULL;
2273
2274 spin_lock_irq(&qdio->req_q_lock);
2275 if (zfcp_qdio_sbal_get(qdio))
2276 goto out;
2277
2278 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2279 SBAL_FLAGS0_TYPE_WRITE,
2280 qdio->adapter->pool.scsi_req);
2281
2282 if (IS_ERR(req)) {
2283 req = NULL;
2284 goto out;
2285 }
2286
2287 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2288 req->data = scmnd;
2289 req->handler = zfcp_fsf_fcp_task_mgmt_handler;
2290 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle;
2291 req->qtcb->header.port_handle = zfcp_sdev->port->handle;
2292 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2293 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2294 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
2295
2296 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req);
2297
2298 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2299 zfcp_fc_fcp_tm(fcp_cmnd, scmnd->device, tm_flags);
2300
2301 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2302 if (!zfcp_fsf_req_send(req))
2303 goto out;
2304
2305 zfcp_fsf_req_free(req);
2306 req = NULL;
2307 out:
2308 spin_unlock_irq(&qdio->req_q_lock);
2309 return req;
2310 }
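
/*
 * Illustrative sketch only, not part of the driver: a SCSI error-handler
 * caller sends the task management function, waits for the request to
 * complete and checks ZFCP_STATUS_FSFREQ_TMFUNCFAILED before freeing the
 * request.  FCP_TMF_LUN_RESET and the function name are used here purely
 * for illustration.
 */
static int zfcp_fsf_example_lun_reset(struct scsi_cmnd *scpnt)
{
	struct zfcp_fsf_req *req;
	int ret = 0;

	req = zfcp_fsf_fcp_task_mgmt(scpnt, FCP_TMF_LUN_RESET);
	if (!req)
		return -EBUSY;	/* LUN blocked or out of resources */

	wait_for_completion(&req->completion);
	if (req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED)
		ret = -EIO;	/* TMF was rejected or failed */

	zfcp_fsf_req_free(req);
	return ret;
}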
2311
2312 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2313 {
2314 }
2315
2316 /**
2317 * zfcp_fsf_control_file - control file upload/download
2318 * @adapter: pointer to struct zfcp_adapter
2319 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2320  * Returns: on success pointer to struct zfcp_fsf_req, ERR_PTR() otherwise
2321 */
2322 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2323 struct zfcp_fsf_cfdc *fsf_cfdc)
2324 {
2325 struct zfcp_qdio *qdio = adapter->qdio;
2326 struct zfcp_fsf_req *req = NULL;
2327 struct fsf_qtcb_bottom_support *bottom;
2328 int direction, retval = -EIO, bytes;
2329
2330 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2331 return ERR_PTR(-EOPNOTSUPP);
2332
2333 switch (fsf_cfdc->command) {
2334 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2335 direction = SBAL_FLAGS0_TYPE_WRITE;
2336 break;
2337 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2338 direction = SBAL_FLAGS0_TYPE_READ;
2339 break;
2340 default:
2341 return ERR_PTR(-EINVAL);
2342 }
2343
2344 spin_lock_irq(&qdio->req_q_lock);
2345 if (zfcp_qdio_sbal_get(qdio))
2346 goto out;
2347
2348 req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, direction, NULL);
2349 if (IS_ERR(req)) {
2350 retval = -EPERM;
2351 goto out;
2352 }
2353
2354 req->handler = zfcp_fsf_control_file_handler;
2355
2356 bottom = &req->qtcb->bottom.support;
2357 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2358 bottom->option = fsf_cfdc->option;
2359
2360 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg);
2361
2362 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2363 zfcp_fsf_req_free(req);
2364 goto out;
2365 }
2366 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req);
2367
2368 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2369 retval = zfcp_fsf_req_send(req);
2370 out:
2371 spin_unlock_irq(&qdio->req_q_lock);
2372
2373 if (!retval) {
2374 wait_for_completion(&req->completion);
2375 return req;
2376 }
2377 return ERR_PTR(retval);
2378 }
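
/*
 * Illustrative sketch only, not part of the driver: a CFDC caller fills a
 * struct zfcp_fsf_cfdc with the FSF command, the option word and a
 * scatterlist covering ZFCP_CFDC_MAX_SIZE bytes, then evaluates the
 * returned request.  The function name below is hypothetical.
 */
static int zfcp_fsf_example_cfdc_download(struct zfcp_adapter *adapter,
					  struct zfcp_fsf_cfdc *cfdc)
{
	struct zfcp_fsf_req *req;
	int ret;

	cfdc->command = FSF_QTCB_DOWNLOAD_CONTROL_FILE;
	req = zfcp_fsf_control_file(adapter, cfdc);
	if (IS_ERR(req))	/* e.g. -EOPNOTSUPP without the CFDC feature */
		return PTR_ERR(req);

	ret = (req->status & ZFCP_STATUS_FSFREQ_ERROR) ? -EIO : 0;
	zfcp_fsf_req_free(req);
	return ret;
}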
2379
2380 /**
2381 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2382  * @qdio: pointer to struct zfcp_qdio
2383 * @sbal_idx: response queue index of SBAL to be processed
2384 */
2385 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2386 {
2387 struct zfcp_adapter *adapter = qdio->adapter;
2388 struct qdio_buffer *sbal = qdio->res_q[sbal_idx];
2389 struct qdio_buffer_element *sbale;
2390 struct zfcp_fsf_req *fsf_req;
2391 unsigned long req_id;
2392 int idx;
2393
2394 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2395
2396 sbale = &sbal->element[idx];
2397 req_id = (unsigned long) sbale->addr;
2398 fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
2399
2400 if (!fsf_req) {
2401 /*
2402 			 * An unknown request id means that we potentially have
2403 			 * memory corruption and must stop the machine immediately.
2404 */
2405 zfcp_qdio_siosl(adapter);
2406 panic("error: unknown req_id (%lx) on adapter %s.\n",
2407 req_id, dev_name(&adapter->ccw_device->dev));
2408 }
2409
2410 fsf_req->qdio_req.sbal_response = sbal_idx;
2411 zfcp_fsf_req_complete(fsf_req);
2412
2413 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
2414 break;
2415 }
2416 }
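
/*
 * Illustrative sketch only, not part of the driver: the QDIO response
 * queue handler is expected to call zfcp_fsf_reqid_check() once per
 * returned SBAL, wrapping around the response queue.  The function name
 * below is hypothetical.
 */
static void zfcp_fsf_example_resp_done(struct zfcp_qdio *qdio, int first,
				       int count)
{
	int sbal_no, sbal_idx;

	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
		zfcp_fsf_reqid_check(qdio, sbal_idx);
	}
}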
2417
2418 struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
2419 struct qdio_buffer *sbal)
2420 {
2421 struct qdio_buffer_element *sbale = &sbal->element[0];
2422 u64 req_id = (unsigned long) sbale->addr;
2423
2424 return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
2425 }